import os
import datasets
import evaluate
import numpy as np
from sklearn.metrics import (
    accuracy_score,
    confusion_matrix,
    precision_score,
    recall_score,
)
from transformers import (
    Trainer,
    TrainingArguments,
)
from niacin.text import en
from niacin.augment import randaugment
# Load the predefined train and test splits
splits = ["train_coling2022", "test_coling2022"]
dataset = "cardiffnlp/tweet_topic_single"
train_all = datasets.load_dataset(dataset, split=splits[0])
test = datasets.load_dataset(dataset, split=splits[1])
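# tweet_topic_single is a single-label tweet topic dataset; num_labels=6 below matches its six topic classes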
# Apply data augmentation
augmentor = randaugment.RandAugment(
    [
        en.add_synonyms,
        en.add_hyponyms,
        en.add_misspelling,
        en.swap_words,
        en.add_contractions,
        # en.add_whitespace,
    ],
    n=3,
    m=10,
    shuffle=False,
)
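# Each pass over the augmentor yields n=3 randomly chosen transforms at magnitude m=10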
augmented_train_data = {"text": [], "label": [], "date": [], "id": [], "label_name": []}
for text, label, date, tweet_id, label_name in zip(
    train_all["text"],
    train_all["label"],
    train_all["date"],
    train_all["id"],
    train_all["label_name"],
):
    # Chain the sampled transforms so each augmentation builds on the previous one
    new_text = text
    for tx in augmentor:
        new_text = tx(new_text)
    # Some transforms leave the text unchanged, so only keep genuinely new examples
    if text != new_text:
        augmented_train_data["text"].append(new_text)
        augmented_train_data["label"].append(label)
        augmented_train_data["date"].append(date)
        augmented_train_data["id"].append(tweet_id)
        augmented_train_data["label_name"].append(label_name)
# Convert the augmented data to a Dataset and cast it to the original schema
augmented_dataset = datasets.Dataset.from_dict(
    {
        "text": augmented_train_data["text"],
        "date": augmented_train_data["date"],
        "label": augmented_train_data["label"],
        "label_name": augmented_train_data["label_name"],
        "id": augmented_train_data["id"],
    }
)
augmented_dataset = augmented_dataset.cast(train_all.features)
full_train = datasets.concatenate_datasets([train_all, augmented_dataset])
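# Concatenation roughly doubles the training data (minus examples the augmentation left unchanged)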
print(full_train)
# Split concatenated dataset
train_val = full_train.train_test_split(test_size=0.2, shuffle=True)
train = train_val["train"]
val = train_val["test"]
from transformers import RobertaTokenizer, RobertaForSequenceClassification
# Load the pre-trained RoBERTa tokenizer and model; the six-way classification head is
# newly initialized and fine-tuned together with the encoder
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
model = RobertaForSequenceClassification.from_pretrained("roberta-base", num_labels=6)
# Uncomment to freeze the encoder and train only the classification head
# for param in model.base_model.parameters():
#     param.requires_grad = False
# Tokenising function to be mapped over each split
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True)

train_token = train.map(tokenize_function, batched=True)
val_token = val.map(tokenize_function, batched=True)
test_token = test.map(tokenize_function, batched=True)
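# map(batched=True) adds the input_ids and attention_mask columns that Trainer consumes alongside the label column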
metric = evaluate.load("accuracy")
def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)
training_args = TrainingArguments(
    output_dir=os.getcwd(),
    evaluation_strategy="epoch",
    learning_rate=1e-4,
    per_device_eval_batch_size=8,
    per_device_train_batch_size=8,
    num_train_epochs=10,
    weight_decay=0.01,
    # load_best_model_at_end=True,
    metric_for_best_model="accuracy",
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_token,
    eval_dataset=val_token,
    compute_metrics=compute_metrics,
)
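# Trainer handles batching, the default AdamW optimizer, and the per-epoch evaluation configured above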
# Train and evaluate the model
trainer.train()
# Evaluate on the test set
predictions = trainer.predict(test_token)
# Get predicted labels from model
pred_labels = np.argmax(predictions.predictions, axis=-1)
# Get true labels from test set
true_labels = test["label"]
# Compute evaluation metrics
accuracy = accuracy_score(true_labels, pred_labels)
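# Weighted averaging accounts for class support when summarizing per-class precision and recall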
precision = precision_score(true_labels, pred_labels, average="weighted")
recall = recall_score(true_labels, pred_labels, average="weighted")
conf_matrix = confusion_matrix(true_labels, pred_labels)
# Print evaluation metrics
print(f"Accuracy: {accuracy}")
print(f"Precision: {precision}")
print(f"Recall: {recall}")
print(f"Confusion matrix:\n{conf_matrix}")