run regressor
train_regressor_bert.py +118 -0
train_regressor_bert.py
ADDED
@@ -0,0 +1,118 @@
from transformers import (
    AutoTokenizer,
    DataCollatorWithPadding,
    TrainingArguments,
    Trainer,
    AutoModelForSequenceClassification,
)
from datasets import load_dataset, ClassLabel
import numpy as np
import evaluate
import argparse
import os
from sklearn.metrics import classification_report, confusion_matrix


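# The model is trained as a regressor (one continuous output), so predictions
# are rounded and clipped to the integer range [0, 5] before computing
# standard classification metrics.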
def compute_metrics(eval_pred):
    precision_metric = evaluate.load("precision")
    recall_metric = evaluate.load("recall")
    f1_metric = evaluate.load("f1")
    accuracy_metric = evaluate.load("accuracy")

    logits, labels = eval_pred
    preds = np.round(logits.squeeze()).clip(0, 5).astype(int)
    labels = np.round(labels.squeeze()).astype(int)
    precision = precision_metric.compute(
        predictions=preds, references=labels, average="macro"
    )["precision"]
    recall = recall_metric.compute(
        predictions=preds, references=labels, average="macro"
    )["recall"]
    f1 = f1_metric.compute(predictions=preds, references=labels, average="macro")["f1"]
    accuracy = accuracy_metric.compute(predictions=preds, references=labels)["accuracy"]

    report = classification_report(labels, preds)
    cm = confusion_matrix(labels, preds)
    print("Validation Report:\n" + report)
    print("Confusion Matrix:\n" + str(cm))

    return {
        "precision": precision,
        "recall": recall,
        "f1_macro": f1,
        "accuracy": accuracy,
    }


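# Fine-tunes the base model to predict an integer score between 0 and 5
# for each text.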
def main(args):
    dataset = load_dataset(
        args.dataset_name, split="train", cache_dir="/home/perk/.cache/", num_proc=8
    )
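    # Clip scores to [0, 5] and cast the column to ClassLabel so the
    # train/test split can be stratified by score.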
    dataset = dataset.map(
        lambda x: {args.target_column: np.clip(int(x[args.target_column]), 0, 5)},
        num_proc=8,
    )
    dataset = dataset.cast_column(
        args.target_column, ClassLabel(names=[str(i) for i in range(6)])
    )
    dataset = dataset.train_test_split(
        train_size=0.9, seed=42, stratify_by_column=args.target_column
    )

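    # Tokenize the text and store the score as a float target for regression.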
    tokenizer = AutoTokenizer.from_pretrained(args.base_model_name)

    def preprocess(examples):
        batch = tokenizer(examples["text"], truncation=True, max_length=512)
        batch["labels"] = np.float32(examples[args.target_column])
        return batch

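    # A single output (num_labels=1) turns the classification head into a
    # regression head; dropout is zeroed since only the head is trained.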
    dataset = dataset.map(preprocess, batched=True)
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    model = AutoModelForSequenceClassification.from_pretrained(
        args.base_model_name,
        num_labels=1,
        classifier_dropout=0.0,
        hidden_dropout_prob=0.0,
    )

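    # Freeze the embeddings and encoder so that only the regression head
    # receives gradient updates.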
    for param in model.bert.embeddings.parameters():
        param.requires_grad = False
    for param in model.bert.encoder.parameters():
        param.requires_grad = False

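    # Evaluate and checkpoint every 1000 steps, tracking macro F1 so the
    # best checkpoint can be restored at the end of training.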
    training_args = TrainingArguments(
        output_dir=args.checkpoint_dir,
        evaluation_strategy="steps",
        save_strategy="steps",
        eval_steps=1000,
        save_steps=1000,
        logging_steps=100,
        learning_rate=3e-4,
        num_train_epochs=20,
        seed=0,
        per_device_train_batch_size=32,
        per_device_eval_batch_size=32,
        load_best_model_at_end=True,
        metric_for_best_model="f1_macro",
        greater_is_better=True,
        bf16=True,
    )

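    # With num_labels=1 and float labels, the model computes an MSE
    # regression loss in its forward pass.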
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"],
        eval_dataset=dataset["test"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

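    # Train, then save the best checkpoint (restored thanks to
    # load_best_model_at_end) under args.checkpoint_dir/final.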
    trainer.train()
    trainer.save_model(os.path.join(args.checkpoint_dir, "final"))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--base_model_name", type=str, default="Snowflake/snowflake-arctic-embed-m")
    parser.add_argument("--dataset_name", type=str, default="HuggingFaceFW/fineweb-edu-llama3-annotations")
    parser.add_argument("--target_column", type=str, default="score")
    parser.add_argument("--checkpoint_dir", type=str, default="/fsx/anton/cosmopedia/edu_score/bert_snowflake_regression")
    args = parser.parse_args()

    main(args)
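A typical invocation (the checkpoint directory below is illustrative, not from the commit; every flag defaults to the values above):

python train_regressor_bert.py \
    --base_model_name Snowflake/snowflake-arctic-embed-m \
    --dataset_name HuggingFaceFW/fineweb-edu-llama3-annotations \
    --target_column score \
    --checkpoint_dir ./bert_snowflake_regression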