init
training_scripts/finetune_t5.py (CHANGED, +12 -13)
````diff
@@ -1,6 +1,6 @@
 """ Fine-tune T5 on topic classification (multi-label multi-class classification)
 ```
-python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp
+python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp --low-cpu-mem-usage
 ```
 """
 import json
@@ -8,7 +8,7 @@ import logging
 import os
 import argparse
 import gc
-from typing import List, Set
+from typing import List, Set, Dict
 from shutil import copyfile
 from statistics import mean
 from itertools import product
@@ -46,7 +46,7 @@ def load_model(
     return model
 
 
-def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]):
+def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> Dict[str, float]:
     scores = []
     for g, r in zip(references, predictions):
         tp = len(set(g).intersection(set(r)))
@@ -84,11 +84,10 @@ def train(
         skip_test: bool = False,
         skip_upload: bool = False,
         eval_steps: float = 0.25,
-        eval_batch_size: int =
+        eval_batch_size: int = None):
     """Fine-tune seq2seq model."""
     logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
-
-    if output_dir is None:
+    if not output_dir:
         output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
     # dataset process
     tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
@@ -117,7 +116,7 @@ def train(
         else:
             tokenized_dataset[f'{s}_ds'] = tokenized_dataset[s]
 
-    def compute_metric(eval_pred):  # for parameter search
+    def compute_metric(eval_pred) -> Dict[str, float]:  # for parameter search
 
         def decode_tokens(token_ids) -> List[Set[str]]:
             return [
@@ -137,10 +136,11 @@ def train(
 
     if not skip_train:
         lr = [1e-6, 1e-4] if lr is None else lr
-        batch = [64] if batch is None else batch
-        epoch = [1, 3, 5] if epoch is None else epoch
+        batch = [64] if not batch else batch
+        epoch = [1, 3, 5] if not epoch else epoch
+        eval_batch_size = min(batch) if not eval_batch_size else eval_batch_size
         for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
-            logging.info(f"[TRAIN {n}/{len(lr) * len(batch)}] lr: {lr_tmp}, batch: {batch_tmp}")
+            logging.info(f"[TRAIN {n}/{len(lr) * len(batch) * len(epoch)}] lr: {lr_tmp}, batch: {batch_tmp}")
             model = load_model(
                 model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
             )
@@ -161,7 +161,6 @@ def train(
                 eval_dataset=tokenized_dataset['validation_ds'],
                 compute_metrics=compute_metric,
             )
-
             # train
             result = trainer.train()
             trainer.save_model()  # Saves the tokenizer too for easy upload
@@ -169,13 +168,13 @@
             trainer.log_metrics("train", metrics)
             trainer.save_metrics("train", metrics)
             trainer.save_state()
-
             # evaluate
             metrics = trainer.evaluate()
             trainer.log_metrics("eval", metrics)
             trainer.save_metrics("eval", metrics)
-
+            # clean up memory
             del trainer
+            del model
             gc.collect()
             torch.cuda.empty_cache()
     else:
````
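The usage line in the docstring gains a --low-cpu-mem-usage switch. For reference, a minimal argparse parser matching the flags shown in that command; only the flag names come from the docstring, while the defaults and the store_true choice are assumptions about how the script wires them up:

```python
import argparse

# Hypothetical parser covering only the flags shown in the docstring example.
parser = argparse.ArgumentParser(description="Fine-tune T5 on tweet topic classification")
parser.add_argument("--dataset-name", type=str, default="ja")
parser.add_argument("--model-alias", type=str, default="mt5-small-tweet-topic-ja")
parser.add_argument("--model-organization", type=str, default="cardiffnlp")
parser.add_argument("--low-cpu-mem-usage", action="store_true")
opt = parser.parse_args()
```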
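That flag presumably feeds load_model's low_cpu_mem_usage argument, which transformers forwards to from_pretrained: with low_cpu_mem_usage=True the checkpoint is loaded without first materializing a full set of randomly initialized weights, cutting peak host RAM roughly in half. A sketch of such a call; AutoModelForSeq2SeqLM and the google/mt5-small checkpoint are illustrative, since the hunks above do not show load_model's body:

```python
import transformers

# low_cpu_mem_usage=True skips the throw-away random initialization and fills the
# weights straight from the checkpoint (recent transformers releases need the
# accelerate package installed for this path).
model = transformers.AutoModelForSeq2SeqLM.from_pretrained(
    "google/mt5-small",
    low_cpu_mem_usage=True,
)
tokenizer = transformers.AutoTokenizer.from_pretrained("google/mt5-small")
```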
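get_f1_score only gains a Dict[str, float] return annotation, and the hunk cuts off after the true-positive count. A rough sketch of a sample-averaged multi-label F1 consistent with that signature and the statistics.mean import; the precision/recall handling and the returned key are assumptions, not code from the file:

```python
from statistics import mean
from typing import Dict, List, Set


def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]) -> Dict[str, float]:
    """Sample-averaged F1 over gold and predicted label sets (sketch)."""
    scores = []
    for g, r in zip(references, predictions):
        tp = len(set(g).intersection(set(r)))       # labels that are both predicted and gold
        precision = tp / len(r) if r else 0.0       # correct share of predicted labels
        recall = tp / len(g) if g else 0.0          # recovered share of gold labels
        f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
        scores.append(f1)
    return {"f1": mean(scores)}


print(get_f1_score([{"sports", "news"}], [{"sports"}]))  # {'f1': 0.666...}
```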
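The last hunk deletes the model as well as the Trainer before collecting garbage and emptying the CUDA cache; without `del model` the weights stay referenced and the next grid-search run starts with that much less free GPU memory. A minimal illustration of the pattern; free_cuda_memory and the Linear stand-in are not part of the script:

```python
import gc

import torch


def free_cuda_memory() -> None:
    """Run a GC pass, then release PyTorch's cached but unused CUDA blocks."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


model = torch.nn.Linear(4, 2)  # stand-in for the fine-tuned seq2seq model
del model                      # drop the last reference so the memory is actually reclaimable
free_cuda_memory()
```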