init
training_scripts/finetune_t5.py +330 -0
training_scripts/finetune_t5.py
ADDED
@@ -0,0 +1,330 @@
"""Fine-tune T5 on topic classification (multi-label, multi-class classification).
```
python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp
```
"""
import json
import logging
import os
import multiprocessing
import argparse
import gc
from typing import List, Set
from shutil import copyfile
from statistics import mean

import torch
import transformers
from datasets import load_dataset
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
from ray import tune, init
from huggingface_hub import Repository


os.environ['TOKENIZERS_PARALLELISM'] = 'false'  # turn off the tokenizer parallelism warning
os.environ['WANDB_DISABLED'] = 'true'  # disable wandb


def load_model(
        model_name: str,
        use_auth_token: bool = False,
        low_cpu_mem_usage: bool = False) -> transformers.PreTrainedModel:
    """Load a seq2seq language model from the Hugging Face model hub."""
    # resolve the model class from the config
    config = transformers.AutoConfig.from_pretrained(model_name, use_auth_token=use_auth_token)
    if config.model_type == 't5':  # T5 requires the T5ForConditionalGeneration class
        model_class = transformers.T5ForConditionalGeneration.from_pretrained
    elif config.model_type == 'mt5':
        model_class = transformers.MT5ForConditionalGeneration.from_pretrained
    elif config.model_type == 'bart':
        model_class = transformers.BartForConditionalGeneration.from_pretrained
    elif config.model_type == 'mbart':
        model_class = transformers.MBartForConditionalGeneration.from_pretrained
    else:
        raise ValueError(f'unsupported model type: {config.model_type}')
    param = {'config': config, 'use_auth_token': use_auth_token, 'low_cpu_mem_usage': low_cpu_mem_usage}
    return model_class(model_name, **param)


def get_f1_score(references: List[Set[str]], predictions: List[Set[str]]):
    """Mean per-example F1 over gold and predicted label sets."""
    scores = []
    for gold, pred in zip(references, predictions):
        tp = len(gold.intersection(pred))
        fp = len([p for p in pred if p not in gold])  # predicted but not in the gold set
        fn = len([g for g in gold if g not in pred])  # gold labels that were missed
        f1 = 0 if tp == 0 else 2 * tp / (2 * tp + fp + fn)
        scores.append(f1)
    return {'f1': mean(scores)}
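
# Worked example of the metric above (values follow directly from the formula):
#   get_f1_score([{'sports'}], [{'sports', 'news'}]) -> {'f1': 0.666...}
#   tp=1, fp=1 ('news' is spurious), fn=0, so f1 = 2*1 / (2*1 + 1 + 0) = 2/3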

def train(
        model_name: str,
        model_low_cpu_mem_usage: bool,
        dataset: str,
        dataset_name: str,
        dataset_column_label: str,
        dataset_column_text: str,
        dataset_split_train: str,
        dataset_split_validation: str,
        dataset_split_test: str,
        search_range_lr: List,
        search_range_epoch: List,
        search_list_batch: List,
        down_sample_train: int,
        down_sample_validation: int,
        random_seed: int,
        use_auth_token: bool,
        n_trials: int,
        eval_step: int,
        parallel_cpu: bool,
        output_dir: str,
        ray_result_dir: str,
        model_alias: str,
        model_organization: str,
        eval_batch_size: int = 16):
    """Fine-tune a seq2seq model."""
    logging.info(
        f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name}), \n\t *Num of Trial: {n_trials}'
    )
    # set up the output directories
    if output_dir is None:
        output_dir = f'ckpt/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'
    if ray_result_dir is None:
        ray_result_dir = f'ray/{os.path.basename(model_name)}.{os.path.basename(dataset)}.{dataset_name}'

    # define the search space
    search_range_lr = [1e-6, 1e-4] if search_range_lr is None else search_range_lr
    assert len(search_range_lr) == 2, f'`search_range_lr` should contain [min_lr, max_lr]: {search_range_lr}'
    search_range_epoch = [2, 6] if search_range_epoch is None else search_range_epoch
    assert len(search_range_epoch) == 2, f'`search_range_epoch` should contain [min_epoch, max_epoch]: {search_range_epoch}'
    search_list_batch = [64, 128] if search_list_batch is None else search_list_batch
    search_space = {
        'learning_rate': tune.loguniform(search_range_lr[0], search_range_lr[1]),
        'num_train_epochs': tune.choice(list(range(search_range_epoch[0], search_range_epoch[1] + 1))),  # inclusive of max_epoch
        'per_device_train_batch_size': tune.choice(search_list_batch)
    }
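    # a sampled trial config then looks like (values illustrative):
    #   {'learning_rate': 3.2e-05, 'num_train_epochs': 4, 'per_device_train_batch_size': 64}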
    resources_per_trial = {'cpu': multiprocessing.cpu_count() if parallel_cpu else 1, 'gpu': torch.cuda.device_count()}
    init(ignore_reinit_error=True, num_cpus=resources_per_trial['cpu'])
    logging.info(f'[RESOURCE]\n{json.dumps(resources_per_trial, indent=4)}')

    # dataset processing
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_auth_token=use_auth_token)
    dataset_split = {
        'train': [dataset_split_train, down_sample_train],
        'validation': [dataset_split_validation, down_sample_validation]
    }
    dataset_instance = load_dataset(dataset, dataset_name, use_auth_token=use_auth_token)
    tokenized_dataset = {}
    for s, (s_dataset, down_sample) in dataset_split.items():
        tokenized_dataset[s] = []
        dataset_tmp = dataset_instance[s_dataset]
        dataset_tmp = dataset_tmp.shuffle(seed=random_seed)  # `shuffle` is not in-place; it returns a new dataset
        for i in dataset_tmp:
            model_inputs = tokenizer(i[dataset_column_text], truncation=True)
            model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
            tokenized_dataset[s].append(model_inputs)

        # down-sampled variant of the split, used during the hyperparameter search
        if down_sample is not None and len(dataset_tmp) > down_sample:
            tokenized_dataset[f'{s}_ds'] = []
            dataset_tmp = dataset_tmp.select(list(range(down_sample)))
            for i in dataset_tmp:
                model_inputs = tokenizer(i[dataset_column_text], truncation=True)
                model_inputs['labels'] = tokenizer(text_target=i[dataset_column_label], truncation=True)['input_ids']
                tokenized_dataset[f'{s}_ds'].append(model_inputs)
        else:
            tokenized_dataset[f'{s}_ds'] = tokenized_dataset[s]

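    # each tokenized example is a plain dict of lists, e.g.
    #   {'input_ids': [...], 'attention_mask': [...], 'labels': [...]}
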
    def compute_metric(eval_pred):  # metric for the parameter search

        def decode_tokens(token_ids) -> List[Set[str]]:
            return [
                set(tokenizer.decode(list(filter(lambda x: x != -100, r)), skip_special_tokens=True).split(',')) for r
                in token_ids
            ]

        predictions, reference_token_ids = eval_pred
        # format the references
        references_decode = decode_tokens(reference_token_ids)
        # format the predictions
        logit, loss = predictions
        generation_token_id = logit.argmax(-1)
        generation_token_id[logit.min(-1) == -100] = -100  # keep -100-padded positions masked so decode_tokens drops them
        generation_decode = decode_tokens(generation_token_id)
        return get_f1_score(references_decode, generation_decode)
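    # decode_tokens above maps label token ids back to sets of label names, e.g. a sequence
    # decoding to 'sports,music' becomes {'sports', 'music'} (labels are comma-joined strings)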

    if not os.path.exists(f'{output_dir}/model/pytorch_model.bin'):
        trainer = Seq2SeqTrainer(
            args=Seq2SeqTrainingArguments(
                output_dir=f'{output_dir}/runs',
                evaluation_strategy='steps',
                eval_steps=eval_step,
                seed=random_seed
            ),
            data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, model=load_model(
                model_name=model_name,
                use_auth_token=use_auth_token,
                low_cpu_mem_usage=model_low_cpu_mem_usage)),
            train_dataset=tokenized_dataset['train_ds'],
            eval_dataset=tokenized_dataset['validation_ds'],
            compute_metrics=compute_metric,
            model_init=lambda x: load_model(
                model_name=model_name,
                use_auth_token=use_auth_token,
                low_cpu_mem_usage=model_low_cpu_mem_usage)
        )
        os.makedirs(f'{output_dir}/model', exist_ok=True)
        if not os.path.exists(f'{output_dir}/model/hyperparameters.json'):
            # hyperparameter search (Ray Tune samples `n_trials` configs from the space above)
            best_run = trainer.hyperparameter_search(
                hp_space=lambda x: search_space,
                local_dir=ray_result_dir,
                direction='maximize',
                backend='ray',
                n_trials=n_trials,
                resources_per_trial=resources_per_trial
            )
            with open(f'{output_dir}/model/hyperparameters.json', 'w') as f:
                json.dump(best_run.hyperparameters, f)
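            # hyperparameters.json then holds the winning trial, e.g. (values illustrative):
            #   {"learning_rate": 3e-05, "num_train_epochs": 5, "per_device_train_batch_size": 64}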
        else:
            logging.info('skip hyperparameter search (already done)')

        # fine-tuning with the best config on the full training split
        logging.info('fine-tuning with the best config')
        with open(f'{output_dir}/model/hyperparameters.json') as f:
            best_hyperparameters = json.load(f)
        for n, v in best_hyperparameters.items():
            setattr(trainer.args, n, v)
        setattr(trainer, 'train_dataset', tokenized_dataset['train'])
        setattr(trainer.args, 'evaluation_strategy', 'no')
        trainer.train()
        trainer.save_model(f'{output_dir}/model')
        tokenizer.save_pretrained(f'{output_dir}/model')
        logging.info(f'model saved at {output_dir}/model')
        del trainer
        gc.collect()
        torch.cuda.empty_cache()
    else:
        logging.info('skip hyperparameter search & model training (already done)')

    # get the metric on the test set
    if dataset_split_test is not None and not os.path.exists(f'{output_dir}/model/evaluation_metrics.json'):
        logging.info('run evaluation on the test set')
        if not os.path.exists(f'{output_dir}/model/prediction_test.txt'):
            pipe = pipeline(
                'text2text-generation',
                model=f'{output_dir}/model',
                device='cuda:0' if resources_per_trial['gpu'] > 0 else 'cpu'
            )
            input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
            output = pipe(input_data, batch_size=eval_batch_size)
            output = [i['generated_text'] for i in output]
            with open(f'{output_dir}/model/prediction_test.txt', 'w') as f:
                f.write('\n'.join(output))
        with open(f'{output_dir}/model/prediction_test.txt') as f:
            output = [set(i.split(',')) for i in f.read().split('\n')]
        dataset_tmp = dataset_instance[dataset_split_test]
        # the label column holds comma-joined label names (the same strings used as tokenizer targets above)
        _references = [set(i[dataset_column_label].split(',')) for i in dataset_tmp]
        eval_metric = get_f1_score(_references, output)
        logging.info(json.dumps(eval_metric, indent=4))
        with open(f'{output_dir}/model/evaluation_metrics.json', 'w') as f:
            json.dump(eval_metric, f)
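        # evaluation_metrics.json then contains the mean per-example F1, e.g. {"f1": 0.6} (value illustrative)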

    if model_alias is not None:
        assert model_organization is not None, 'model_organization must be specified when model_alias is specified'
        logging.info('uploading to huggingface')
        args = {'use_auth_token': use_auth_token, 'organization': model_organization}
        model = load_model(model_name=f'{output_dir}/model')
        model.push_to_hub(model_alias, **args)
        tokenizer.push_to_hub(model_alias, **args)
        repo = Repository(model_alias, f'{model_organization}/{model_alias}')
        copyfile(f'{output_dir}/model/hyperparameters.json', f'{model_alias}/hyperparameters.json')
        if os.path.exists(f'{output_dir}/model/prediction_test.txt'):
            copyfile(f'{output_dir}/model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
        if os.path.exists(f'{output_dir}/model/evaluation_metrics.json'):
            copyfile(f'{output_dir}/model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
        sample = [i[dataset_column_text] for i in dataset_instance[dataset_split_train]]
        sample = [i for i in sample if "'" not in i and '"' not in i][:3]  # drop texts with quotes, which would break the YAML widget
        widget = '\n'.join([f"- text: '{t}'\n  example_title: example {_n + 1}" for _n, t in enumerate(sample)])
        with open(f'{model_alias}/README.md', 'w') as f:
            f.write(f"""
---
widget:
{widget}
---

# {model_organization}/{model_alias}

This is [{model_name}](https://huggingface.co/{model_name}) fine-tuned on [{dataset} ({dataset_name})](https://huggingface.co/datasets/{dataset}).

### Usage

```python
from transformers import pipeline

pipe = pipeline('text2text-generation', model='{model_organization}/{model_alias}')
output = pipe('{sample[0]}')
```
""")
        repo.push_to_hub()


if __name__ == '__main__':
    # arguments
    logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
    parser = argparse.ArgumentParser(description='Seq2Seq LM fine-tuning on topic classification.')
    parser.add_argument('-m', '--model-name', default='google/mt5-small', type=str)
    parser.add_argument('--low-cpu-mem-usage', action='store_true')
    parser.add_argument('-d', '--dataset', default='cardiffnlp/tweet_topic_multilingual', type=str)
    parser.add_argument('--dataset-name', default='ja', type=str)
    parser.add_argument('--dataset-column-label', default='label_name_flatten', type=str)
    parser.add_argument('--dataset-column-text', default='text', type=str)
    parser.add_argument('--dataset-split-train', default='train', type=str)
    parser.add_argument('--dataset-split-validation', default='validation', type=str)
    parser.add_argument('--dataset-split-test', default='test', type=str)
    parser.add_argument('--search-range-lr', nargs='+', default=None, type=float)
    parser.add_argument('--search-range-epoch', nargs='+', default=None, type=int)
    parser.add_argument('--search-list-batch', nargs='+', default=None, type=int)
    parser.add_argument('--down-sample-train', default=None, type=int)
    parser.add_argument('--down-sample-validation', default=2000, type=int)
    parser.add_argument('--random-seed', default=42, type=int)
    parser.add_argument('--use-auth-token', action='store_true')
    parser.add_argument('--n-trials', default=20, type=int)
    parser.add_argument('--eval-step', default=100, type=int)
    parser.add_argument('--parallel-cpu', action='store_true')
    parser.add_argument('--output-dir', default=None, type=str)
    parser.add_argument('--ray-result-dir', default=None, type=str)
    parser.add_argument('--model-alias', default=None, type=str)
    parser.add_argument('--model-organization', default=None, type=str)
    opt = parser.parse_args()

    train(model_name=opt.model_name,
          model_low_cpu_mem_usage=opt.low_cpu_mem_usage,
          dataset=opt.dataset,
          dataset_name=opt.dataset_name,
          dataset_column_label=opt.dataset_column_label,
          dataset_column_text=opt.dataset_column_text,
          dataset_split_train=opt.dataset_split_train,
          dataset_split_validation=opt.dataset_split_validation,
          dataset_split_test=opt.dataset_split_test,
          search_range_lr=opt.search_range_lr,
          search_range_epoch=opt.search_range_epoch,
          search_list_batch=opt.search_list_batch,
          down_sample_train=opt.down_sample_train,
          down_sample_validation=opt.down_sample_validation,
          random_seed=opt.random_seed,
          use_auth_token=opt.use_auth_token,
          n_trials=opt.n_trials,
          eval_step=opt.eval_step,
          parallel_cpu=opt.parallel_cpu,
          output_dir=opt.output_dir,
          ray_result_dir=opt.ray_result_dir,
          model_alias=opt.model_alias,
          model_organization=opt.model_organization)