asahi417 committed
Commit
12106f5
1 Parent(s): 769ed64
training_scripts/finetune_t5.py CHANGED
@@ -13,10 +13,10 @@ from typing import List, Set, Dict
 from shutil import copyfile
 from statistics import mean
 from itertools import product
+from distutils.dir_util import copy_tree

 import torch
 import transformers
-from numba import cuda
 from datasets import load_dataset
 from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
 from huggingface_hub import Repository
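Note on the new import: distutils.dir_util.copy_tree still works here, but distutils is deprecated (PEP 632) and was removed from the standard library in Python 3.12. A drop-in sketch using shutil instead, assuming Python 3.8+ (variable names as used later in this patch):

    # shutil.copytree with dirs_exist_ok=True (Python 3.8+) matches
    # copy_tree's merge-into-existing-directory behaviour
    from shutil import copytree

    copytree(best_model, f'{output_dir}/best_model', dirs_exist_ok=True)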
@@ -85,7 +85,6 @@ def train(
         skip_train: bool = False,
         skip_test: bool = False,
         skip_upload: bool = False,
-        eval_steps: float = 0.25,
         eval_batch_size: int = None):
     """Fine-tune seq2seq model."""
     logging.info(f'[CONFIG]\n\t *LM: {model_name}, \n\t *Data: {dataset} ({dataset_name})')
@@ -144,7 +143,7 @@ def train(
         for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
             logging.info(f"[TRAIN {n}/{len(lr) * len(batch) * len(epoch)}] lr: {lr_tmp}, batch: {batch_tmp}")
             output_dir_tmp = f"{output_dir}/model_lr_{lr_tmp}_batch_{batch_tmp}_epoch_{epoch_tmp}"
-            if os.path.exists(output_dir_tmp):
+            if os.path.exists(f"{output_dir_tmp}/eval_results.json"):
                 continue
             model = load_model(
                 model_name=model_name, use_auth_token=use_auth_token, low_cpu_mem_usage=model_low_cpu_mem_usage
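The reworked guard above skips a grid point only once its evaluation artifact exists, so a run that died mid-training is retried rather than skipped just because its output directory was already created:

    # a grid point counts as done only when eval_results.json was written,
    # i.e. both training and evaluation completed; a bare directory is not enough
    if os.path.exists(f"{output_dir_tmp}/eval_results.json"):
        continue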
@@ -183,29 +182,29 @@ def train(
         # cuda.get_current_device().reset()

         model_score = {}
-        for eval_file in glob(f"{output_dir}/model_*/eval_result.json"):
+        for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
             with open(eval_file) as f:
                 model_score[os.path.dirname(eval_file)] = json.load(f)['eval_f1']
         best_model = max(model_score, key=model_score.get)
-
+        copy_tree(best_model, f'{output_dir}/best_model')
     else:
         logging.info('skip hyperparameter search & model training (already done)')

     # get metric on the test set
     if not skip_test:
         logging.info('run evaluation on test set')
-        if not os.path.exists(f'{output_dir}/model/prediction_test.txt'):
+        if not os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
             pipe = pipeline(
                 'text2text-generation',
-                model=f'{output_dir}/model',
+                model=f'{output_dir}/best_model',
                 device='cuda:0' if torch.cuda.is_available() else 'cpu',
             )
             input_data = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
             output = pipe(input_data, batch_size=eval_batch_size)
             output = [i['generated_text'] for i in output]
-            with open(f'{output_dir}/model/prediction_test.txt', 'w') as f:
+            with open(f'{output_dir}/best_model/prediction_test.txt', 'w') as f:
                 f.write('\n'.join(output))
-        with open(f'{output_dir}/model/prediction_test.txt') as f:
+        with open(f'{output_dir}/best_model/prediction_test.txt') as f:
             output = [set(i.split(',')) for i in f.read().split('\n')]
         dataset_tmp = dataset_instance[dataset_split_test]
         label_list = dataset_tmp[dataset_column_label]
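The new best-model selection above, as a self-contained helper (hypothetical function name; the paths and the eval_f1 key follow this patch):

    import json
    import os
    from distutils.dir_util import copy_tree
    from glob import glob

    def promote_best_model(output_dir: str) -> str:
        """Copy the grid run with the highest validation F1 to <output_dir>/best_model."""
        model_score = {}
        for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
            with open(eval_file) as f:
                model_score[os.path.dirname(eval_file)] = json.load(f)['eval_f1']
        # raises ValueError if no run has finished evaluation yet
        best_model = max(model_score, key=model_score.get)
        copy_tree(best_model, f'{output_dir}/best_model')
        return best_model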
@@ -214,7 +213,7 @@ def train(
         ]
         eval_metric = get_f1_score(_references, output)
         logging.info(json.dumps(eval_metric, indent=4))
-        with open(f'{output_dir}/model/evaluation_metrics.json', 'w') as f:
+        with open(f'{output_dir}/best_model/evaluation_metrics.json', 'w') as f:
             json.dump(eval_metric, f)

     if not skip_upload:
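The test pass in brief: batched generation through the pipeline API, then the comma-joined label strings are split into sets for the multi-label F1. A condensed sketch (get_f1_score and the dataset_* names come from this script):

    import torch
    from transformers import pipeline

    pipe = pipeline(
        'text2text-generation',
        model=f'{output_dir}/best_model',
        device='cuda:0' if torch.cuda.is_available() else 'cpu',
    )
    texts = [i[dataset_column_text] for i in dataset_instance[dataset_split_test]]
    generated = [o['generated_text'] for o in pipe(texts, batch_size=eval_batch_size)]
    predictions = [set(g.split(',')) for g in generated]  # labels are comma-joined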
@@ -222,15 +221,15 @@ def train(
             'model_organization must be specified when model_alias is specified'
         logging.info('uploading to huggingface')
         args = {'use_auth_token': use_auth_token, 'organization': model_organization}
-        model = load_model(model_name=f'{output_dir}/model')
+        model = load_model(model_name=f'{output_dir}/best_model')
         model.push_to_hub(model_alias, **args)
         tokenizer.push_to_hub(model_alias, **args)
         repo = Repository(model_alias, f'{model_organization}/{model_alias}')
-        copyfile(f'{output_dir}/model/hyperparameters.json', f'{model_alias}/hyperparameters.json')
-        if os.path.exists(f'{output_dir}/model/prediction_test.txt'):
-            copyfile(f'{output_dir}/model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
-        if os.path.exists(f'{output_dir}/model/evaluation_metrics.json'):
-            copyfile(f'{output_dir}/model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
+        copyfile(f'{output_dir}/best_model/hyperparameters.json', f'{model_alias}/hyperparameters.json')
+        if os.path.exists(f'{output_dir}/best_model/prediction_test.txt'):
+            copyfile(f'{output_dir}/best_model/prediction_test.txt', f'{model_alias}/prediction_test.txt')
+        if os.path.exists(f'{output_dir}/best_model/evaluation_metrics.json'):
+            copyfile(f'{output_dir}/best_model/evaluation_metrics.json', f'{model_alias}/evaluation_metrics.json')
         sample = [i[dataset_column_text] for i in dataset_instance[dataset_split_train]]
         sample = [i for i in sample if "'" not in i and '"' not in i][:3]
         widget = '\n'.join([f"- text: '{t}'\n  example_title: example {_n + 1}" for _n, t in enumerate(sample)])
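The organization= keyword accepted by push_to_hub was deprecated in later transformers releases in favour of a fully qualified repo id; a sketch of the newer spelling, using the same variables:

    # pass 'org/name' as the repo id instead of organization= (newer transformers)
    model.push_to_hub(f'{model_organization}/{model_alias}', use_auth_token=use_auth_token)
    tokenizer.push_to_hub(f'{model_organization}/{model_alias}', use_auth_token=use_auth_token)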
@@ -303,7 +302,6 @@ if __name__ == '__main__':
         down_sample_validation=opt.down_sample_validation,
         random_seed=opt.random_seed,
         use_auth_token=opt.use_auth_token,
-        eval_steps=opt.eval_steps,
         output_dir=opt.output_dir,
         model_alias=opt.model_alias,
         model_organization=opt.model_organization,
 
training_scripts/script.sh CHANGED
@@ -1,7 +1,6 @@
-python finetune_t5.py --dataset-name ja --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp
-python finetune_t5.py --dataset-name gr --model-alias mt5-small-tweet-topic-gr --model-organization cardiffnlp
-python finetune_t5.py --dataset-name es --model-alias mt5-small-tweet-topic-es --model-organization cardiffnlp
-python finetune_t5.py --dataset-name en --model-alias mt5-small-tweet-topic-en --model-organization cardiffnlp
+python finetune_t5.py --dataset-name ja --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-ja --model-organization cardiffnlp
+python finetune_t5.py --dataset-name gr --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-gr --model-organization cardiffnlp
+python finetune_t5.py --dataset-name es --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-es --model-organization cardiffnlp
+python finetune_t5.py --dataset-name en --low-cpu-mem-usage --model-alias mt5-small-tweet-topic-en --model-organization cardiffnlp

-python finetune_t5.py --dataset-name ja --skip-test --skip-upload

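The new --low-cpu-mem-usage flag maps onto transformers' low_cpu_mem_usage argument to from_pretrained (visible in the load_model call above), which builds the model with empty weights and fills them straight from the checkpoint, keeping peak host RAM near a single copy of the weights. Roughly (google/mt5-small is the base model these mt5-small aliases suggest):

    from transformers import AutoModelForSeq2SeqLM

    # low_cpu_mem_usage=True skips the random-init-then-overwrite cycle
    model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small', low_cpu_mem_usage=True)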