init
training_scripts/finetune_t5.py CHANGED
@@ -136,9 +136,9 @@ def train(
         return get_f1_score(references_decode, generation_decode)
 
     if not skip_train:
-        lr = [1e-6, 1e-4] if lr is None else lr
+        lr = [1e-6, 1e-5, 1e-4] if lr is None else lr
         batch = [64] if not batch else batch
-        epoch = [
+        epoch = [3, 5] if not epoch else epoch
         eval_batch_size = min(batch) if not eval_batch_size else eval_batch_size
         for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
            logging.info(f"[TRAIN {n}/{len(lr) * len(batch) * len(epoch)}] lr: {lr_tmp}, batch: {batch_tmp}")
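The first hunk widens the default search grid: the learning-rate sweep gains 1e-5 and the epoch sweep defaults to [3, 5]. Runs are enumerated with itertools.product, one fine-tuning run per (lr, batch, epoch) combination. A minimal sketch of that sweep pattern, where `run_one` is a hypothetical placeholder standing in for the actual fine-tuning call:

```python
# Sketch of the sweep pattern in finetune_t5.py (run_one is a
# hypothetical placeholder, not a function from the script).
import logging
from itertools import product

logging.basicConfig(level=logging.INFO)

lr = [1e-6, 1e-5, 1e-4]   # new default from this commit
batch = [64]
epoch = [3, 5]            # new default from this commit

def run_one(lr_tmp, batch_tmp, epoch_tmp):
    """Stand-in for a single fine-tuning run."""

total = len(lr) * len(batch) * len(epoch)
for n, (lr_tmp, batch_tmp, epoch_tmp) in enumerate(product(lr, batch, epoch)):
    logging.info(f"[TRAIN {n}/{total}] lr: {lr_tmp}, batch: {batch_tmp}")
    run_one(lr_tmp, batch_tmp, epoch_tmp)
```

Note that `enumerate` is 0-based, so with the new defaults the progress log reads `0/6` through `5/6`, and `epoch_tmp` does not appear in the log line.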
@@ -185,6 +185,7 @@ def train(
         for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
             with open(eval_file) as f:
                 model_score[os.path.dirname(eval_file)] = json.load(f)['eval_f1']
+        logging.info(f"- Search Result\n{json.dumps(model_score, indent=4)}")
         best_model = max(model_score, key=model_score.get)
         copy_tree(best_model, f'{output_dir}/best_model')
     else:
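The second hunk logs the full search result before the best checkpoint is selected: each model_*/eval_results.json is read, run directories are ranked by their eval_f1 score, and the winner is copied to best_model. A self-contained sketch of the same selection step; output_dir is a hypothetical path, and shutil.copytree stands in for the script's copy_tree (distutils was removed in Python 3.12):

```python
# Sketch of the best-model selection step used in finetune_t5.py.
import json
import logging
import os
from glob import glob
from shutil import copytree

logging.basicConfig(level=logging.INFO)
output_dir = "./ckpt"  # hypothetical output directory

# Map each run directory to its F1 score from eval_results.json.
model_score = {}
for eval_file in glob(f"{output_dir}/model_*/eval_results.json"):
    with open(eval_file) as f:
        model_score[os.path.dirname(eval_file)] = json.load(f)['eval_f1']

# Log the whole grid-search result (the line this commit adds).
logging.info(f"- Search Result\n{json.dumps(model_score, indent=4)}")

if model_score:  # guard: max() over an empty dict raises ValueError
    best_model = max(model_score, key=model_score.get)
    copytree(best_model, f"{output_dir}/best_model", dirs_exist_ok=True)
```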