asahi417 committed on
Commit
d13fe3a
1 Parent(s): eb17e47
Files changed (1) hide show
  1. training_scripts/finetune_t5.py +10 -0
training_scripts/finetune_t5.py CHANGED
@@ -8,6 +8,7 @@ import logging
8
  import os
9
  import argparse
10
  import gc
 
11
  from typing import List, Set, Dict
12
  from shutil import copyfile
13
  from statistics import mean
@@ -15,6 +16,7 @@ from itertools import product
15
 
16
  import torch
17
  import transformers
 
18
  from datasets import load_dataset
19
  from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
20
  from huggingface_hub import Repository
@@ -176,6 +178,14 @@ def train(
176
  del model
177
  gc.collect()
178
  torch.cuda.empty_cache()
 
 
 
 
 
 
 
 
179
  else:
180
  logging.info('skip hyperparameter search & model training (already done)')
181
 
 
8
  import os
9
  import argparse
10
  import gc
11
+ from glob import glob
12
  from typing import List, Set, Dict
13
  from shutil import copyfile
14
  from statistics import mean
 
16
 
17
  import torch
18
  import transformers
19
+ import numba
20
  from datasets import load_dataset
21
  from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments, pipeline
22
  from huggingface_hub import Repository
 
178
  del model
179
  gc.collect()
180
  torch.cuda.empty_cache()
181
+ numba.cuda.get_current_device().reset()
182
+
183
+ model_score = {}
184
+ for eval_file in glob(f"{output_dir}/model_*/eval_result.json"):
185
+ with open(eval_file) as f:
186
+ model_score[os.path.dirname(eval_file)] = json.load(f)['eval_f1']
187
+ best_model = max(model_score, key=model_score.get)
188
+
189
  else:
190
  logging.info('skip hyperparameter search & model training (already done)')
191