asahi417 committed
Commit: 8ea930a
Parent: 9a436ff

Update lm_finetuning.py

Files changed (1)
  1. lm_finetuning.py +7 -4
lm_finetuning.py CHANGED
@@ -64,13 +64,11 @@ def get_metrics():
     def compute_metric_search(eval_pred):
         logits, labels = eval_pred
         predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])
-        print(labels.shape, logits.shape, predictions.shape)
         return metric_f1.compute(predictions=predictions, references=labels, average='micro')
 
     def compute_metric_all(eval_pred):
         logits, labels = eval_pred
         predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])
-        print(labels.shape, logits.shape, predictions.shape)
         return {
             'f1': metric_f1.compute(predictions=predictions, references=labels, average='micro')['f1'],
             'f1_macro': metric_f1.compute(predictions=predictions, references=labels, average='macro')['f1'],
@@ -167,7 +165,8 @@ def main():
     model = AutoModelForSequenceClassification.from_pretrained(
         best_model_path,
         num_labels=len(dataset[opt.split_train]['label'][0]),
-        local_files_only=not network)
+        local_files_only=not network,
+        problem_type="multi_label_classification")
     trainer = Trainer(
         model=model,
         args=TrainingArguments(
@@ -179,7 +178,11 @@
         eval_dataset=tokenized_datasets[opt.split_test],
         compute_metrics=compute_metric_all,
         model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-            opt.model, return_dict=True, num_labels=len(dataset[opt.split_train]['label'][0]))
+            opt.model,
+            return_dict=True,
+            num_labels=len(dataset[opt.split_train]['label'][0]),
+            local_files_only=not network,
+            problem_type="multi_label_classification")
     )
     summary_file = pj(opt.output_dir, opt.summary_file)
     if not opt.skip_eval:
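Note (not part of the commit): a minimal sketch of what the added problem_type="multi_label_classification" argument does. With this setting, the Transformers sequence-classification head computes its loss with BCEWithLogitsLoss over per-label logits, which is consistent with the sigmoid(logit) > 0.5 thresholding used in compute_metric_search and compute_metric_all. The checkpoint name, label count, and example text below are placeholders, not values taken from lm_finetuning.py.

    import numpy as np
    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    model_name = "roberta-base"  # placeholder checkpoint, stands in for opt.model
    num_labels = 6               # placeholder for len(dataset[opt.split_train]['label'][0])

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(
        model_name,
        num_labels=num_labels,
        problem_type="multi_label_classification")  # loss becomes BCEWithLogitsLoss

    enc = tokenizer("example tweet about sports and music", return_tensors="pt")
    labels = torch.tensor([[0., 1., 0., 0., 1., 0.]])  # multi-hot float labels, one row per example
    out = model(**enc, labels=labels)                  # out.loss: binary cross-entropy over all labels

    # Prediction side, mirroring compute_metric_search in the diff:
    sigmoid = lambda x: 1 / (1 + np.exp(-x))
    logits = out.logits.detach().numpy()
    predictions = np.array([[int(sigmoid(j) > 0.5) for j in i] for i in logits])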