asahi417 committed
Commit 3abb414
1 Parent(s): 64a3d3d

Update lm_finetuning.py

Files changed (1)
  1. lm_finetuning.py +4 -4
lm_finetuning.py CHANGED
@@ -96,7 +96,7 @@ def main():
     # setup model
     tokenizer = AutoTokenizer.from_pretrained(opt.model, local_files_only=not network)
     model = AutoModelForSequenceClassification.from_pretrained(
-        opt.model, num_labels=dataset[opt.split_train].features['label'].num_classes, local_files_only=not network)
+        opt.model, num_labels=6, local_files_only=not network)
     tokenized_datasets = dataset.map(
         lambda x: tokenizer(x["text"], padding="max_length", truncation=True, max_length=opt.seq_length),
         batched=True)
@@ -116,7 +116,7 @@ def main():
         train_dataset=tokenized_datasets[opt.split_train],
         eval_dataset=tokenized_datasets[opt.split_validation],
         compute_metrics=compute_metric_search,
-        model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(opt.model, num_labels=dataset[opt.train_split].features['label'].num_classes, local_files_only=not network, return_dict=True)
+        model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(opt.model, num_labels=6, local_files_only=not network, return_dict=True)
     )
     # parameter search
     if PARALLEL:
@@ -150,7 +150,7 @@ def main():

     # evaluation
     model = AutoModelForSequenceClassification.from_pretrained(
-        opt.model, num_labels=dataset[opt.train_split].features['label'].num_classes, local_files_only=not network)
+        opt.model, num_labels=6, local_files_only=not network)
     trainer = Trainer(
         model=model,
         args=TrainingArguments(
@@ -162,7 +162,7 @@ def main():
         eval_dataset=tokenized_datasets[opt.split_test],
         compute_metrics=compute_metric_all,
         model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-            opt.model, num_labels=dataset[opt.train_split].features['label'].num_classes, local_files_only=not network, return_dict=True)
+            opt.model, num_labels=6, local_files_only=not network, return_dict=True)
     )
     summary_file = pj(opt.output_dir, opt.summary_file)
     if not opt.skip_eval:
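For context: all four edited call sites previously derived num_labels from the dataset's ClassLabel metadata, but the removed lines reference both opt.split_train and opt.train_split, so the dynamic lookup likely failed wherever the misspelled option did not exist; hard-coding 6 sidesteps that. A minimal sketch of the two approaches follows (illustrative only, not code from this repo; the "dair-ai/emotion" dataset, which happens to have 6 classes, and the "roberta-base" checkpoint are stand-in assumptions):

# Minimal sketch (not part of the commit). Assumes a datasets.DatasetDict
# whose "label" column is a ClassLabel feature; "dair-ai/emotion" and
# "roberta-base" are placeholder choices, not taken from lm_finetuning.py.
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification

dataset = load_dataset("dair-ai/emotion")

# Before this commit: infer the label count from the ClassLabel feature.
num_labels = dataset["train"].features["label"].num_classes  # -> 6 here

# After this commit: hard-code the label count.
num_labels = 6

model = AutoModelForSequenceClassification.from_pretrained(
    "roberta-base", num_labels=num_labels)

The dynamic lookup generalizes to any dataset whose splits carry ClassLabel metadata, while the hard-coded value is simpler and works even when a split lacks that metadata, at the cost of tying the script to one label set.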