Commit 60b610a by asahi417
Parent(s): 19391a5

Update lm_finetuning.py

Files changed (1): lm_finetuning.py (+10 −6)
lm_finetuning.py CHANGED
@@ -2,8 +2,12 @@
 wandb offline
 export WANDB_DISABLED='true'
 export RAY_RESULTS='ray_results'
-python lm_finetuning.py -m "roberta-large" -c "ckpt/roberta_large" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-large-tweet-topic-multi" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
-python lm_finetuning.py -m "roberta-base" -c "ckpt/roberta_base" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi"
+python lm_finetuning.py -m "roberta-large" -o "ckpt/roberta_large" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-large-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
+python lm_finetuning.py -m "roberta-large" -o "ckpt/roberta_large" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-large-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
+
+
+python lm_finetuning.py -m "roberta-base" -c "ckpt/roberta_base" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-large-tweet-topic-multi" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
+
 python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-2019-90m" -c "ckpt/twitter-roberta-base-2019-90m" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi"
 python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2020" -c "ckpt/twitter-roberta-base-dec2020"
 python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2021" -c "ckpt/twitter-roberta-base-dec2021"
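For readers reconstructing the CLI from these usage lines, here is a hypothetical sketch of the argument parser they imply. Flag-to-destination mappings and defaults are inferred from the invocations above (note that both -c and -o appear for the checkpoint directory), not read from the script itself.

```python
# Hypothetical reconstruction of the flags used above; names, dests and
# defaults are assumptions inferred from the usage lines, not the script.
import argparse

parser = argparse.ArgumentParser(description='fine-tune an LM on tweet-topic')
parser.add_argument('-m', '--model', required=True, help='base model name or path')
parser.add_argument('-c', '-o', '--output-dir', default='ckpt', help='checkpoint directory')
parser.add_argument('-a', '--model-alias', help='name for the model pushed to the Hub')
parser.add_argument('--hf-organization', help='Hub organization to push to')
parser.add_argument('--push-to-hub', action='store_true')
parser.add_argument('--split-train', default='train_all')
parser.add_argument('--split-valid', dest='split_validation', default='validation_2021')
parser.add_argument('--split-test', default='test_2021')
opt = parser.parse_args()
```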
@@ -90,7 +94,7 @@ def main():
     tokenizer = AutoTokenizer.from_pretrained(opt.model, local_files_only=not network)
     model = AutoModelForSequenceClassification.from_pretrained(
         opt.model,
-        num_labels=len(dataset[opt.split_train][0]['label']),
+        num_labels=len(dataset[opt.split_train]['label'][0]),
         local_files_only=not network,
         problem_type="multi_label_classification"
     )
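The rewritten num_labels line swaps row-first indexing (dataset[split][0]['label']) for column-first indexing (dataset[split]['label'][0]); either way the class count is read off as the length of one example's multi-hot label vector. A toy sketch, with the column name taken from the diff and the six-topic vectors invented for illustration:

```python
# Toy multi-label dataset: each 'label' entry is a multi-hot vector, so
# its length equals the number of classes. The vectors here are made up.
from datasets import Dataset

toy = Dataset.from_dict({
    'text': ['tweet one', 'tweet two'],
    'label': [[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0, 0.0, 0.0, 1.0]],
})

# Column-first (new code) and row-first (old code) indexing agree:
assert len(toy['label'][0]) == len(toy[0]['label']) == 6
```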
@@ -114,7 +118,7 @@ def main():
         eval_dataset=tokenized_datasets[opt.split_validation],
         compute_metrics=compute_metric_search,
         model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-            opt.model, return_dict=True, num_labels=dataset[opt.split_train].features['label'].num_classes)
+            opt.model, return_dict=True, num_labels=len(data[opt.split_train]['label'][0]))
     )
     # parameter search
     if PARALLEL:
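Here num_labels also moves off features['label'].num_classes: that attribute exists only when 'label' is a ClassLabel feature, which a multi-hot float sequence does not provide. (The added line reads data[...] where the hunk above uses dataset[...].) The model_init lambda stays in place so the parameter search can rebuild a fresh model per trial; below is a hedged sketch of the Ray Tune search that the PARALLEL branch and RAY_RESULTS variable suggest, with an invented search space:

```python
# Sketch of Trainer.hyperparameter_search with the Ray backend. The search
# space and trial count are illustrative assumptions; `trainer` is the
# instance built in the hunk above, with model_init already set.
from ray import tune

def hp_space(trial):
    return {
        'learning_rate': tune.loguniform(1e-6, 1e-4),
        'num_train_epochs': tune.choice([3, 5, 8]),
    }

best_run = trainer.hyperparameter_search(
    hp_space=hp_space, backend='ray', n_trials=10, direction='maximize')
print(best_run.hyperparameters)
```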
@@ -149,7 +153,7 @@ def main():
     # evaluation
     model = AutoModelForSequenceClassification.from_pretrained(
         best_model_path,
-        num_labels=dataset[opt.split_train].features['label'].num_classes,
+        num_labels=len(data[opt.split_train]['label'][0]),
         local_files_only=not network)
     trainer = Trainer(
         model=model,
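After the best checkpoint is reloaded, prediction is per-class rather than argmax: with problem_type="multi_label_classification" the logits are independent, so inference goes through a sigmoid and a threshold. A sketch, where model and tokenizer are the script's objects and the 0.5 cut-off is an assumption:

```python
# Multi-label inference sketch; the threshold is assumed, not from the script.
import torch

inputs = tokenizer('example tweet about sports', return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits    # shape (1, num_labels)
probs = torch.sigmoid(logits)          # independent per-class probabilities
preds = (probs > 0.5).int()            # multi-hot prediction vector
```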
@@ -162,7 +166,7 @@ def main():
         eval_dataset=tokenized_datasets[opt.split_test],
         compute_metrics=compute_metric_all,
         model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-            opt.model, return_dict=True, num_labels=dataset[opt.split_train].features['label'].num_classes)
+            opt.model, return_dict=True, num_labels=len(data[opt.split_train]['label'][0]))
     )
     summary_file = pj(opt.output_dir, opt.summary_file)
     if not opt.skip_eval:
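The last two context lines build the summary path with pj (presumably os.path.join) and gate evaluation on a --skip-eval flag. A minimal sketch of what that evaluation-summary step could look like; the JSON layout is an assumption, not the script's actual schema:

```python
# Hypothetical summary write-out; `opt` and `trainer` come from the script
# and the file structure here is illustrative only.
import json
from os.path import join as pj

summary_file = pj(opt.output_dir, opt.summary_file)
if not opt.skip_eval:
    metrics = trainer.evaluate()       # uses compute_metric_all from above
    with open(summary_file, 'w') as f:
        json.dump(metrics, f, indent=2)
```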
 