asahi417 committed
Commit a9562e5
1 Parent(s): 4c6566d

Update lm_finetuning.py

Files changed (1)
  1. lm_finetuning.py +34 -12
lm_finetuning.py CHANGED
@@ -1,3 +1,24 @@
+'''
+wandb offline
+export WANDB_DISABLED='true'
+export RAY_RESULTS='ray_results'
+
+python lm_finetuning.py -m "roberta-large" -o "ckpt/2021/roberta-large" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-large-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
+python lm_finetuning.py -m "roberta-large" -o "ckpt/2020/roberta-large" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-large-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
+
+python lm_finetuning.py -m "roberta-base" -o "ckpt/2021/roberta_base" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-base-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
+python lm_finetuning.py -m "roberta-base" -o "ckpt/2020/roberta_base" --push-to-hub --hf-organization "cardiffnlp" -a "roberta-base-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
+
+python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-2019-90m" -o "ckpt/2021/twitter-roberta-base-2019-90m" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
+python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-2019-90m" -o "ckpt/2020/twitter-roberta-base-2019-90m" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
+
+python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2020" -o "ckpt/2021/twitter-roberta-base-dec2020" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-dec2020-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
+python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2020" -o "ckpt/2020/twitter-roberta-base-dec2020" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-dec2020-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
+
+python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2021" -o "ckpt/2021/twitter-roberta-base-dec2021" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-dec2021-tweet-topic-multi-all" --split-train "train_all" --split-valid "validation_2021" --split-test "test_2021"
+python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2021" -o "ckpt/2020/twitter-roberta-base-dec2021" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-dec2021-tweet-topic-multi-2020" --split-train "train_2020" --split-valid "validation_2020" --split-test "test_2021"
+'''
+
 import argparse
 import json
 import logging
@@ -52,7 +73,9 @@ def main():
     parser = argparse.ArgumentParser(description='Fine-tuning language model.')
     parser.add_argument('-m', '--model', help='transformer LM', default='roberta-base', type=str)
     parser.add_argument('-d', '--dataset', help='', default='cardiffnlp/tweet_topic_single', type=str)
-    parser.add_argument('--dataset-name', help='huggingface dataset name', default='citation_intent', type=str)
+    parser.add_argument('--split-train', help='', required=True, type=str)
+    parser.add_argument('--split-validation', help='', required=True, type=str)
+    parser.add_argument('--split-test', help='', required=True, type=str)
     parser.add_argument('-l', '--seq-length', help='', default=128, type=int)
     parser.add_argument('--random-seed', help='', default=42, type=int)
     parser.add_argument('--eval-step', help='', default=50, type=int)
@@ -68,7 +91,7 @@ def main():
     opt = parser.parse_args()
     assert opt.summary_file.endswith('.json'), f'`--summary-file` should be a json file {opt.summary_file}'
     # setup data
-    dataset = load_dataset(opt.dataset, opt.dataset_name)
+    dataset = load_dataset(opt.dataset)
     network = internet_connection()
     # setup model
     tokenizer = AutoTokenizer.from_pretrained(opt.model, local_files_only=not network)
@@ -90,11 +113,11 @@ def main():
             eval_steps=opt.eval_step,
             seed=opt.random_seed
         ),
-        train_dataset=tokenized_datasets["train"],
-        eval_dataset=tokenized_datasets["validation"],
+        train_dataset=tokenized_datasets[opt.split_train],
+        eval_dataset=tokenized_datasets[opt.split_validation],
         compute_metrics=compute_metric_search,
-        model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-            opt.model, return_dict=True, num_labels=dataset['train'].features['label'].num_classes)
+        model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
+            opt.model, num_labels=dataset['train'].features['label'].num_classes, local_files_only=not network, return_dict=True)
     )
     # parameter search
     if PARALLEL:
@@ -128,9 +151,7 @@ def main():
 
     # evaluation
     model = AutoModelForSequenceClassification.from_pretrained(
-        best_model_path,
-        num_labels=dataset['train'].features['label'].num_classes,
-        local_files_only=not network)
+        opt.model, num_labels=dataset['train'].features['label'].num_classes, local_files_only=not network)
     trainer = Trainer(
         model=model,
         args=TrainingArguments(
@@ -138,11 +159,11 @@ def main():
             evaluation_strategy="no",
             seed=opt.random_seed
         ),
-        train_dataset=tokenized_datasets["train"],
-        eval_dataset=tokenized_datasets["test"],
+        train_dataset=tokenized_datasets[opt.split_train],
+        eval_dataset=tokenized_datasets[opt.split_test],
         compute_metrics=compute_metric_all,
         model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-            opt.model, return_dict=True, num_labels=dataset['train'].features['label'].num_classes)
+            opt.model, num_labels=dataset['train'].features['label'].num_classes, local_files_only=not network, return_dict=True)
     )
     summary_file = pj(opt.output_dir, opt.summary_file)
     if not opt.skip_eval:
@@ -168,3 +189,4 @@ def main():
 
 if __name__ == '__main__':
     main()
+
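
The net effect of the commit: dataset splits are now chosen by name on the command line (`--split-train`, `--split-validation`, `--split-test`) instead of being hard-coded as "train"/"validation"/"test", and the old `--dataset-name` config argument is gone. A minimal sketch of the resulting data flow, assuming the dataset exposes `text`/`label` columns and named splits such as `train_2020`; the `tokenize` helper stands in for the script's actual preprocessing and is not part of this diff:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

# No config name is needed anymore; the dataset id alone identifies the data.
dataset = load_dataset('cardiffnlp/tweet_topic_single')
tokenizer = AutoTokenizer.from_pretrained('roberta-base')

def tokenize(examples):
    # Fixed-length encoding, mirroring the script's --seq-length flag.
    return tokenizer(examples['text'], padding='max_length', truncation=True, max_length=128)

tokenized_datasets = dataset.map(tokenize, batched=True)

# Splits are selected by the names passed on the command line:
train_split = tokenized_datasets['train_2020']       # --split-train
valid_split = tokenized_datasets['validation_2020']  # --split-validation
test_split = tokenized_datasets['test_2021']         # --split-test

# The classification head size still comes from the dataset metadata:
num_labels = dataset['train_2020'].features['label'].num_classes
```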
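A side note on the commands in the added docstring: they pass `--split-valid`, while the script defines `--split-validation`. That still parses because argparse resolves unambiguous option prefixes by default (`allow_abbrev=True`); a self-contained check:

```python
import argparse

# Same three flags as the commit adds; '--split-valid' resolves to
# '--split-validation' because no other option shares that prefix.
parser = argparse.ArgumentParser()
parser.add_argument('--split-train', required=True, type=str)
parser.add_argument('--split-validation', required=True, type=str)
parser.add_argument('--split-test', required=True, type=str)

opt = parser.parse_args(['--split-train', 'train_2020',
                         '--split-valid', 'validation_2020',
                         '--split-test', 'test_2021'])
print(opt.split_validation)  # -> validation_2020
```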
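The commit also threads `local_files_only=not network` through each `from_pretrained` call, so cached models and tokenizers keep loading when offline. The `internet_connection` helper itself is outside this diff; a hypothetical stand-in to show the idea:

```python
import socket

def internet_connection(host='huggingface.co', port=443, timeout=3.0):
    """Hypothetical stand-in for the script's helper: a successful TCP
    handshake with the Hub counts as being online."""
    try:
        socket.create_connection((host, port), timeout=timeout).close()
        return True
    except OSError:
        return False

network = internet_connection()
# Downstream, every load call falls back to the local cache when offline, e.g.:
#   AutoTokenizer.from_pretrained(opt.model, local_files_only=not network)
```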