asahi417 committed
Commit 081c653
1 Parent(s): 74c9f62

Update lm_finetuning.py

Files changed (1)
lm_finetuning.py +47 -4
lm_finetuning.py CHANGED
@@ -35,10 +35,21 @@ from datasets import load_dataset, load_metric
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
 from ray import tune
 
+from readme import get_readme
+
 logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
 
 PARALLEL = bool(int(os.getenv("PARALLEL", 1)))
 RAY_RESULTS = os.getenv("RAY_RESULTS", "ray_results")
+LABEL2ID = {
+    "arts_&_culture": 0,
+    "business_&_entrepreneurs": 1,
+    "pop_culture": 2,
+    "daily_life": 3,
+    "sports_&_gaming": 4,
+    "science_&_technology": 5
+}
+ID2LABEL = {v: k for k, v in LABEL2ID.items()}
 
 
 def internet_connection(host='http://google.com'):
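The module-level label maps pin the six tweet_topic classes to stable integer ids, with ID2LABEL as the inverse of LABEL2ID. Their payoff is at inference time: a checkpoint trained with these maps returns readable topic names instead of the generic LABEL_0 … LABEL_5. A minimal sketch of that effect, where "<model_alias>" is a placeholder rather than a checkpoint named in this commit:

    from transformers import pipeline

    # "<model_alias>" is a placeholder for whatever alias the script pushes.
    classifier = pipeline("text-classification", model="<model_alias>")
    print(classifier("Full time! What a comeback."))
    # e.g. [{'label': 'sports_&_gaming', 'score': 0.97}] instead of 'LABEL_4'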
@@ -96,7 +107,12 @@ def main():
     # setup model
     tokenizer = AutoTokenizer.from_pretrained(opt.model, local_files_only=not network)
     model = AutoModelForSequenceClassification.from_pretrained(
-        opt.model, num_labels=6, local_files_only=not network)
+        opt.model,
+        num_labels=6,
+        local_files_only=not network,
+        id2label=ID2LABEL,
+        label2id=LABEL2ID
+    )
     tokenized_datasets = dataset.map(
         lambda x: tokenizer(x["text"], padding="max_length", truncation=True, max_length=opt.seq_length),
         batched=True)
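Passing id2label and label2id through from_pretrained stores both maps on the model config, so they are serialized into config.json whenever the checkpoint is saved or pushed; note that num_labels=6 equals len(LABEL2ID). A short sketch of that behaviour, using "roberta-base" as a stand-in for opt.model:

    from transformers import AutoModelForSequenceClassification

    # LABEL2ID / ID2LABEL are the module-level maps from the first hunk;
    # "roberta-base" stands in for opt.model.
    model = AutoModelForSequenceClassification.from_pretrained(
        "roberta-base", num_labels=6, id2label=ID2LABEL, label2id=LABEL2ID)
    print(model.config.id2label[4])  # 'sports_&_gaming'
    model.save_pretrained("out")     # "out/config.json" now carries both maps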
@@ -116,7 +132,14 @@ def main():
         train_dataset=tokenized_datasets[opt.split_train],
         eval_dataset=tokenized_datasets[opt.split_validation],
         compute_metrics=compute_metric_search,
-        model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(opt.model, num_labels=6, local_files_only=not network, return_dict=True)
+        model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
+            opt.model,
+            num_labels=6,
+            local_files_only=not network,
+            return_dict=True,
+            id2label=ID2LABEL,
+            label2id=LABEL2ID
+        )
     )
     # parameter search
     if PARALLEL:
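Trainer invokes model_init to build a fresh model for every trial of a hyperparameter search (the lambda's x receives the trial object and is unused), which is why the new label-map kwargs must be repeated here rather than set once on a single model instance. A minimal sketch of the Ray Tune search this model_init supports; the search space and trial count are illustrative, not taken from this script:

    # Illustrative only: the script's actual hp_space and n_trials are defined elsewhere.
    best_run = trainer.hyperparameter_search(
        hp_space=lambda trial: {
            "learning_rate": tune.loguniform(1e-6, 1e-4),
            "num_train_epochs": tune.choice([3, 5]),
        },
        backend="ray",
        n_trials=10,
        direction="maximize",
    )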
@@ -150,7 +173,12 @@ def main():
 
     # evaluation
     model = AutoModelForSequenceClassification.from_pretrained(
-        opt.model, num_labels=6, local_files_only=not network)
+        opt.model,
+        num_labels=6,
+        local_files_only=not network,
+        id2label=ID2LABEL,
+        label2id=LABEL2ID
+    )
     trainer = Trainer(
         model=model,
         args=TrainingArguments(
@@ -162,7 +190,13 @@ def main():
         eval_dataset=tokenized_datasets[opt.split_test],
         compute_metrics=compute_metric_all,
         model_init=lambda x: AutoModelForSequenceClassification.from_pretrained(
-            opt.model, num_labels=6, local_files_only=not network, return_dict=True)
+            opt.model,
+            num_labels=6,
+            local_files_only=not network,
+            return_dict=True,
+            id2label=ID2LABEL,
+            label2id=LABEL2ID
+        )
     )
     summary_file = pj(opt.output_dir, opt.summary_file)
     if not opt.skip_eval:
@@ -181,6 +215,15 @@ def main():
         tokenizer.push_to_hub(opt.model_alias, **args)
         if os.path.exists(summary_file):
             shutil.copy2(summary_file, opt.model_alias)
+        extra_desc = f"This model is fine-tuned on `{opt.split_train}` split and validated on `{opt.split_test}` split of tweet_topic."
+        readme = get_readme(
+            model_name=opt.model_alias,
+            metric=f"{opt.model_alias}/{summary_file}",
+            language_model=opt.model,
+            extra_desc=extra_desc
+        )
+        with open(f"{opt.model_alias}/README.md", "w") as f:
+            f.write(readme)
         os.system(
             f"cd {opt.model_alias} && git lfs install && git add . && git commit -m 'model update' && git push && cd ../")
         shutil.rmtree(f"{opt.model_alias}")  # clean up the cloned repo
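The get_readme helper imported at the top of the file generates the model card, but its implementation is not part of this commit. Judging only from the call site, a stub consistent with the usage might look like the following; this is an assumption, and the real helper presumably also renders the metrics stored in the summary file:

    # Hypothetical stub of readme.get_readme, inferred from the call site above;
    # the actual implementation lives in readme.py and is not shown in this commit.
    def get_readme(model_name: str, metric: str, language_model: str, extra_desc: str) -> str:
        """Render a model-card README; `metric` is a path to the evaluation summary."""
        return (
            f"# {model_name}\n\n"
            f"Fine-tuned from `{language_model}` on tweet_topic. {extra_desc}\n\n"
            f"Evaluation summary: `{metric}`\n"
        )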
 