asahi417 committed
Commit a3e1ce4
1 Parent(s): 6c8a3a5

Update lm_finetuning.py

Files changed (1)
  1. lm_finetuning.py +11 -2
lm_finetuning.py CHANGED
@@ -1,3 +1,13 @@
+ """
+ wandb offline
+ export WANDB_DISABLED='true'
+ export RAY_RESULTS='ray_results'
+ python lm_finetuning.py -m "roberta-large" -c "ckpt/roberta_large" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi"
+ python lm_finetuning.py -m "roberta-base" -c "ckpt/roberta_base" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi"
+ python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-2019-90m" -c "ckpt/twitter-roberta-base-2019-90m" --push-to-hub --hf-organization "cardiffnlp" -a "twitter-roberta-base-2019-90m-tweet-topic-multi"
+ python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2020" -c "ckpt/twitter-roberta-base-dec2020"
+ python lm_finetuning.py -m "cardiffnlp/twitter-roberta-base-dec2021" -c "ckpt/twitter-roberta-base-dec2021"
+ """
  import argparse
  import json
  import logging
@@ -55,7 +65,6 @@ def main():
  parser = argparse.ArgumentParser(description='Fine-tuning language model.')
  parser.add_argument('-m', '--model', help='transformer LM', default='roberta-base', type=str)
  parser.add_argument('-d', '--dataset', help='', default='cardiffnlp/tweet_topic_multi', type=str)
- parser.add_argument('--dataset-name', help='huggingface dataset name', default='citation_intent', type=str)
  parser.add_argument('-l', '--seq-length', help='', default=128, type=int)
  parser.add_argument('--random-seed', help='', default=42, type=int)
  parser.add_argument('--eval-step', help='', default=50, type=int)
@@ -71,7 +80,7 @@ def main():
  opt = parser.parse_args()
  assert opt.summary_file.endswith('.json'), f'`--summary-file` should be a json file {opt.summary_file}'
  # setup data
- dataset = load_dataset(opt.dataset, opt.dataset_name)
+ dataset = load_dataset(opt.dataset)
  network = internet_connection()
  # setup model
  tokenizer = AutoTokenizer.from_pretrained(opt.model, local_files_only=not network)
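
The usage header added at the top of the file drives experiment tracking through environment variables. As a minimal sketch (not part of the commit), the same setup can be applied from Python before training starts; treating `RAY_RESULTS` as the directory this script uses for Ray Tune outputs is an assumption, not something the diff states.

```python
import os

# Equivalent of the shell exports in the new usage header.
os.environ["WANDB_DISABLED"] = "true"      # disables Weights & Biases logging (respected by wandb and the transformers Trainer)
os.environ["RAY_RESULTS"] = "ray_results"  # assumed: output directory for Ray Tune hyperparameter-search results
```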
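
The substantive change is that the `--dataset-name` configuration argument is removed and `load_dataset` now receives only the dataset path, so the default configuration of `cardiffnlp/tweet_topic_multi` is loaded. A minimal sketch of the resulting call with the standard `datasets` API (the split names printed come from the dataset itself and are not assumed here):

```python
from datasets import load_dataset

# After the change: only the repository path is passed, so the dataset's
# default configuration is used instead of a name supplied via --dataset-name.
dataset = load_dataset("cardiffnlp/tweet_topic_multi")
print(list(dataset.keys()))  # splits available in the returned DatasetDict
```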