uer committed on
Commit 473d634
1 Parent(s): 31be858

Update README.md

Files changed (1)
  1. README.md +5 -5
README.md CHANGED
@@ -59,7 +59,7 @@ python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \
  python3 pretrain.py --dataset_path cluecorpussmall_t5_seq128_dataset.pt \
  --vocab_path models/google_zh_with_sentinel_vocab.txt \
  --config_path models/t5/small_config.json \
- --output_model_path models/cluecorpussmall_t5_seq128_model.bin \
+ --output_model_path models/cluecorpussmall_t5_small_seq128_model.bin \
  --world_size 8 --gpu_ranks 0 1 2 3 4 5 6 7 \
  --total_steps 1000000 --save_checkpoint_steps 100000 --report_steps 50000 \
  --learning_rate 1e-3 --batch_size 64 \
@@ -75,17 +75,17 @@ Stage2:
  ```
  python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \
  --vocab_path models/google_zh_with_sentinel_vocab.txt \
- --dataset_path cluecorpussmall_t5_seq512_dataset.pt \
+ --dataset_path cluecorpussmall_t5_small_seq512_dataset.pt \
  --processes_num 32 --seq_length 512 \
  --dynamic_masking --target t5
  ```
 
  ```
  python3 pretrain.py --dataset_path cluecorpussmall_t5_seq512_dataset.pt \
- --pretrained_model_path models/cluecorpussmall_t5_seq128_model.bin-1000000 \
+ --pretrained_model_path models/cluecorpussmall_t5_small_seq128_model.bin-1000000 \
  --vocab_path models/google_zh_with_sentinel_vocab.txt \
  --config_path models/t5/small_config.json \
- --output_model_path models/cluecorpussmall_t5_seq512_model.bin \
+ --output_model_path models/cluecorpussmall_t5_small_seq512_model.bin \
  --world_size 8 --gpu_ranks 0 1 2 3 4 5 6 7 \
  --total_steps 250000 --save_checkpoint_steps 50000 --report_steps 10000 \
  --learning_rate 5e-4 --batch_size 16 \
@@ -98,7 +98,7 @@ python3 pretrain.py --dataset_path cluecorpussmall_t5_seq512_dataset.pt \
  Finally, we convert the pre-trained model into Huggingface's format:
 
  ```
- python3 scripts/convert_t5_from_uer_to_huggingface.py --input_model_path cluecorpussmall_t5_seq512_model.bin-250000 \
+ python3 scripts/convert_t5_from_uer_to_huggingface.py --input_model_path cluecorpussmall_t5_small_seq512_model.bin-250000 \
  --output_model_path pytorch_model.bin \
  --layers_num 6 \
  --type t5
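
For context on the last step, below is a minimal sketch of how the converted checkpoint can be loaded from the Transformers library. It assumes the converted `pytorch_model.bin` sits in a local directory together with a matching `config.json` and the sentinel vocabulary saved as `vocab.txt`; the directory name and the example prompt are illustrative and not part of the commit.

```
# Sketch: load the converted T5 checkpoint with Transformers.
# Assumes pytorch_model.bin, config.json, and vocab.txt are all in
# the (hypothetical) local directory "cluecorpussmall_t5_small/".
from transformers import BertTokenizer, T5ForConditionalGeneration, Text2TextGenerationPipeline

model_dir = "cluecorpussmall_t5_small"  # hypothetical local path
tokenizer = BertTokenizer.from_pretrained(model_dir)
model = T5ForConditionalGeneration.from_pretrained(model_dir)

# The Chinese T5 models pre-trained with UER mark masked spans with
# sentinel tokens such as "extra0", so a fill-in prompt looks like:
# "中国的首都是extra0京" ("The capital of China is extra0-jing").
text2text = Text2TextGenerationPipeline(model, tokenizer)
print(text2text("中国的首都是extra0京", max_length=50, do_sample=False))
```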