Update README.md
README.md CHANGED
@@ -115,7 +115,7 @@ python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \
                       --vocab_path models/google_zh_vocab.txt \
                       --dataset_path cluecorpussmall_seq128_dataset.pt \
                       --processes_num 32 --seq_length 128 \
-                      --dynamic_masking --
+                      --dynamic_masking --data_processor mlm
 ```
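The `--dynamic_masking` flag kept on both sides of this change defers masking to training time, RoBERTa-style, so every pass over the data sees a fresh sample of masked positions rather than the single masking frozen at preprocessing. As a rough illustration (not UER-py's actual code), the standard BERT/RoBERTa masking rule looks like this; the `-100` ignore-index follows the Huggingface convention and is an assumption here:

```python
import random

def dynamic_mask(token_ids, mask_id, vocab_size, mask_prob=0.15):
    """Re-sample an MLM masking for one sequence (illustrative sketch)."""
    inputs = list(token_ids)
    labels = [-100] * len(token_ids)  # -100: position ignored by the loss (HF convention)
    for i, tok in enumerate(token_ids):
        if random.random() < mask_prob:
            labels[i] = tok                               # model must recover this token
            r = random.random()
            if r < 0.8:
                inputs[i] = mask_id                       # 80%: replace with [MASK]
            elif r < 0.9:
                inputs[i] = random.randrange(vocab_size)  # 10%: random token
            # remaining 10%: keep the original token
    return inputs, labels
```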
 
 ```
@@ -126,7 +126,7 @@ python3 pretrain.py --dataset_path cluecorpussmall_seq128_dataset.pt \
                     --world_size 8 --gpu_ranks 0 1 2 3 4 5 6 7 \
                     --total_steps 1000000 --save_checkpoint_steps 100000 --report_steps 50000 \
                     --learning_rate 1e-4 --batch_size 64 \
-                    --
+                    --data_processor mlm --target mlm
 ```
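A back-of-envelope reading of the stage-1 flags above, assuming `--batch_size` counts sequences per GPU (UER-py documents it as the batch size of each process; treat this as an assumption):

```python
# Hedged arithmetic; assumes --batch_size is per GPU/process.
per_gpu_batch, num_gpus, seq_len = 64, 8, 128
seqs_per_step = per_gpu_batch * num_gpus       # 512 sequences per optimizer step
tokens_per_step = seqs_per_step * seq_len      # 65,536 tokens per step
print(seqs_per_step, tokens_per_step)          # over 1,000,000 steps: ~65.5B tokens seen
```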
 
 Stage2:
@@ -136,19 +136,19 @@ python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \
                       --vocab_path models/google_zh_vocab.txt \
                       --dataset_path cluecorpussmall_seq512_dataset.pt \
                       --processes_num 32 --seq_length 512 \
-                      --dynamic_masking --
+                      --dynamic_masking --data_processor mlm
 ```
 
 ```
 python3 pretrain.py --dataset_path cluecorpussmall_seq512_dataset.pt \
-                    --pretrained_model_path models/cluecorpussmall_roberta_medium_seq128_model.bin-1000000 \
                     --vocab_path models/google_zh_vocab.txt \
+                    --pretrained_model_path models/cluecorpussmall_roberta_medium_seq128_model.bin-1000000 \
                     --config_path models/bert/medium_config.json \
                     --output_model_path models/cluecorpussmall_roberta_medium_seq512_model.bin \
                     --world_size 8 --gpu_ranks 0 1 2 3 4 5 6 7 \
                     --total_steps 250000 --save_checkpoint_steps 50000 --report_steps 10000 \
                     --learning_rate 5e-5 --batch_size 16 \
-                    --
+                    --data_processor mlm --target mlm
 ```
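Note that stage 2 warm-starts from the last stage-1 checkpoint; the `-1000000` suffix is the step count that `--save_checkpoint_steps` appends to `--output_model_path`. A quick pre-flight check, under the assumption (verify for your UER-py version) that the checkpoint is a plain PyTorch state dict:

```python
import torch

# Assumption: UER-py saves checkpoints as plain state dicts via torch.save.
state = torch.load(
    "models/cluecorpussmall_roberta_medium_seq128_model.bin-1000000",
    map_location="cpu",
)
print(len(state), "tensors; first keys:", list(state)[:3])
```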
 
 Finally, we convert the pre-trained model into Huggingface's format:
@@ -156,7 +156,7 @@ Finally, we convert the pre-trained model into Huggingface's format:
 ```
 python3 scripts/convert_bert_from_uer_to_huggingface.py --input_model_path models/cluecorpussmall_roberta_medium_seq512_model.bin-250000 \
                                                         --output_model_path pytorch_model.bin \
-                                                        --layers_num 8 --
+                                                        --layers_num 8 --type mlm
 ```
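Once converted, the weights load with stock Transformers classes. A hedged smoke test, assuming `pytorch_model.bin` is placed in a local directory (the `./converted/` path is hypothetical) together with a Huggingface-style `config.json` for the medium architecture and the `vocab.txt` used above:

```python
from transformers import BertForMaskedLM, BertTokenizer, pipeline

# Hypothetical directory holding pytorch_model.bin + config.json + vocab.txt.
tokenizer = BertTokenizer.from_pretrained("./converted")
model = BertForMaskedLM.from_pretrained("./converted")

unmasker = pipeline("fill-mask", model=model, tokenizer=tokenizer)
print(unmasker("北京是[MASK]国的首都。"))  # expect 中 among the top predictions
```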
 
 ### BibTeX entry and citation info