uer committed
Commit 4fb0f25
1 Parent(s): f819596

Update README.md

Files changed (1)
  1. README.md +19 -18
README.md CHANGED
@@ -38,42 +38,43 @@ The model is pre-trained by [UER-py](https://github.com/dbiir/UER-py/) on [Tence
  ```
  python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \
                        --vocab_path models/google_zh_vocab.txt \
-                       --dataset_path cluecorpussmall_bart_seq512_dataset.pt \
+                       --dataset_path cluecorpussmall_pegasus_seq512_dataset.pt \
                        --processes_num 32 --seq_length 512 \
-                       --dynamic_masking --target bart
+                       --target gsg --sentence_selection_strategy random
  ```

  ```
- python3 pretrain.py --dataset_path cluecorpussmall_bart_seq512_dataset.pt \
+ python3 pretrain.py --dataset_path cluecorpussmall_pegasus_seq512_dataset.pt \
                      --vocab_path models/google_zh_vocab.txt \
-                     --config_path models/bart/base_config.json \
-                     --output_model_path models/cluecorpussmall_bart_base_seq512_model.bin \
+                     --config_path models/pegasus/base_config.json \
+                     --output_model_path models/cluecorpussmall_pegasus_base_seq512_model.bin \
                      --world_size 8 --gpu_ranks 0 1 2 3 4 5 6 7 \
                      --total_steps 1000000 --save_checkpoint_steps 100000 --report_steps 50000 \
-                     --learning_rate 1e-4 --batch_size 16 \
-                     --span_masking --span_max_length 3 \
-                     --embedding word_pos --tgt_embedding word_pos \
-                     --encoder transformer --mask fully_visible --decoder transformer \
-                     --target bart --tie_weights --has_lmtarget_bias
+                     --learning_rate 1e-4 --batch_size 8 \
+                     --embedding word_sinusoidalpos --remove_embedding_layernorm --tgt_embedding word_sinusoidalpos \
+                     --encoder transformer --mask fully_visible --layernorm_positioning pre --decoder transformer \
+                     --target gsg --tie_weights --has_lmtarget_bias
  ```

  Finally, we convert the pre-trained model into Huggingface's format:

  ```
- python3 scripts/convert_bart_from_uer_to_huggingface.py --input_model_path cluecorpussmall_bart_base_seq512_model.bin-250000 \
-                                                         --output_model_path pytorch_model.bin \
-                                                         --layers_num 6
+ python3 scripts/convert_pegasus_from_uer_to_huggingface.py --input_model_path cluecorpussmall_pegasus_base_seq512_model.bin-250000 \
+                                                            --output_model_path pytorch_model.bin \
+                                                            --layers_num 12
  ```
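
A minimal sketch for loading the converted checkpoint with Hugging Face `transformers`, assuming the resulting `pytorch_model.bin` has been placed in a local directory (here hypothetically `./pegasus_base_zh`) together with a matching `config.json` and the vocabulary saved as `vocab.txt`:

```
from transformers import BertTokenizer, PegasusForConditionalGeneration

# Hypothetical local directory: assumed to contain the converted
# pytorch_model.bin, a matching config.json, and vocab.txt (google_zh vocab).
model_dir = "./pegasus_base_zh"

tokenizer = BertTokenizer.from_pretrained(model_dir)
model = PegasusForConditionalGeneration.from_pretrained(model_dir)

# Encode a short Chinese passage and generate the gap sentences.
text = "内容丰富、形式多样的新年晚会在北京举行。"
inputs = tokenizer(text, return_tensors="pt", return_token_type_ids=False)
output_ids = model.generate(**inputs, max_length=50)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```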

  ### BibTeX entry and citation info

  ```
- @article{lewis2019bart,
-   title={Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension},
-   author={Lewis, Mike and Liu, Yinhan and Goyal, Naman and Ghazvininejad, Marjan and Mohamed, Abdelrahman and Levy, Omer and Stoyanov, Ves and Zettlemoyer, Luke},
-   journal={arXiv preprint arXiv:1910.13461},
-   year={2019}
+ @inproceedings{zhang2020pegasus,
+   title={Pegasus: Pre-training with extracted gap-sentences for abstractive summarization},
+   author={Zhang, Jingqing and Zhao, Yao and Saleh, Mohammad and Liu, Peter},
+   booktitle={International Conference on Machine Learning},
+   pages={11328--11339},
+   year={2020},
+   organization={PMLR}
  }

  @article{zhao2019uer,