skywalker0803r committed on
Commit
575283e
1 Parent(s): 0fdce71

Upload 9 files

Files changed (6)
  1. .gitattributes +9 -26
  2. README.md +126 -0
  3. config.json +1 -9
  4. flax_model.msgpack +3 -0
  5. pytorch_model.bin +3 -0
  6. tf_model.h5 +3 -0
.gitattributes CHANGED
@@ -1,34 +1,17 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
  *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -0,0 +1,126 @@
+ ---
+ language: zh
+ datasets: CLUECorpusSmall
+ widget:
+ - text: "作为电子extra0的平台,京东绝对是领先者。如今的刘强extra1已经是身价过extra2的老板。"
+ ---
+
+ # Chinese T5
+
+ ## Model description
+
+ This is the set of Chinese T5 models pre-trained by [UER-py](https://github.com/dbiir/UER-py/), which is introduced in [this paper](https://arxiv.org/abs/1909.05658).
+
+ The Text-to-Text Transfer Transformer (T5) leverages a unified text-to-text format and attains state-of-the-art results on a wide variety of English-language NLP tasks. Following their work, we released a series of Chinese T5 models.
+
+ You can download the set of Chinese T5 models either from the [UER-py Modelzoo page](https://github.com/dbiir/UER-py/wiki/Modelzoo), or via HuggingFace from the links below:
+
+ |              |              Link              |
+ | ------------ | :----------------------------: |
+ | **T5-Small** | [**L=6/H=512 (Small)**][small] |
+ | **T5-Base**  | [**L=12/H=768 (Base)**][base]  |
+
+ In T5, spans of the input sequence are masked by so-called sentinel tokens. Each sentinel token represents a unique mask token for the input sequence, running from `<extra_id_0>` and `<extra_id_1>` up to `<extra_id_99>`. However, `<extra_id_xxx>` is split into multiple parts by Hugging Face's Hosted inference API. Therefore, we replace `<extra_id_xxx>` with `extraxxx` in the vocabulary, so that BertTokenizer treats `extraxxx` as a single sentinel token.
+
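+ A minimal check of this behavior (a sketch; it only assumes the tokenizer published with this model, fetched from the Hub as in the example below):
+
+ ```python
+ >>> from transformers import BertTokenizer
+ >>> tokenizer = BertTokenizer.from_pretrained("uer/t5-small-chinese-cluecorpussmall")
+ >>> "extra0" in tokenizer.get_vocab()  # the sentinel is stored as a single vocabulary entry
+ True
+ ```
+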
+ ## How to use
+
+ You can use this model directly with a pipeline for text2text generation (taking T5-Small as an example):
+
+ ```python
+ >>> from transformers import BertTokenizer, T5ForConditionalGeneration, Text2TextGenerationPipeline
+ >>> tokenizer = BertTokenizer.from_pretrained("uer/t5-small-chinese-cluecorpussmall")
+ >>> model = T5ForConditionalGeneration.from_pretrained("uer/t5-small-chinese-cluecorpussmall")
+ >>> text2text_generator = Text2TextGenerationPipeline(model, tokenizer)
+ >>> text2text_generator("中国的首都是extra0京", max_length=50, do_sample=False)
+ [{'generated_text': 'extra0 北 extra1 extra2 extra3 extra4 extra5'}]
+ ```
+
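+ Equivalently, you can call the model directly instead of going through the pipeline (a sketch, assuming only the standard `generate` API of `transformers`; the pipeline above remains the documented entry point):
+
+ ```python
+ >>> from transformers import BertTokenizer, T5ForConditionalGeneration
+ >>> tokenizer = BertTokenizer.from_pretrained("uer/t5-small-chinese-cluecorpussmall")
+ >>> model = T5ForConditionalGeneration.from_pretrained("uer/t5-small-chinese-cluecorpussmall")
+ >>> inputs = tokenizer("中国的首都是extra0京", return_tensors="pt")
+ >>> outputs = model.generate(**inputs, max_length=50)  # greedy decoding, like do_sample=False above
+ >>> tokenizer.decode(outputs[0], skip_special_tokens=True)
+ ```
+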
+ ## Training data
+
+ [CLUECorpusSmall](https://github.com/CLUEbenchmark/CLUECorpus2020/) is used as training data.
+
+ ## Training procedure
+
+ The model is pre-trained by [UER-py](https://github.com/dbiir/UER-py/) on [Tencent Cloud](https://cloud.tencent.com/). We pre-train 1,000,000 steps with a sequence length of 128 and then pre-train 250,000 additional steps with a sequence length of 512. We use the same hyper-parameters across the different model sizes.
+
+ Taking T5-Small as an example:
+
+ Stage 1:
+
+ ```
+ python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \
+                       --vocab_path models/google_zh_with_sentinel_vocab.txt \
+                       --dataset_path cluecorpussmall_t5_seq128_dataset.pt \
+                       --processes_num 32 --seq_length 128 \
+                       --dynamic_masking --data_processor t5
+ ```
+
+ ```
+ python3 pretrain.py --dataset_path cluecorpussmall_t5_seq128_dataset.pt \
+                     --vocab_path models/google_zh_with_sentinel_vocab.txt \
+                     --config_path models/t5/small_config.json \
+                     --output_model_path models/cluecorpussmall_t5_small_seq128_model.bin \
+                     --world_size 8 --gpu_ranks 0 1 2 3 4 5 6 7 \
+                     --total_steps 1000000 --save_checkpoint_steps 100000 --report_steps 50000 \
+                     --learning_rate 1e-3 --batch_size 64 \
+                     --span_masking --span_geo_prob 0.3 --span_max_length 5
+ ```
+
+ Stage 2:
+
+ ```
+ python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \
+                       --vocab_path models/google_zh_with_sentinel_vocab.txt \
+                       --dataset_path cluecorpussmall_t5_small_seq512_dataset.pt \
+                       --processes_num 32 --seq_length 512 \
+                       --dynamic_masking --data_processor t5
+ ```
+
+ ```
+ python3 pretrain.py --dataset_path cluecorpussmall_t5_small_seq512_dataset.pt \
+                     --vocab_path models/google_zh_with_sentinel_vocab.txt \
+                     --pretrained_model_path models/cluecorpussmall_t5_small_seq128_model.bin-1000000 \
+                     --config_path models/t5/small_config.json \
+                     --output_model_path models/cluecorpussmall_t5_small_seq512_model.bin \
+                     --world_size 8 --gpu_ranks 0 1 2 3 4 5 6 7 \
+                     --total_steps 250000 --save_checkpoint_steps 50000 --report_steps 10000 \
+                     --learning_rate 5e-4 --batch_size 16 \
+                     --span_masking --span_geo_prob 0.3 --span_max_length 5
+ ```
+
+ Finally, we convert the pre-trained model into Hugging Face's format:
+
+ ```
+ python3 scripts/convert_t5_from_uer_to_huggingface.py --input_model_path cluecorpussmall_t5_small_seq512_model.bin-250000 \
+                                                       --output_model_path pytorch_model.bin \
+                                                       --layers_num 6 \
+                                                       --type t5
+ ```
+
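+ As a quick sanity check after conversion (a sketch; it assumes the converted `pytorch_model.bin` is placed in a directory together with this repository's `config.json`):
+
+ ```python
+ >>> from transformers import T5ForConditionalGeneration
+ >>> model = T5ForConditionalGeneration.from_pretrained("./")  # directory holding pytorch_model.bin and config.json
+ >>> model.num_parameters()  # should be consistent with a 6-layer T5-Small
+ ```
+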
+ ### BibTeX entry and citation info
+
+ ```
+ @article{2020t5,
+   title   = {Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer},
+   author  = {Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu},
+   journal = {Journal of Machine Learning Research},
+   pages   = {1-67},
+   year    = {2020}
+ }
+
+ @article{zhao2019uer,
+   title   = {UER: An Open-Source Toolkit for Pre-training Models},
+   author  = {Zhao, Zhe and Chen, Hui and Zhang, Jinbin and Zhao, Xin and Liu, Tao and Lu, Wei and Chen, Xi and Deng, Haotang and Ju, Qi and Du, Xiaoyong},
+   journal = {EMNLP-IJCNLP 2019},
+   pages   = {241},
+   year    = {2019}
+ }
+ ```
+
+ [small]: https://huggingface.co/uer/t5-small-chinese-cluecorpussmall
+ [base]: https://huggingface.co/uer/t5-base-chinese-cluecorpussmall
config.json CHANGED
@@ -1,29 +1,21 @@
  {
- "architectures": [
+ "architectures": [
  "T5ForConditionalGeneration"
  ],
  "d_ff": 2048,
  "d_kv": 64,
  "d_model": 512,
  "decoder_start_token_id": 101,
- "dense_act_fn": "relu",
  "dropout_rate": 0.1,
- "eos_token_id": 1,
- "feed_forward_proj": "relu",
  "initializer_factor": 1.0,
  "is_encoder_decoder": true,
- "is_gated_act": false,
  "layer_norm_epsilon": 1e-06,
  "model_type": "t5",
  "n_positions": 512,
- "num_decoder_layers": 6,
  "num_heads": 8,
  "num_layers": 6,
  "pad_token_id": 0,
- "relative_attention_max_distance": 128,
  "relative_attention_num_buckets": 32,
  "tokenizer_class": "BertTokenizer",
- "transformers_version": "4.26.0",
- "use_cache": true,
  "vocab_size": 21228
  }
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aef9ed19e92fb88f6899da683357e8561ba21dbd2222073a1de1a580fc775d17
+ size 219709002
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45812ed7877f4dc392567fe996a2095e0e4c3c163d2b303c99937ff6929a52fe
+ size 219764039
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac6ab31653426bf9eb6696653aa8f9e1403f34d44000de89d4873d5b55b0d815
+ size 219977904