iMountTai committed on
Commit
5b12551
1 Parent(s): e902507
Files changed (5)
  1. README.md +22 -1
  2. config.json +24 -0
  3. pytorch_model.bin +3 -0
  4. tf_model.h5 +3 -0
  5. vocab.txt +0 -0
README.md CHANGED
@@ -1,3 +1,24 @@
  ---
- license: apache-2.0
+ language:
+ - zh
+ tags:
+ - bert
+ license: "apache-2.0"
  ---
+
+ # Please use 'Bert'-related functions to load this model!
+
+ ## MiniRBT: a small Chinese pre-trained model
+ To further accelerate Chinese natural language processing, we have launched MiniRBT, a small Chinese pre-trained model built with our self-developed knowledge distillation tool TextBrewer, combining Whole Word Masking and knowledge distillation.
+
+ This repository is developed based on: https://github.com/iflytek/ta-minilm-demo
+
+ You may also be interested in:
+ - Chinese LERT: https://github.com/ymcui/LERT
+ - Chinese PERT: https://github.com/ymcui/PERT
+ - Chinese MacBERT: https://github.com/ymcui/MacBERT
+ - Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA
+ - Chinese XLNet: https://github.com/ymcui/Chinese-XLNet
+ - Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer
+
+ More resources by HFL: https://github.com/iflytek/HFL-Anthology
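The README asks for the `Bert*` loading classes rather than the `Auto*` ones. Below is a minimal masked-LM loading sketch with 🤗 Transformers; the Hub repo ID `hfl/minirbt-h256` is an assumption inferred from the `MiniRBT-H256` path in the config.json diff below, and should be replaced with the actual model path if it differs.

```python
from transformers import BertTokenizer, BertForMaskedLM

# Hypothetical repo ID (assumption); substitute the real model path if needed.
MODEL = "hfl/minirbt-h256"

# Per the README, use the Bert* classes rather than the Auto* classes.
tokenizer = BertTokenizer.from_pretrained(MODEL)
model = BertForMaskedLM.from_pretrained(MODEL)

# Fill a [MASK] token as a quick smoke test.
inputs = tokenizer("哈尔滨是[MASK]龙江的省会。", return_tensors="pt")
logits = model(**inputs).logits

# Locate the [MASK] position and decode the highest-scoring token.
mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted = logits[0, mask_pos].argmax(dim=-1)
print(tokenizer.decode(predicted))
```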
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "_name_or_path": "D:\\order\\TA_model\\commit\\MiniRBT-H256",
+ "architectures": [
+ "BertForMaskedLM"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "classifier_dropout": null,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 256,
+ "initializer_range": 0.02,
+ "intermediate_size": 1024,
+ "layer_norm_eps": 1e-12,
+ "max_position_embeddings": 512,
+ "model_type": "bert",
+ "num_attention_heads": 8,
+ "num_hidden_layers": 6,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "transformers_version": "4.22.2",
+ "type_vocab_size": 2,
+ "use_cache": true,
+ "vocab_size": 21128
+ }
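The configuration above describes a 6-layer, 256-hidden, 8-head BERT. As a sketch, the same shape can be rebuilt from these values (copied from the diff) to sanity-check the model's size without downloading any weights:

```python
from transformers import BertConfig, BertForMaskedLM

# Mirror the committed config.json (values copied from the diff above).
config = BertConfig(
    vocab_size=21128,
    hidden_size=256,
    num_hidden_layers=6,
    num_attention_heads=8,
    intermediate_size=1024,
    max_position_embeddings=512,
    type_vocab_size=2,
)

# Build a randomly initialized model with the same shape and count parameters.
model = BertForMaskedLM(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```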
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6728ca573fd5197f28198eb7e6119a76e4d5dc626f953fca8c0dc5ba2798f9fc
+ size 41514155
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3302d877d3565b8b01c65be9aa862977a1cc58431163c190b2b2b38c4aa168a8
+ size 63787792
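Both weight files are committed as Git LFS pointers (a version line, a sha256 oid, and a byte size); the binaries themselves live in LFS storage rather than in the diff. One way to fetch the real files, assuming the same hypothetical repo ID as in the earlier sketch, is via `huggingface_hub`:

```python
from huggingface_hub import hf_hub_download

# The diff entries above are Git LFS pointer files, not the weights;
# hf_hub_download resolves them and fetches the actual binary.
# Repo ID is an assumption, as in the earlier sketch.
path = hf_hub_download(repo_id="hfl/minirbt-h256", filename="pytorch_model.bin")
print(path)  # local cache path of the downloaded ~41.5 MB file
```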
vocab.txt ADDED
The diff for this file is too large to render.