Narsil (HF staff) committed
Commit 60f7bad
1 Parent(s): cc060e9

Initial commit.

config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "activation": "gelu",
+   "attention_dropout": 0.1,
+   "dim": 32,
+   "dropout": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dim": 37,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 4,
+   "n_layers": 5,
+   "pad_token_id": 0,
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "transformers_version": "4.10.0.dev0",
+   "vocab_size": 99
+ }
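This config describes a deliberately tiny DistilBERT (dim 32, 5 layers, 4 heads, 99-token vocabulary), the kind of shape used for fast unit tests. A minimal sketch of consuming it with transformers, assuming the committed files are checked out locally under ./tiny-random-distilbert (a hypothetical path, not named in this commit):

```python
# Sketch only: load the committed config and build a randomly
# initialized model of the same tiny shape (not the checkpoint weights).
from transformers import DistilBertConfig, DistilBertModel

config = DistilBertConfig.from_pretrained("./tiny-random-distilbert")  # hypothetical local path
model = DistilBertModel(config)

print(config.dim, config.n_layers, config.n_heads)  # 32 5 4
print(sum(p.numel() for p in model.parameters()))   # small enough for CI tests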
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3889bb6d57578029fa382bc1b87b18424e424e33536ac71d673609a6bc6ba5c
+ size 265157
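The weight file is stored through Git LFS: the blob committed to git is only this three-line pointer, while the actual 265,157-byte file lives in LFS storage keyed by the sha256 oid. A hedged sketch of checking a downloaded copy against the pointer (the local filename is an assumption):

```python
# Sketch: verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

path = "pytorch_model.bin"  # assumed local download location
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert os.path.getsize(path) == 265157, "size mismatch with the pointer's 'size' line"
assert digest == "c3889bb6d57578029fa382bc1b87b18424e424e33536ac71d673609a6bc6ba5c"
```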
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3901e58038dc298cc083dbd1e6ce1a4c694d6524e39a2213ad5cc7ded3a61979
+ size 12193432
tokenizer.json ADDED
@@ -0,0 +1 @@
+ {"version":"1.0","truncation":null,"padding":null,"added_tokens":[{"id":0,"special":true,"content":"[PAD]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false},{"id":1,"special":true,"content":"[UNK]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false},{"id":2,"special":true,"content":"[CLS]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false},{"id":3,"special":true,"content":"[SEP]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false},{"id":4,"special":true,"content":"[MASK]","single_word":false,"lstrip":false,"rstrip":false,"normalized":false}],"normalizer":{"type":"BertNormalizer","clean_text":true,"handle_chinese_chars":true,"strip_accents":null,"lowercase":true},"pre_tokenizer":{"type":"BertPreTokenizer"},"post_processor":{"type":"TemplateProcessing","single":[{"SpecialToken":{"id":"[CLS]","type_id":0}},{"Sequence":{"id":"A","type_id":0}},{"SpecialToken":{"id":"[SEP]","type_id":0}}],"pair":[{"SpecialToken":{"id":"[CLS]","type_id":0}},{"Sequence":{"id":"A","type_id":0}},{"SpecialToken":{"id":"[SEP]","type_id":0}},{"Sequence":{"id":"B","type_id":1}},{"SpecialToken":{"id":"[SEP]","type_id":1}}],"special_tokens":{"[CLS]":{"id":"[CLS]","ids":[2],"tokens":["[CLS]"]},"[SEP]":{"id":"[SEP]","ids":[3],"tokens":["[SEP]"]}}},"decoder":{"type":"WordPiece","prefix":"##","cleanup":true},"model":{"type":"WordPiece","unk_token":"[UNK]","continuing_subword_prefix":"##","max_input_chars_per_word":100,"vocab":{"[PAD]":0,"[UNK]":1,"[CLS]":2,"[SEP]":3,"[MASK]":4,"0":5,"1":6,"2":7,"3":8,"4":9,"5":10,"6":11,"7":12,"8":13,"9":14,"a":15,"b":16,"c":17,"d":18,"e":19,"f":20,"g":21,"h":22,"i":23,"j":24,"k":25,"l":26,"m":27,"n":28,"o":29,"p":30,"q":31,"r":32,"s":33,"t":34,"u":35,"v":36,"w":37,"x":38,"y":39,"z":40}}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "name_or_path": "distilbert-base-uncased", "tokenizer_class": "DistilBertTokenizer"}
vocab.txt ADDED
@@ -0,0 +1,41 @@
+ [PAD]
+ [UNK]
+ [CLS]
+ [SEP]
+ [MASK]
+ 0
+ 1
+ 2
+ 3
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+ a
+ b
+ c
+ d
+ e
+ f
+ g
+ h
+ i
+ j
+ k
+ l
+ m
+ n
+ o
+ p
+ q
+ r
+ s
+ t
+ u
+ v
+ w
+ x
+ y
+ z
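Together, vocab.txt and tokenizer_config.json also make the repo loadable through the slow (pure-Python) tokenizer class named in the config. A minimal sketch, again assuming a hypothetical local checkout at ./tiny-random-distilbert:

```python
# Sketch: the slow tokenizer built from vocab.txt / tokenizer_config.json.
from transformers import DistilBertTokenizer

tok = DistilBertTokenizer.from_pretrained("./tiny-random-distilbert")  # hypothetical path

print(tok.tokenize("a b"))  # ['a', 'b']
print(tok.tokenize("ab"))   # ['[UNK]'] -- no '##'-prefixed pieces in the vocab
print(tok.convert_tokens_to_ids(["a", "b"]))  # [15, 16]
```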