SaulLu committed
Commit 8a27784
1 Parent(s): 3f9d959

add tokenizer example with model config

config.json ADDED
@@ -0,0 +1 @@
+ { "model_type": "albert", "tokenizer_class": "PreTrainedTokenizerFast" }
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "[CLS]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "single_word": false,
+     "lstrip": true,
+     "rstrip": false,
+     "normalized": true
+   }
+ }
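Once the tokenizer is loaded, each key in `special_tokens_map.json` surfaces as an attribute on the tokenizer object. A short sketch (the local path is hypothetical):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./albert-tokenizer-example")  # hypothetical path

# Each key in special_tokens_map.json becomes a special-token attribute:
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.mask_token)  # [CLS] [SEP] [MASK]
print(tokenizer.all_special_tokens)  # the full set mapped above
```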
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "model_max_length": 512,
+   "bos_token": {
+     "content": "[CLS]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "single_word": false,
+     "lstrip": false,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "single_word": false,
+     "lstrip": true,
+     "rstrip": false,
+     "normalized": true,
+     "__type": "AddedToken"
+   }
+ }
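The `"__type": "AddedToken"` entries deserialize into `AddedToken` objects at load time, so flags like `lstrip` survive the round trip, and `model_max_length` caps the encoded length whenever truncation is requested. A sketch under the same hypothetical path:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./albert-tokenizer-example")  # hypothetical path

print(tokenizer.model_max_length)  # 512, from tokenizer_config.json
enc = tokenizer("word " * 1000, truncation=True)
assert len(enc["input_ids"]) <= 512  # truncation respects model_max_length

# lstrip=True on [MASK] lets the token absorb the whitespace in front of it,
# so "Paris is the [MASK]." and "Paris is the[MASK]." encode the mask the same way.
```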