scampion committed
Commit 4ef325b
1 Parent(s): cf5fdb6

Upload tokenizer

added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "</s>": 65537,
+   "<mask>": 65540,
+   "<pad>": 65539,
+   "<s>": 65536,
+   "<unk>": 65538
+ }
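
Note: added_tokens.json registers five extra tokens on top of the base vocabulary, with IDs 65536 through 65540. As a quick sanity check, a minimal sketch like the one below (not part of this commit) can confirm the mapping through the transformers API, assuming the repository has been cloned to a local directory; the path "./tokenizer_dir" is a hypothetical placeholder.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./tokenizer_dir")  # hypothetical local checkout of this repo
expected = {"<s>": 65536, "</s>": 65537, "<unk>": 65538, "<pad>": 65539, "<mask>": 65540}
for token, token_id in expected.items():
    # convert_tokens_to_ids should return the IDs listed in added_tokens.json
    assert tok.convert_tokens_to_ids(token) == token_id, (token, token_id)
print("added tokens resolve to the expected IDs")
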
special_tokens_map.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "additional_special_tokens": [
+     "[UNK]",
+     "[PAD]",
+     "[CLS]",
+     "[SEP]",
+     "[MASK]",
+     "<s>",
+     "</s>",
+     "<unk>",
+     "<pad>",
+     "<mask>"
+   ],
+   "bos_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
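
Note: special_tokens_map.json keeps the BERT-style tokens ([CLS], [SEP], [MASK], [PAD], [UNK]) as the primary special tokens and lists the RoBERTa/XLM-style tokens only under additional_special_tokens. A minimal sketch of how this surfaces through the transformers API, using the same hypothetical local path as above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./tokenizer_dir")  # hypothetical local checkout of this repo
print(tok.cls_token, tok.sep_token, tok.mask_token, tok.pad_token, tok.unk_token)  # [CLS] [SEP] [MASK] [PAD] [UNK]
print(tok.additional_special_tokens)  # the ten tokens listed above, including <s>, </s>, <unk>, <pad>, <mask>
print(tok.all_special_ids)            # base IDs 0-4 plus the added IDs 65536-65540
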
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,117 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65536": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65537": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65538": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65539": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "65540": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "[UNK]",
+     "[PAD]",
+     "[CLS]",
+     "[SEP]",
+     "[MASK]",
+     "<s>",
+     "</s>",
+     "<unk>",
+     "<pad>",
+     "<mask>"
+   ],
+   "bos_token": "[CLS]",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "errors": "replace",
+   "mask_token": "[MASK]",
+   "max_len": 512,
+   "max_length": 512,
+   "model_max_length": 512,
+   "pad_to_multiple_of": null,
+   "pad_token": "[PAD]",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "sep_token": "[SEP]",
+   "stride": 0,
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "[UNK]"
+ }
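
Note: tokenizer_config.json pins RobertaTokenizer as the tokenizer class and fixes model_max_length at 512, with right-side padding and truncation. A minimal usage sketch under the same hypothetical local-path assumption as above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./tokenizer_dir")  # hypothetical local checkout of this repo
batch = tok(
    ["a short example", "another input"],
    padding="max_length",   # pads on the right with [PAD] (ID 1), per padding_side/pad_token above
    truncation=True,        # truncates on the right, per truncation_side above
    max_length=512,         # matches model_max_length in the config
)
print(len(batch["input_ids"][0]))  # 512
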
vocab.txt ADDED
The diff for this file is too large to render. See raw diff