GinnM committed on
Commit a1f84c3
1 Parent(s): 4fd30ca

Upload tokenizer

Files changed (3)
  1. special_tokens_map.json +9 -0
  2. tokenizer.json +196 -0
  3. tokenizer_config.json +5 -0
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "<cls>",
+   "cls_token": "<cls>",
+   "eos_token": "<eos>",
+   "mask_token": "<mask>",
+   "pad_token": "<pad>",
+   "sep_token": "<eos>",
+   "unk_token": "<unk>"
+ }
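These mappings give <cls> double duty as both BOS and CLS, and <eos> double duty as both EOS and SEP. A minimal sanity-check sketch, assuming the three files from this commit sit in the current directory (the Hub repo id is not part of this diff):

from transformers import AutoTokenizer

# Assumption: the files from this commit are in the current directory;
# the Hub repo id is not shown in this diff.
tok = AutoTokenizer.from_pretrained(".")
assert tok.bos_token == tok.cls_token == "<cls>"
assert tok.eos_token == tok.sep_token == "<eos>"
assert tok.pad_token == "<pad>" and tok.mask_token == "<mask>"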
tokenizer.json ADDED
@@ -0,0 +1,196 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "<cls>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "<pad>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "<eos>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "<unk>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     {
+       "id": 32,
+       "content": "<mask>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": {
+     "type": "Sequence",
+     "normalizers": [
+       {
+         "type": "Strip",
+         "strip_left": true,
+         "strip_right": true
+       },
+       {
+         "type": "Replace",
+         "pattern": {
+           "Regex": "[\\s]"
+         },
+         "content": ""
+       }
+     ]
+   },
+   "pre_tokenizer": {
+     "type": "Split",
+     "pattern": {
+       "Regex": "[\\S]"
+     },
+     "behavior": "Isolated",
+     "invert": false
+   },
+   "post_processor": {
+     "type": "TemplateProcessing",
+     "single": [
+       {
+         "SpecialToken": {
+           "id": "<cls>",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "<eos>",
+           "type_id": 0
+         }
+       }
+     ],
+     "pair": [
+       {
+         "SpecialToken": {
+           "id": "<cls>",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "A",
+           "type_id": 0
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "<eos>",
+           "type_id": 0
+         }
+       },
+       {
+         "Sequence": {
+           "id": "B",
+           "type_id": 1
+         }
+       },
+       {
+         "SpecialToken": {
+           "id": "<eos>",
+           "type_id": 1
+         }
+       }
+     ],
+     "special_tokens": {
+       "<cls>": {
+         "id": "<cls>",
+         "ids": [
+           0
+         ],
+         "tokens": [
+           "<cls>"
+         ]
+       },
+       "<eos>": {
+         "id": "<eos>",
+         "ids": [
+           2
+         ],
+         "tokens": [
+           "<eos>"
+         ]
+       }
+     }
+   },
+   "decoder": {
+     "type": "Metaspace",
+     "replacement": "▁",
+     "add_prefix_space": true
+   },
+   "model": {
+     "type": "WordLevel",
+     "vocab": {
+       "<cls>": 0,
+       "<pad>": 1,
+       "<eos>": 2,
+       "<unk>": 3,
+       "L": 4,
+       "A": 5,
+       "G": 6,
+       "V": 7,
+       "S": 8,
+       "E": 9,
+       "R": 10,
+       "T": 11,
+       "I": 12,
+       "D": 13,
+       "P": 14,
+       "K": 15,
+       "Q": 16,
+       "N": 17,
+       "F": 18,
+       "Y": 19,
+       "M": 20,
+       "H": 21,
+       "W": 22,
+       "C": 23,
+       "X": 24,
+       "B": 25,
+       "U": 26,
+       "Z": 27,
+       "O": 28,
+       ".": 29,
+       "-": 30,
+       "<null_1>": 31,
+       "<mask>": 32
+     },
+     "unk_token": "<unk>"
+   }
+ }
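Read as a pipeline, this configuration tokenizes character by character: the Sequence normalizer strips the input and deletes all whitespace ([\s] replaced with nothing), the Split pre-tokenizer isolates every remaining character ([\S], behavior Isolated), the WordLevel model looks each character up in the 33-symbol vocabulary (the 20 standard amino acids plus ambiguity codes, gap characters, and special tokens), and TemplateProcessing wraps single sequences as <cls> … <eos>. A minimal sketch with the standalone tokenizers library, assuming a local copy of tokenizer.json:

from tokenizers import Tokenizer

# Load the raw tokenizer straight from the file added in this commit.
tok = Tokenizer.from_file("tokenizer.json")

# Whitespace is removed by the normalizer before the per-character split.
enc = tok.encode("MKT LLV")
print(enc.tokens)  # ['<cls>', 'M', 'K', 'T', 'L', 'L', 'V', '<eos>']
print(enc.ids)     # [0, 20, 15, 11, 4, 4, 7, 2]

Since the vocabulary contains no ▁ marker, the Metaspace decoder effectively just concatenates tokens on decode.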
tokenizer_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "clean_up_tokenization_spaces": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
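Here model_max_length is the transformers sentinel for "effectively unbounded" (int(1e30)), and tokenizer_class tells AutoTokenizer to instantiate a PreTrainedTokenizerFast backed by tokenizer.json. An end-to-end sketch, again assuming a local copy of the commit's files (substitute the Hub repo id in practice):

from transformers import AutoTokenizer

# Assumption: the three uploaded files live in the current directory.
tok = AutoTokenizer.from_pretrained(".")
out = tok("MKV")
print(out["input_ids"])                                        # [0, 20, 15, 7, 2]
print(tok.decode(out["input_ids"], skip_special_tokens=True))  # MKV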