linzheng committed
Commit a3db85f · verified · 1 parent: 474addc

Upload tokenizer

special_tokens_map.json ADDED
@@ -0,0 +1,86 @@
+ {
+   "additional_special_tokens": [
+     "<repo_name>",
+     "<file_sep>",
+     "<t2v_token>",
+     "<v2t_token>",
+     "<|start_header_id|>",
+     "<|end_header_id|>",
+     "<|eot_id|>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>"
+   ],
+   "bos_token": {
+     "content": "<bos>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": "<|eot_id|>",
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": "<eos>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
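
For reference, the map above re-points `eos_token` at the chat-stop token `<|eot_id|>` while keeping the base `<bos>`/`<pad>`/`<unk>` tokens. A minimal sketch of loading this tokenizer and checking those values (the repo id below is a hypothetical placeholder; `trust_remote_code=True` is required because the tokenizer class ships in this repo rather than in `transformers`):

```python
# A minimal sketch, assuming a placeholder repo id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("EvaByte/EvaByte",  # placeholder repo id
                                    trust_remote_code=True)

print(tok.bos_token)  # <bos>
print(tok.eos_token)  # <|eot_id|>  (per special_tokens_map.json)
print(tok.additional_special_tokens[:7])
# ['<repo_name>', '<file_sep>', '<t2v_token>', '<v2t_token>',
#  '<|start_header_id|>', '<|end_header_id|>', '<|eot_id|>']
```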
tokenization_evabyte.py ADDED
@@ -0,0 +1,246 @@
+ # coding=utf-8
+
+ """Tokenization class for the EvaByte model."""
+
+
+ from typing import List, Optional, Tuple
+
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ chat_template = """
+ {{- bos_token }}
+ {%- if messages[0]['role'] == 'system' %}
+     {%- set system_message = messages[0]['content'] %}
+     {%- set messages = messages[1:] %}
+ {%- else %}
+     {%- set system_message = "" %}
+ {%- endif %}
+
+ {{- '<|start_header_id|>system<|end_header_id|>\n\n' + system_message + '<|eot_id|>'}}
+
+ {%- for message in messages %}
+     {%- if (message['role'] != 'user') and (message['role'] != 'assistant') %}
+         {{- raise_exception('Conversation roles must be user or assistant') }}
+     {%- endif %}
+
+     {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] + '<|eot_id|>' }}
+ {%- endfor %}
+
+ {%- if add_generation_prompt %}
+     {{- '<|start_header_id|>' + 'assistant' + '<|end_header_id|>\n\n' }}
+ {%- endif %}
+ """
+
+ class EvaByteTokenizer(PreTrainedTokenizer):
+     def __init__(
+         self,
+         bos_token="<bos>",
+         eos_token="<eos>",
+         unk_token="<unk>",
+         sep_token="<sep>",
+         pad_token="<pad>",
+         extra_ids=59,
+         additional_special_tokens=None,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ) -> None:
+         num_base_special_tokens = 5
+         # Add extra_ids to the special token list
+         if extra_ids > 0 and additional_special_tokens is None:
+             additional_special_tokens = [f"<extra_id_{i}>" for i in range(num_base_special_tokens, extra_ids + num_base_special_tokens)]
+         elif extra_ids > 0 and additional_special_tokens is not None and len(additional_special_tokens) > 0:
+             # Check that we have the right number of extra_id special tokens
+             extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
+             if extra_tokens != extra_ids:
+                 raise ValueError(
+                     f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
+                     " provided to EvaByteTokenizer. In this case the additional_special_tokens must include the"
+                     " extra_ids tokens"
+                 )
+
+         # Override some reserved tokens to support the chat template
+         for i, token in enumerate(additional_special_tokens):
+             if token == "<extra_id_5>":
+                 token = "<repo_name>"
+             elif token == "<extra_id_6>":
+                 token = "<file_sep>"
+             elif token == "<extra_id_7>":
+                 token = "<t2v_token>"
+             elif token == "<extra_id_8>":
+                 token = "<v2t_token>"
+             elif token == "<extra_id_9>":
+                 token = "<|start_header_id|>"
+             elif token == "<extra_id_10>":
+                 token = "<|end_header_id|>"
+             elif token == "<extra_id_11>":
+                 token = "<|eot_id|>"
+             additional_special_tokens[i] = token
+
+         # lstrip and rstrip are set to False because we don't want to strip whitespace
+         # from the special tokens; this matters for a byte-level tokenizer
+         pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+         bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+         eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+         unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+         sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+
+         self._added_tokens_decoder = {
+             0: pad_token,
+             1: bos_token,
+             2: eos_token,
+             3: unk_token,  # unk_token is a placeholder
+             4: sep_token,
+             **{i: AddedToken(t, lstrip=False, rstrip=False) for i, t in enumerate(additional_special_tokens, start=num_base_special_tokens)},
+         }
+         self.offset = len(self._added_tokens_decoder)
+         self._utf_vocab_size = 2**8  # one byte covers 256 values
+         self.add_bos_token = True
+         self.add_eos_token = False
+         super().__init__(
+             pad_token=pad_token,
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             sep_token=sep_token,
+             extra_ids=0,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             additional_special_tokens=additional_special_tokens,
+             **kwargs,
+         )
+         self.chat_template = chat_template
+
+     @property
+     def vocab_size(self):
+         return self._utf_vocab_size
+
+     def get_vocab(self):
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = bos_token_id + token_ids_0 + eos_token_id
+
+         if token_ids_1 is not None:
+             output = output + bos_token_id + token_ids_1 + eos_token_id
+
+         return output
+
+     # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.get_special_tokens_mask
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         bos_token_id = [1] if self.add_bos_token else []
+         eos_token_id = [1] if self.add_eos_token else []
+
+         if token_ids_1 is None:
+             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+         return (
+             bos_token_id
+             + ([0] * len(token_ids_0))
+             + eos_token_id
+             + bos_token_id
+             + ([0] * len(token_ids_1))
+             + eos_token_id
+         )
+
+     # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.create_token_type_ids_from_sequences
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+         sequence pair mask has the following format:
+
+         ```
+         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+         | first sequence | second sequence |
+         ```
+
+         If token_ids_1 is None, only returns the first portion of the mask (0s).
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+         """
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+         if token_ids_1 is not None:
+             output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+         return output
+
+     def _tokenize(self, text: str) -> List[str]:
+         """Take as input a string and return a list of tokens, one per UTF-8 byte."""
+         tokens = [chr(i) for i in text.encode("utf-8")]
+         return tokens
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+
+         if len(token) != 1:
+             token_id = None
+         else:
+             token_id = ord(token) + self.offset
+
+         return token_id
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a byte (str) using the vocab."""
+         token = chr(index - self.offset)
+         return token
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of bytes (string) to a single string."""
+         bstring = b""
+         for token in tokens:
+             if token in self.added_tokens_decoder:
+                 tok_string = self.added_tokens_decoder[token].encode("utf-8")
+             elif token in self.added_tokens_encoder:
+                 tok_string = token.encode("utf-8")
+             else:
+                 tok_string = bytes([ord(token)])
+             bstring += tok_string
+         string = bstring.decode("utf-8", errors="ignore")
+         return string
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         return ()
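
The class above is a plain byte-level tokenizer: ids 0–63 are reserved for the special tokens registered in `_added_tokens_decoder` (5 base tokens plus 59 extras), and each UTF-8 byte `b` then maps to id `b + offset`. A standalone sketch of that arithmetic, assuming the default offset of 64 (this mirrors `_tokenize`/`_convert_token_to_id`, rather than calling into the class itself):

```python
# Mirrors the id scheme of EvaByteTokenizer under its defaults:
# offset = 5 base special tokens + 59 extra ids = 64.
OFFSET = 64

def encode_bytes(text: str) -> list[int]:
    # _tokenize yields one chr(byte) per UTF-8 byte; _convert_token_to_id adds the offset
    return [b + OFFSET for b in text.encode("utf-8")]

def decode_bytes(ids: list[int]) -> str:
    # _convert_id_to_token subtracts the offset; convert_tokens_to_string re-decodes UTF-8
    return bytes(i - OFFSET for i in ids).decode("utf-8", errors="ignore")

ids = encode_bytes("héllo")
print(ids)                # six ids, since "é" is two UTF-8 bytes
print(decode_bytes(ids))  # héllo
```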
tokenizer_config.json ADDED
@@ -0,0 +1,594 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<bos>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<eos>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "<sep>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": "<repo_name>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "6": {
+       "content": "<file_sep>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "7": {
+       "content": "<t2v_token>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "8": {
+       "content": "<v2t_token>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "9": {
+       "content": "<|start_header_id|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "10": {
+       "content": "<|end_header_id|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "11": {
+       "content": "<|eot_id|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "12": {
+       "content": "<extra_id_12>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "13": {
+       "content": "<extra_id_13>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "14": {
+       "content": "<extra_id_14>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "15": {
+       "content": "<extra_id_15>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "16": {
+       "content": "<extra_id_16>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "17": {
+       "content": "<extra_id_17>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "18": {
+       "content": "<extra_id_18>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "19": {
+       "content": "<extra_id_19>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "20": {
+       "content": "<extra_id_20>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "21": {
+       "content": "<extra_id_21>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "22": {
+       "content": "<extra_id_22>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "23": {
+       "content": "<extra_id_23>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "24": {
+       "content": "<extra_id_24>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "25": {
+       "content": "<extra_id_25>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "26": {
+       "content": "<extra_id_26>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "27": {
+       "content": "<extra_id_27>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "28": {
+       "content": "<extra_id_28>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "29": {
+       "content": "<extra_id_29>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "30": {
+       "content": "<extra_id_30>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "31": {
+       "content": "<extra_id_31>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "32": {
+       "content": "<extra_id_32>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "33": {
+       "content": "<extra_id_33>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "34": {
+       "content": "<extra_id_34>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "35": {
+       "content": "<extra_id_35>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "36": {
+       "content": "<extra_id_36>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "37": {
+       "content": "<extra_id_37>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "38": {
+       "content": "<extra_id_38>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "39": {
+       "content": "<extra_id_39>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "40": {
+       "content": "<extra_id_40>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "41": {
+       "content": "<extra_id_41>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "42": {
+       "content": "<extra_id_42>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "43": {
+       "content": "<extra_id_43>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "44": {
+       "content": "<extra_id_44>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "45": {
+       "content": "<extra_id_45>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "46": {
+       "content": "<extra_id_46>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "47": {
+       "content": "<extra_id_47>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "48": {
+       "content": "<extra_id_48>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "49": {
+       "content": "<extra_id_49>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "50": {
+       "content": "<extra_id_50>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "51": {
+       "content": "<extra_id_51>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "52": {
+       "content": "<extra_id_52>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "53": {
+       "content": "<extra_id_53>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "54": {
+       "content": "<extra_id_54>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "55": {
+       "content": "<extra_id_55>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "56": {
+       "content": "<extra_id_56>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "57": {
+       "content": "<extra_id_57>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "58": {
+       "content": "<extra_id_58>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "59": {
+       "content": "<extra_id_59>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "60": {
+       "content": "<extra_id_60>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "61": {
+       "content": "<extra_id_61>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "62": {
+       "content": "<extra_id_62>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "63": {
+       "content": "<extra_id_63>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<repo_name>",
+     "<file_sep>",
+     "<t2v_token>",
+     "<v2t_token>",
+     "<|start_header_id|>",
+     "<|end_header_id|>",
+     "<|eot_id|>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>"
+   ],
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_evabyte.EvaByteTokenizer",
+       null
+     ]
+   },
+   "bos_token": "<bos>",
+   "chat_template": "\n{{- bos_token }}\n{%- if messages[0]['role'] == 'system' %}\n    {%- set system_message = messages[0]['content'] %}\n    {%- set messages = messages[1:] %}\n{%- else %}\n    {%- set system_message = \"\" %}\n{%- endif %}\n\n{{- '<|start_header_id|>system<|end_header_id|>\n\n' + system_message + '<|eot_id|>'}}\n\n{%- for message in messages %}\n    {%- if (message['role'] != 'user') and (message['role'] != 'assistant') %}\n        {{- raise_exception('Conversation roles must be user or assistant') }}\n    {%- endif %}\n\n    {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] + '<|eot_id|>' }}\n{%- endfor %}\n\n{%- if add_generation_prompt %}\n    {{- '<|start_header_id|>' + 'assistant' + '<|end_header_id|>\n\n' }}\n{%- endif %}\n",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|eot_id|>",
+   "extra_ids": 0,
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sep_token": "<eos>",
+   "tokenizer_class": "EvaByteTokenizer",
+   "unk_token": "<unk>"
+ }
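
Together with the serialized `chat_template` above, rendering a conversation looks like the following sketch (again with a hypothetical placeholder repo id):

```python
# A sketch of applying the chat template serialized above; repo id is a placeholder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("EvaByte/EvaByte", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi!"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <bos><|start_header_id|>system<|end_header_id|>
#
# You are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>
#
# Hi!<|eot_id|><|start_header_id|>assistant<|end_header_id|>
```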