xiaotinghe committed on
Commit
15a8eb4
1 Parent(s): dcccc28

Upload tokenizer

special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenization_baichuan.py ADDED
@@ -0,0 +1,250 @@
+ # coding=utf-8
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from shutil import copyfile
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import sentencepiece as spm
+
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+ VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
+
+ PRETRAINED_VOCAB_FILES_MAP = {
+     "vocab_file": {},
+     "tokenizer_file": {},
+ }
+ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+
+
+ class BaiChuanTokenizer(PreTrainedTokenizer):
+     """
+     Construct a BaiChuan tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+     Args:
+         vocab_file (`str`):
+             Path to the vocabulary file.
+     """
+
+     vocab_files_names = VOCAB_FILES_NAMES
+     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+     max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
+     model_input_names = ["input_ids", "attention_mask"]
+
+     def __init__(
+         self,
+         vocab_file,
+         unk_token="<unk>",
+         bos_token="<s>",
+         eos_token="</s>",
+         pad_token=None,
+         sp_model_kwargs: Optional[Dict[str, Any]] = None,
+         add_bos_token=True,
+         add_eos_token=False,
+         clean_up_tokenization_spaces=False,
+         **kwargs,
+     ):
+         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+         bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+         eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+         unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+         pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+         super().__init__(
+             bos_token=bos_token,
+             eos_token=eos_token,
+             unk_token=unk_token,
+             pad_token=pad_token,
+             add_bos_token=add_bos_token,
+             add_eos_token=add_eos_token,
+             sp_model_kwargs=self.sp_model_kwargs,
+             clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+             **kwargs,
+         )
+         self.vocab_file = vocab_file
+         self.add_bos_token = add_bos_token
+         self.add_eos_token = add_eos_token
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(vocab_file)
+
+     def __getstate__(self):
+         state = self.__dict__.copy()
+         state["sp_model"] = None
+         return state
+
+     def __setstate__(self, d):
+         self.__dict__ = d
+         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+         self.sp_model.Load(self.vocab_file)
+
+     @property
+     def vocab_size(self):
+         """Returns vocab size"""
+         return self.sp_model.get_piece_size()
+
+     def get_vocab(self):
+         """Returns vocab as a dict"""
+         vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+         vocab.update(self.added_tokens_encoder)
+         return vocab
+
+     def _tokenize(self, text):
+         """Returns a tokenized string."""
+         return self.sp_model.encode(text, out_type=str)
+
+     def _convert_token_to_id(self, token):
+         """Converts a token (str) to an id using the vocab."""
+         return self.sp_model.piece_to_id(token)
+
+     def _convert_id_to_token(self, index):
+         """Converts an index (integer) to a token (str) using the vocab."""
+         token = self.sp_model.IdToPiece(index)
+         return token
+
+     def convert_tokens_to_string(self, tokens):
+         """Converts a sequence of tokens (string) into a single string."""
+         current_sub_tokens = []
+         out_string = ""
+         prev_is_special = False
+         for i, token in enumerate(tokens):
+             # make sure that special tokens are not decoded using sentencepiece model
+             if token in self.all_special_tokens:
+                 if not prev_is_special and i != 0:
+                     out_string += " "
+                 out_string += self.sp_model.decode(current_sub_tokens) + token
+                 prev_is_special = True
+                 current_sub_tokens = []
+             else:
+                 current_sub_tokens.append(token)
+                 prev_is_special = False
+         out_string += self.sp_model.decode(current_sub_tokens)
+         return out_string
+
+     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
+         """
+         Save the vocabulary and special tokens file to a directory.
+
+         Args:
+             save_directory (`str`):
+                 The directory in which to save the vocabulary.
+
+         Returns:
+             `Tuple(str)`: Paths to the files saved.
+         """
+         if not os.path.isdir(save_directory):
+             logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+             return
+         out_vocab_file = os.path.join(
+             save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+         )
+
+         if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+             copyfile(self.vocab_file, out_vocab_file)
+         elif not os.path.isfile(self.vocab_file):
+             with open(out_vocab_file, "wb") as fi:
+                 content_spiece_model = self.sp_model.serialized_model_proto()
+                 fi.write(content_spiece_model)
+
+         return (out_vocab_file,)
+
+     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = bos_token_id + token_ids_0 + eos_token_id
+
+         if token_ids_1 is not None:
+             output = output + bos_token_id + token_ids_1 + eos_token_id
+
+         return output
+
+     def get_special_tokens_mask(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+     ) -> List[int]:
+         """
+         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+         special tokens using the tokenizer `prepare_for_model` method.
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of IDs.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+                 Whether or not the token list is already formatted with special tokens for the model.
+
+         Returns:
+             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+         """
+         if already_has_special_tokens:
+             return super().get_special_tokens_mask(
+                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+             )
+
+         bos_token_id = [1] if self.add_bos_token else []
+         eos_token_id = [1] if self.add_eos_token else []
+
+         if token_ids_1 is None:
+             return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+         return (
+             bos_token_id
+             + ([0] * len(token_ids_0))
+             + eos_token_id
+             + bos_token_id
+             + ([0] * len(token_ids_1))
+             + eos_token_id
+         )
+
+     def create_token_type_ids_from_sequences(
+         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+     ) -> List[int]:
+         """
+         Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+         sequence pair mask has the following format:
+
+         ```
+         0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+         | first sequence    | second sequence |
+         ```
+
+         if token_ids_1 is None, only returns the first portion of the mask (0s).
+
+         Args:
+             token_ids_0 (`List[int]`):
+                 List of ids.
+             token_ids_1 (`List[int]`, *optional*):
+                 Optional second list of IDs for sequence pairs.
+
+         Returns:
+             `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+         """
+         bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+         eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+         output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+         if token_ids_1 is not None:
+             output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+         return output
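
The class above is a standard slow (SentencePiece) tokenizer, so once `tokenization_baichuan.py` and `tokenizer.model` are on disk it can be exercised directly. A minimal sketch, assuming the files sit in the working directory and a transformers version contemporaneous with this code (the 4.2x/4.3x-era slow-tokenizer API); the input text is arbitrary:

```python
# Hedged usage sketch (not part of the commit): construct BaiChuanTokenizer
# directly from a local SentencePiece model file. The path is a placeholder.
from tokenization_baichuan import BaiChuanTokenizer

tok = BaiChuanTokenizer("tokenizer.model", add_bos_token=True, add_eos_token=True)

# encode() runs _tokenize -> _convert_token_to_id, then
# build_inputs_with_special_tokens wraps the ids with <s> ... </s>
# because add_bos_token / add_eos_token are enabled here.
ids = tok.encode("hello world")
print(ids)

# get_special_tokens_mask marks the <s>/</s> positions with 1s.
print(tok.get_special_tokens_mask(ids, already_has_special_tokens=True))

# decode() round-trips back to text, dropping the special tokens.
print(tok.decode(ids, skip_special_tokens=True))
```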
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4be54af290d93c113bcbf421115ae9eed9d6340408f564898f1e966dc738ef01
+ size 1136699
tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenization_baichuan.BaiChuanTokenizer",
+       null
+     ]
+   },
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "tokenizer_class": "BaiChuanTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
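
With these four files in a model repository, the tokenizer is intended to be loaded through `AutoTokenizer`: the `auto_map` entry in `tokenizer_config.json` points the slow-tokenizer slot at `tokenization_baichuan.BaiChuanTokenizer` (the fast slot is `null`), so `trust_remote_code=True` is required. A minimal sketch; the repo id below is a placeholder, not part of this commit:

```python
# Hedged sketch: loading the uploaded files via AutoTokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "namespace/model-name",  # placeholder repo id
    use_fast=False,          # only a slow SentencePiece tokenizer is defined (fast slot is null)
    trust_remote_code=True,  # pulls BaiChuanTokenizer from tokenization_baichuan.py in the repo
)

ids = tokenizer("hello world").input_ids
# tokenizer_config.json sets add_bos_token / add_eos_token to false,
# so these ids carry no <s> or </s> wrappers by default.
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.decode(ids))
```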