Upload 3 files

- __init__.py (+0 -0)
- tokenization_bertweet.py (+792 -0)
- tokenization_bertweet_fast.py (+324 -0)
__init__.py
ADDED
File without changes
tokenization_bertweet.py
ADDED
@@ -0,0 +1,792 @@
# coding=utf-8
# Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for BERTweet"""


import html
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

import regex

from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/bertweet-base": 128,
}


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs

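# A minimal sketch (added for illustration, not part of the uploaded file) of
# what `get_pairs` produces, assuming the last symbol already carries the
# "</w>" end-of-word marker that `bpe` appends:
#
#     get_pairs(("l", "o", "w", "er</w>"))
#     # -> {("l", "o"), ("o", "w"), ("w", "er</w>")}
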
class BertweetTokenizer(PreTrainedTokenizer):
    """
    Constructs a BERTweet tokenizer, using Byte-Pair-Encoding.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        normalization (`bool`, *optional*, defaults to `False`):
            Whether or not to apply a normalization preprocess.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        normalization=False,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs
    ):
        super().__init__(
            normalization=normalization,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        try:
            from emoji import demojize

            self.demojizer = demojize
        except ImportError:
            logger.warning(
                "emoji is not installed, thus not converting emoticons or emojis into text. Install emoji: pip3"
                " install emoji==0.6.0"
            )
            self.demojizer = None

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)
        self.encoder[self.mask_token] = len(self.encoder)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

        self.normalization = normalization
        self.tweetPreprocessor = TweetTokenizer()

        self.special_puncts = {"’": "'", "…": "..."}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. A BERTweet sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """

        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

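    # A minimal sketch (added for illustration, not part of the uploaded file)
    # of the resulting layouts, using the fixed ids assigned in __init__
    # (<s> == 0, </s> == 2):
    #
    #     build_inputs_with_special_tokens([5, 6])          # -> [0, 5, 6, 2]
    #     build_inputs_with_special_tokens([5, 6], [7, 8])  # -> [0, 5, 6, 2, 2, 7, 8, 2]
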
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does
        not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """

        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

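    # Illustrative note (added, not part of the uploaded file): `bpe` returns
    # the token split into subwords joined by spaces, with "@@" marking
    # word-internal pieces — e.g. a token might come back as "pre@@ sum@@ ptive".
    # The exact segmentation depends entirely on the learned merges in bpe.codes.
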
    def _tokenize(self, text):
        """Tokenize a string."""
        if self.normalization:  # Perform Tweet normalization before performing BPE
            text = self.normalizeTweet(text)

        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend([t for t in self.bpe(token).split(" ")])
        return split_tokens

    def normalizeTweet(self, tweet):
        """
        Normalize a raw Tweet
        """
        for punct in self.special_puncts:
            tweet = tweet.replace(punct, self.special_puncts[punct])

        tokens = self.tweetPreprocessor.tokenize(tweet)
        normTweet = " ".join([self.normalizeToken(token) for token in tokens])

        normTweet = (
            normTweet.replace("cannot ", "can not ")
            .replace("n't ", " n't ")
            .replace("n 't ", " n't ")
            .replace("ca n't", "can't")
            .replace("ai n't", "ain't")
        )
        normTweet = (
            normTweet.replace("'m ", " 'm ")
            .replace("'re ", " 're ")
            .replace("'s ", " 's ")
            .replace("'ll ", " 'll ")
            .replace("'d ", " 'd ")
            .replace("'ve ", " 've ")
        )
        normTweet = (
            normTweet.replace(" p . m .", " p.m.")
            .replace(" p . m ", " p.m ")
            .replace(" a . m .", " a.m.")
            .replace(" a . m ", " a.m ")
        )

        return " ".join(normTweet.split())

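    # A minimal sketch (added for illustration, not part of the uploaded file)
    # of the intended effect — user handles become @USER, URLs become HTTPURL,
    # and single-character emoji are demojized when the `emoji` package is
    # installed:
    #
    #     normalizeTweet("@bob check https://example.com cannot wait :-)")
    #     # -> "@USER check HTTPURL can not wait :-)"
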
    def normalizeToken(self, token):
        """
        Normalize tokens in a Tweet
        """
        lowercased_token = token.lower()
        if token.startswith("@"):
            return "@USER"
        elif lowercased_token.startswith("http") or lowercased_token.startswith("www"):
            return "HTTPURL"
        elif len(token) == 1:
            if token in self.special_puncts:
                return self.special_puncts[token]
            if self.demojizer is not None:
                return self.demojizer(token)
            else:
                return token
        else:
            return token

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return

        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "w", encoding="utf-8") as fp:
                for token, value in self.encoder.items():
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} 1\n")

        out_merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merges_file) and os.path.isfile(self.merges_file):
            copyfile(self.merges_file, out_merges_file)
        elif not os.path.isfile(self.merges_file):
            index = 0
            with open(out_merges_file, "w", encoding="utf-8") as writer:
                for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                    if index != token_index:
                        logger.warning(
                            f"Saving vocabulary to {out_merges_file}: BPE merge indices are not consecutive."
                            " Please check that the tokenizer is not corrupted!"
                        )
                        index = token_index
                    writer.write(" ".join(bpe_tokens) + " 1\n")
                    index += 1

        return (out_vocab_file, out_merges_file)

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)


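# A hypothetical sketch (added for illustration, not part of the uploaded file)
# of the "<token> <cnt>" format that `add_from_file` expects, one entry per
# line of vocab.txt; the tokens and counts below are invented:
#
#     @USER 9948
#     HTTPURL 9423
#     the@@ 8904
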
# Natural Language Toolkit: Twitter Tokenizer
#
# Copyright (C) 2001-2020 NLTK Project
# Author: Christopher Potts <cgpotts@stanford.edu>
#         Ewan Klein <ewan@inf.ed.ac.uk> (modifications)
#         Pierpaolo Pantone <> (modifications)
# URL: http://nltk.org/
# For license information, see LICENSE.TXT
#


"""
Twitter-aware tokenizer, designed to be flexible and easy to adapt to new domains and tasks. The basic logic is
this:

1. The tuple regex_strings defines a list of regular expression strings.

2. The regex_strings strings are put, in order, into a compiled regular expression object called word_re.

3. The tokenization is done by word_re.findall(s), where s is the user-supplied string, inside the tokenize() method
of the class Tokenizer.

4. When instantiating Tokenizer objects, there is a single option: preserve_case. By default, it is set to True. If
it is set to False, then the tokenizer will lowercase everything except for emoticons.

"""


######################################################################
#
# import regex  # https://github.com/nltk/nltk/issues/2409
# import html
#
######################################################################
# The following strings are components in the regular expression
# that is used for tokenizing. It's important that phone_number
# appears first in the final regex (since it can contain whitespace).
# It also could matter that tags comes after emoticons, due to the
# possibility of having text like
#
#     <:| and some text >:)
#
# Most importantly, the final element should always be last, since it
# does a last ditch whitespace-based tokenization of whatever is left.

# ToDo: Update with http://en.wikipedia.org/wiki/List_of_emoticons ?

# This particular element is used in a couple ways, so we define it
# with a name:
# docstyle-ignore
EMOTICONS = r"""
    (?:
      [<>]?
      [:;=8]                     # eyes
      [\-o\*\']?                 # optional nose
      [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
      |
      [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
      [\-o\*\']?                 # optional nose
      [:;=8]                     # eyes
      [<>]?
      |
      <3                         # heart
    )"""

# URL pattern due to John Gruber, modified by Tom Winzig. See
# https://gist.github.com/winzig/8894715
# docstyle-ignore
URLS = r"""                 # Capture 1: entire matched URL
  (?:
  https?:                   # URL protocol and colon
    (?:
      /{1,3}                # 1-3 slashes
      |                     #  or
      [a-z0-9%]             # Single letter or digit or '%'
                            # (Trying not to match e.g. "URI::Escape")
    )
    |                       #  or
    # looks like domain name followed by a slash:
    [a-z0-9.\-]+[.]
    (?:[a-z]{2,13})
    /
  )
  (?:                       # One or more:
    [^\s()<>{}\[\]]+        # Run of non-space, non-()<>{}[]
    |                       #  or
    \([^\s()]*?\([^\s()]+\)[^\s()]*?\)  # balanced parens, one level deep: (...(...)...)
    |
    \([^\s]+?\)             # balanced parens, non-recursive: (...)
  )+
  (?:                       # End with:
    \([^\s()]*?\([^\s()]+\)[^\s()]*?\)  # balanced parens, one level deep: (...(...)...)
    |
    \([^\s]+?\)             # balanced parens, non-recursive: (...)
    |                       #  or
    [^\s`!()\[\]{};:'".,<>?«»“”‘’]  # not a space or one of these punct chars
  )
  |                         # OR, the following to match naked domains:
  (?:
    (?<!@)                  # not preceded by a @, avoid matching foo@_gmail.com_
    [a-z0-9]+
    (?:[.\-][a-z0-9]+)*
    [.]
    (?:[a-z]{2,13})
    \b
    /?
    (?!@)                   # not succeeded by a @,
                            # avoid matching "foo.na" in "foo.na@example.com"
  )
"""

# docstyle-ignore
# The components of the tokenizer:
REGEXPS = (
    URLS,
    # Phone numbers:
    r"""
    (?:
      (?:             # (international)
        \+?[01]
        [ *\-.\)]*
      )?
      (?:             # (area code)
        [\(]?
        \d{3}
        [ *\-.\)]*
      )?
      \d{3}           # exchange
      [ *\-.\)]*
      \d{4}           # base
    )""",
    # ASCII Emoticons
    EMOTICONS,
    # HTML tags:
    r"""<[^>\s]+>""",
    # ASCII Arrows
    r"""[\-]+>|<[\-]+""",
    # Twitter username:
    r"""(?:@[\w_]+)""",
    # Twitter hashtags:
    r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)""",
    # email addresses
    r"""[\w.+-]+@[\w-]+\.(?:[\w-]\.?)+[\w-]""",
    # docstyle-ignore
    # Remaining word types:
    r"""
    (?:[^\W\d_](?:[^\W\d_]|['\-_])+[^\W\d_])  # Words with apostrophes or dashes.
    |
    (?:[+\-]?\d+[,/.:-]\d+[+\-]?)  # Numbers, including fractions, decimals.
    |
    (?:[\w_]+)  # Words without apostrophes or dashes.
    |
    (?:\.(?:\s*\.){1,})  # Ellipsis dots.
    |
    (?:\S)  # Everything else that isn't whitespace.
    """,
)

######################################################################
# This is the core tokenizing regex:

WORD_RE = regex.compile(r"""(%s)""" % "|".join(REGEXPS), regex.VERBOSE | regex.I | regex.UNICODE)

# WORD_RE performs poorly on these patterns:
HANG_RE = regex.compile(r"([^a-zA-Z0-9])\1{3,}")

# The emoticon string gets its own regex so that we can preserve case for
# them as needed:
EMOTICON_RE = regex.compile(EMOTICONS, regex.VERBOSE | regex.I | regex.UNICODE)

# These are for regularizing HTML entities to Unicode:
ENT_RE = regex.compile(r"&(#?(x?))([^&;\s]+);")

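# A minimal sketch (added for illustration, not part of the uploaded file) of
# the compiled patterns in action:
#
#     WORD_RE.findall("ikr smh :-) <3")      # -> ['ikr', 'smh', ':-)', '<3']
#     HANG_RE.sub(r"\1\1\1", "wow!!!!!!!!")  # -> 'wow!!!'
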
######################################################################
# Functions for converting html entities
######################################################################


def _str_to_unicode(text, encoding=None, errors="strict"):
    if encoding is None:
        encoding = "utf-8"
    if isinstance(text, bytes):
        return text.decode(encoding, errors)
    return text


def _replace_html_entities(text, keep=(), remove_illegal=True, encoding="utf-8"):
    """
    Remove entities from text by converting them to their corresponding unicode character.

    Args:
        text:
            A unicode string or a byte string encoded in the given *encoding* (which defaults to 'utf-8').
        keep (list):
            List of entity names which should not be replaced. This supports both numeric entities (`&#nnnn;` and
            `&#hhhh;`) and named entities (such as `&nbsp;` or `&gt;`).
        remove_illegal (bool):
            If `True`, entities that can't be converted are removed. Otherwise, entities that can't be converted are
            kept "as is".

    Returns: A unicode string with the entities removed.

    See https://github.com/scrapy/w3lib/blob/master/w3lib/html.py

    >>> from nltk.tokenize.casual import _replace_html_entities
    >>> _replace_html_entities(b'Price: &pound;100')
    'Price: \\xa3100'
    >>> print(_replace_html_entities(b'Price: &pound;100'))
    Price: £100
    """

    def _convert_entity(match):
        entity_body = match.group(3)
        if match.group(1):
            try:
                if match.group(2):
                    number = int(entity_body, 16)
                else:
                    number = int(entity_body, 10)
                # Numeric character references in the 80-9F range are typically
                # interpreted by browsers as representing the characters mapped
                # to bytes 80-9F in the Windows-1252 encoding. For more info
                # see: https://en.wikipedia.org/wiki/ISO/IEC_8859-1#Similar_character_sets
                if 0x80 <= number <= 0x9F:
                    return bytes((number,)).decode("cp1252")
            except ValueError:
                number = None
        else:
            if entity_body in keep:
                return match.group(0)
            else:
                number = html.entities.name2codepoint.get(entity_body)
        if number is not None:
            try:
                return chr(number)
            except (ValueError, OverflowError):
                pass

        return "" if remove_illegal else match.group(0)

    return ENT_RE.sub(_convert_entity, _str_to_unicode(text, encoding))


######################################################################


class TweetTokenizer:
    r"""
    Examples:

    ```python
    >>> # Tokenizer for tweets.
    >>> from nltk.tokenize import TweetTokenizer

    >>> tknzr = TweetTokenizer()
    >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--"
    >>> tknzr.tokenize(s0)
    ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--']

    >>> # Examples using the *strip_handles* and *reduce_len* parameters:
    >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
    >>> s1 = "@remy: This is waaaaayyyy too much for you!!!!!!"
    >>> tknzr.tokenize(s1)
    [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!']
    ```"""

    def __init__(self, preserve_case=True, reduce_len=False, strip_handles=False):
        self.preserve_case = preserve_case
        self.reduce_len = reduce_len
        self.strip_handles = strip_handles

    def tokenize(self, text):
        """
        Args:
            text: str

        Returns: list(str) A tokenized list of strings; concatenating this list returns the original string if
        `preserve_case=False`
        """
        # Fix HTML character entities:
        text = _replace_html_entities(text)
        # Remove username handles
        if self.strip_handles:
            text = remove_handles(text)
        # Normalize word lengthening
        if self.reduce_len:
            text = reduce_lengthening(text)
        # Shorten problematic sequences of characters
        safe_text = HANG_RE.sub(r"\1\1\1", text)
        # Tokenize:
        words = WORD_RE.findall(safe_text)
        # Possibly alter the case, but avoid changing emoticons like :D into :d:
        if not self.preserve_case:
            words = list(map((lambda x: x if EMOTICON_RE.search(x) else x.lower()), words))
        return words


######################################################################
# Normalization Functions
######################################################################


def reduce_lengthening(text):
    """
    Replace repeated character sequences of length 3 or greater with sequences of length 3.
    """
    pattern = regex.compile(r"(.)\1{2,}")
    return pattern.sub(r"\1\1\1", text)

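# Illustrative sketch (added, not part of the uploaded file):
#
#     reduce_lengthening("waaaaayyyy")  # -> 'waaayyy'
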

def remove_handles(text):
    """
    Remove Twitter username handles from text.
    """
    pattern = regex.compile(
        r"(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){20}(?!@))|(?<![A-Za-z0-9_!@#\$%&*])@(([A-Za-z0-9_]){1,19})(?![A-Za-z0-9_]*@)"
    )
    # Substitute handles with ' ' to ensure that text on either side of removed handles are tokenized correctly
    return pattern.sub(" ", text)

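# Illustrative sketch (added, not part of the uploaded file); each handle is
# replaced by a single space:
#
#     remove_handles("@remy: This is too much")  # -> ' : This is too much'
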

######################################################################
# Tokenization Function
######################################################################


def casual_tokenize(text, preserve_case=True, reduce_len=False, strip_handles=False):
    """
    Convenience function for wrapping the tokenizer.
    """
    return TweetTokenizer(preserve_case=preserve_case, reduce_len=reduce_len, strip_handles=strip_handles).tokenize(
        text
    )


###############################################################################
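
A minimal usage sketch for the tokenizer above (illustrative only; the local file paths are assumptions, and `emoji==0.6.0` must be installed for emoji demojization to take effect):

    from tokenization_bertweet import BertweetTokenizer

    # Point at the vocab/merges files from this commit; normalization=True turns on
    # the Tweet-specific preprocessing (@USER, HTTPURL, emoji-to-text).
    tokenizer = BertweetTokenizer("vocab.txt", "bpe.codes", normalization=True)
    enc = tokenizer("@bob check https://example.com cannot wait :-)")
    print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
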
tokenization_bertweet_fast.py
ADDED
@@ -0,0 +1,324 @@
# coding=utf-8
# Copyright (c) 2020, VinAI Research and the HuggingFace Inc. team.
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for BERTweet"""

import os
from collections import defaultdict
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

from transformers.tokenization_utils_base import EncodingFast
from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
from transformers.utils import logging

from .tokenization_bertweet import BertweetTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
    "tokenizer_file": "tokenizer.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/bpe.codes",
    },
    "tokenizer_file": {
        "vinai/bertweet-base": "https://huggingface.co/vinai/bertweet-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/bertweet-base": 128,
}


class BertweetTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "Fast" BPE tokenizer for BERTweet (backed by HuggingFace's *tokenizers* library).

    Peculiarities:

    - uses BERT's pre-tokenizer: BertPreTokenizer splits tokens on spaces, and also on punctuation. Each occurrence
      of a punctuation character will be treated separately.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the methods. Users should refer
    to the superclass for more information regarding methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BertweetTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def get_added_vocab_hacking(self):
        """
        Returns the added tokens in the vocabulary as a dictionary of token to index.

        Returns:
            `Dict[str, int], Dict[int, int]`: The added tokens, and their original and new ids
        """
        base_vocab_size = self._tokenizer.get_vocab_size(with_added_tokens=False)
        full_vocab_size = self._tokenizer.get_vocab_size(with_added_tokens=True)
        if full_vocab_size == base_vocab_size:
            return {}, {}

        # Tokens in added_vocab should have ids that are equal to or larger than the size of base_vocab
        added_vocab = dict(
            (self._tokenizer.id_to_token(index), index + 1 - base_vocab_size + self.mask_token_id)
            for index in range(base_vocab_size, full_vocab_size)
        )

        id_mapping = dict((index, self._tokenizer.token_to_id(tok)) for tok, index in added_vocab.items())

        return added_vocab, id_mapping

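    # Illustrative note (added, not part of the uploaded file): tokens that the
    # backend added on top of the base vocabulary are re-numbered consecutively
    # starting at mask_token_id + 1, and `id_mapping` maps each re-numbered id
    # back to the backend's own id so that `_decode` can translate them.
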
    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = True,
        **kwargs
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        if isinstance(token_ids, int):
            token_ids = [token_ids]

        # Mapping ids into their original values
        _, id_mapping = self.get_added_vocab_hacking()
        if len(id_mapping) > 0:
            token_ids = [id_mapping[id] if id in id_mapping else id for id in token_ids]

        text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)

        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def _convert_encoding(
        self,
        encoding: EncodingFast,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> Tuple[Dict[str, Any], List[EncodingFast]]:
        """
        Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list
        of encodings, take care of building a batch from overflowing tokens.

        Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are
        lists (overflows) of lists (tokens).

        Output shape: (overflows, sequence length)
        """
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_overflowing_tokens and encoding.overflowing is not None:
            encodings = [encoding] + encoding.overflowing
        else:
            encodings = [encoding]

        encoding_dict = defaultdict(list)
        added_vocab, _ = self.get_added_vocab_hacking()
        for e in encodings:
            # encoding_dict["input_ids"].append(e.ids)
            # Reassign ids of tokens due to the hacking strategy
            ids = []
            for id, token in zip(e.ids, e.tokens):
                if id <= self.mask_token_id:
                    ids.append(id)
                else:
                    if token.strip() in added_vocab:
                        ids.append(added_vocab[token.strip()])
                    else:
                        ids.append(self.unk_token_id)

            encoding_dict["input_ids"].append(ids)

            if return_token_type_ids:
                encoding_dict["token_type_ids"].append(e.type_ids)
            if return_attention_mask:
                encoding_dict["attention_mask"].append(e.attention_mask)
            if return_special_tokens_mask:
                encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
            if return_offsets_mapping:
                encoding_dict["offset_mapping"].append(e.offsets)
            if return_length:
                # encoding_dict["length"].append(len(e.ids))
                encoding_dict["length"].append(len(ids))

        return encoding_dict, encodings

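    # Illustrative note (added, not part of the uploaded file): with overflow
    # enabled, each key holds one list per window, e.g. for one overflow
    # encoding_dict["input_ids"] == [[...first window...], [...overflow...]].
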
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating
        and adding special tokens. A BERTweet sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """

        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """

        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. BERTweet does
        not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """

        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return

        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        out_merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merges_file):
            copyfile(self.merges_file, out_merges_file)

        return (out_vocab_file, out_merges_file)
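
A minimal usage sketch for the fast tokenizer (illustrative only; the local file paths are assumptions, and a prebuilt tokenizer.json is expected alongside the vocab/merges files):

    from tokenization_bertweet_fast import BertweetTokenizerFast

    tokenizer = BertweetTokenizerFast(
        vocab_file="vocab.txt",
        merges_file="bpe.codes",
        tokenizer_file="tokenizer.json",
    )
    enc = tokenizer("SC has first two presumptive cases of coronavirus")
    print(enc["input_ids"])
    print(tokenizer.decode(enc["input_ids"]))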