import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import regex
import sentencepiece as spm
from transformers import AddedToken, BartTokenizer, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bart-base-japanese-news": "https://huggingface.co/stockmark/bart-base-japanese-news/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bart-base-japanese-news": 512,
}

SPIECE_UNDERLINE = "▁"


class BartJapaneseNewsTokenizer(BartTokenizer):
    """
    Construct a BART tokenizer for Japanese news text. Based on [SentencePiece](https://github.com/google/sentencepiece).

    This tokenizer inherits from [`BartTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        remove_space (`bool`, *optional*, defaults to `False`):
            Whether or not to strip the text when tokenizing (removing excess spaces before and after the string).
        clean_text (`bool`, *optional*, defaults to `False`):
            Whether or not to clean the input text (normalize quotation marks and brackets, replace symbols, and
            keep only Japanese and basic Latin characters).
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of
            sequence. The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering. It is also used as the
            last token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        sp_model_kwargs (`dict`, *optional*):
            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other
            things, to set:

            - `enable_sampling`: Enable subword regularization.
            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.

              - `nbest_size = {0,1}`: No sampling is performed.
              - `nbest_size > 1`: samples from the nbest_size results.
              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
                using forward-filtering-and-backward-sampling algorithm.

            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
              BPE-dropout.
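
    Example (illustrative; assumes this module is importable as `tokenization_bart_japanese_news` and that the
    `stockmark/bart-base-japanese-news` checkpoint referenced above is available):

    ```python
    >>> from tokenization_bart_japanese_news import BartJapaneseNewsTokenizer

    >>> tokenizer = BartJapaneseNewsTokenizer.from_pretrained("stockmark/bart-base-japanese-news")
    >>> tokens = tokenizer.tokenize("今日は良い天気です。")
    >>> ids = tokenizer.convert_tokens_to_ids(tokens)

    >>> # Subword regularization (sampled segmentations) via `sp_model_kwargs`:
    >>> sampling_tokenizer = BartJapaneseNewsTokenizer.from_pretrained(
    ...     "stockmark/bart-base-japanese-news",
    ...     sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
    ... )
    ```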

    Attributes:
        sp_model (`SentencePieceProcessor`):
            The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        clean_text=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="</s>",
        pad_token="<pad>",
        cls_token="<s>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs
    ) -> None:
        # The mask token strips whitespace on both sides and skips normalization.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=True, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Bypass `BartTokenizer.__init__`, which expects GPT-2 style vocab/merges files,
        # and initialize the base tokenizer directly.
        super(BartTokenizer, self).__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            clean_text=clean_text,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.clean_text = clean_text
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; drop it here and rebuild it in `__setstate__`.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # For backward compatibility with pickles created before `sp_model_kwargs` existed.
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """
        Normalize the input string before SentencePiece tokenization.
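
        Illustrative behaviour with `clean_text=True` (a sketch; the exact output depends on Unicode
        normalization and the character filters below):

        ```python
        >>> tokenizer.preprocess_text("“東京”〈株〉")  # doctest: +SKIP
        '"東京"<株>'
        ```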
        """
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs

        outputs = unicodedata.normalize("NFKD", outputs)
        # Drop control characters.
        outputs = "".join([s for s in outputs if not self._is_control(s)])

        if self.clean_text:
            # Normalize curly quotation marks to their ASCII equivalents.
            outputs = outputs.replace("‘", "'").replace("’", "'").replace("“", '"').replace("”", '"')
            # Normalize angle brackets, and fold double corner / lenticular brackets into corner brackets.
            outputs = (
                outputs.replace("〈", "<")
                .replace("〉", ">")
                .replace("《", "<")
                .replace("》", ">")
                .replace("『", "「")
                .replace("』", "」")
                .replace("【", "「")
                .replace("】", "」")
            )
            # Replace geometric shapes and miscellaneous symbols with "*".
            outputs = regex.sub(r"[\p{GeometricShapes}\p{MiscellaneousSymbols}]", "*", outputs)
            # Keep only hiragana, katakana, basic Latin, kanji and common Japanese punctuation.
            outputs = "".join(
                regex.findall(r"[\p{InHiragana}\p{InKatakana}\p{BasicLatin}\p{Han}ー、。・「」〜…!?]", outputs)
            )

        if self.do_lower_case:
            outputs = outputs.lower()

        outputs = unicodedata.normalize("NFKC", outputs)
        outputs = outputs.strip()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string with SentencePiece, splitting a trailing comma off pieces that end in a digit.
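
        For instance (a sketch; actual pieces depend on the SentencePiece model), a piece such as `"▁9,"` is
        re-encoded so that the digit and the trailing comma become separate pieces:

        ```python
        >>> tokenizer._tokenize("9,")  # doctest: +SKIP
        ```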
        """
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                # Re-encode the piece without its trailing comma, then re-attach the comma
                # as its own piece.
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (strings) to a single string.
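
        Sub-tokens are decoded with SentencePiece, while special tokens are passed through verbatim with a
        separating space (a sketch assuming the default special tokens):

        ```python
        >>> tokenizer.convert_tokens_to_string(["▁今日", "は", "</s>"])  # doctest: +SKIP
        ```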
        """
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # Special tokens are not decoded with SentencePiece: flush the accumulated
            # sub-tokens first, then append the special token itself.
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _is_control(self, char):
        """
        Check whether `char` is a control character.

        Args:
            char (`str`): A single character.

        Returns:
            `bool`: `True` if `char` is a control character, `False` otherwise.
        """
        # Tab, newline and carriage return are treated as whitespace, not control characters.
        if char == "\t" or char == "\n" or char == "\r":
            return False
        cat = unicodedata.category(char)
        if cat.startswith("C") or ord(char) == 0 or ord(char) == 0xFFFD:
            return True
        return False

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the SentencePiece vocabulary to `save_directory` and return the path of the saved file.
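
        For example (illustrative paths):

        ```python
        >>> tokenizer.save_vocabulary("./saved", filename_prefix="ja")  # doctest: +SKIP
        ('./saved/ja-spiece.model',)
        ```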
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The original file is unavailable (e.g. the tokenizer was loaded from memory):
            # write out the serialized SentencePiece model instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        # Bypass `BartTokenizer.prepare_for_tokenization` (which handles GPT-2 style prefix
        # spaces) and forward the arguments unchanged.
        return super(BartTokenizer, self).prepare_for_tokenization(
            text, is_split_into_words=is_split_into_words, **kwargs
        )