replit-code-v1-3b-ct2-int8 / replit_lm_tokenizer.py
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Forked from the file src/transformers/models/bert_generation/tokenization_bert_generation.py from the HuggingFace Transformers library.
Permalink: https://github.com/huggingface/transformers/blob/04ab5605fbb4ef207b10bf2772d88c53fc242e83/src/transformers/models/bert_generation/tokenization_bert_generation.py
Tokenizer class for ReplitLM
Class is modified for compatibility with custom vocabulary and to achieve desired encode/decode behavior for Replit Code V1 3B model.
"""
import os
from shutil import copyfile
from typing import Any
from sentencepiece import SentencePieceProcessor
from transformers import PreTrainedTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
class ReplitLMTokenizer(PreTrainedTokenizer):
"""
Construct a ReplitLMTokenizer tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
bos_token (`str`, *optional*, defaults to `None`):
            The beginning of sequence token.
unk_token (`str`, *optional*, defaults to `"<|unk|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<|pad|>"`):
The token used for padding, for example when batching sequences of different lengths.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
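
    Example (an illustrative usage sketch; assumes a compatible `spiece.model`
    SentencePiece vocabulary file is available in the working directory):

    ```python
    tokenizer = ReplitLMTokenizer(vocab_file='spiece.model')
    encoded = tokenizer('def add(a, b):')
    print(encoded['input_ids'])
    print(tokenizer.decode(encoded['input_ids']))
    ```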
"""
vocab_files_names = VOCAB_FILES_NAMES
prefix_tokens: list[int] = []
model_input_names = ['input_ids', 'attention_mask']
def __init__(
self,
vocab_file: str,
bos_token: str | None = None,
        eos_token: str | None = '<|endoftext|>',
        unk_token: str | None = '<|unk|>',
        pad_token: str | None = '<|pad|>',
sep_token: str | None = None,
sp_model_kwargs: dict[str, Any] | None = None,
        **kwargs: Any
):
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
self.vocab_file = vocab_file
self.sp_model = SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
@property
def vocab_size(self) -> int:
return self.sp_model.GetPieceSize()
def get_vocab(self):
vocab = { self.convert_ids_to_tokens(i): i for i in range(self.vocab_size) }
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self):
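        # Drop the SentencePieceProcessor before pickling; it is rebuilt from the
        # vocabulary file in __setstate__.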
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__(self, dictionary: dict[Any, Any]):
self.__dict__ = dictionary
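        # Older pickled tokenizers may predate `sp_model_kwargs`, hence the guard below.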
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _tokenize(self, text: str, **_) -> list[str]:
"""Take as input a string and return a list of strings (tokens) for words/sub-words"""
return self.sp_model.Encode(text, out_type=str)
def _convert_token_to_id(self, token: str) -> int:
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index: int) -> str:
"""Converts an index (integer) in a token (str) using the vocab."""
return self.sp_model.IdToPiece(index)
def convert_tokens_to_string(self, tokens: list[str]) -> str:
"""Converts a sequence of tokens (string) in a single string."""
return self.sp_model.Decode(tokens)
def save_vocabulary(self, save_directory: str, filename_prefix: str | None = None) -> tuple[str]:
if not os.path.isdir(save_directory):
raise ValueError(f'Vocabulary path ({save_directory}) should be a directory')
out_vocab_file = os.path.join(
save_directory,
f"{filename_prefix}{'-' if filename_prefix else ''}{VOCAB_FILES_NAMES['vocab_file']}"
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
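            # No vocabulary file exists on disk, so serialize the in-memory
            # SentencePiece model instead of copying a file.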
with open(out_vocab_file, 'wb') as file:
content_spiece_model = self.sp_model.serialized_model_proto()
file.write(content_spiece_model) # type: ignore
return (out_vocab_file,)
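

if __name__ == '__main__':
    # Illustrative smoke-test sketch: encode and decode a snippet, then re-save the
    # vocabulary to a temporary directory. Assumes a compatible `spiece.model` file
    # in the working directory; the path is an example only.
    import tempfile

    tokenizer = ReplitLMTokenizer(vocab_file='spiece.model')
    encoded = tokenizer('print("hello world")')
    print(encoded['input_ids'])
    print(tokenizer.decode(encoded['input_ids']))
    with tempfile.TemporaryDirectory() as temporary_directory:
        print(tokenizer.save_vocabulary(temporary_directory))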