import tiktoken
from tiktoken import Encoding

from utils.log_util import logger

# cl100k_base encoding used by gpt-3.5-turbo
tokenizer = tiktoken.encoding_for_model('gpt-3.5-turbo')
# alias tiktoken's n_vocab as a Hugging Face-style vocab_size attribute
tokenizer.vocab_size = tokenizer.n_vocab
|
|
def decode(self, tokens, errors="replace"):
    """Decode token ids into a string; fall back to "null" if decoding fails."""
    try:
        decode_str = self._core_bpe.decode_bytes(tokens).decode("utf-8", errors=errors)
    except Exception:
        # e.g. invalid or out-of-range token ids
        decode_str = "null"
    return decode_str
|
|
def convert_ids_to_tokens(self, tokens):
    """Return the raw bytes of each token id (Hugging Face-style helper)."""
    return self.decode_tokens_bytes(tokens)
|
|
def get_vocab(self):
    """Return the vocab as a {token_str: token_id} dict.

    Ids that cannot be looked up, or whose bytes are not valid UTF-8,
    are skipped and logged.
    """
    vocab = {}
    key_error_list = []
    unicode_decode_error_list = []
    for i in range(self.vocab_size):
        try:
            token_byte = self.convert_ids_to_tokens([i])[0]
            token_str = token_byte.decode("utf-8")
            vocab[token_str] = i
        except KeyError:
            # id has no entry in the vocabulary (unused/reserved ids)
            key_error_list.append(i)
        except UnicodeDecodeError:
            # token bytes are a partial UTF-8 sequence and cannot stand alone as text
            unicode_decode_error_list.append((i, str(token_byte)))

    logger.info(f"gpt_35_turbo {len(key_error_list)} KeyError: {key_error_list}")
    logger.info(f"gpt_35_turbo {len(unicode_decode_error_list)} UnicodeDecodeError: {unicode_decode_error_list[:5]}")
    return vocab
|
|
# monkey-patch tiktoken's Encoding: replace decode with the fallback version
# and add the Hugging Face-style helpers defined above
Encoding.decode = decode
Encoding.convert_ids_to_tokens = convert_ids_to_tokens
Encoding.get_vocab = get_vocab
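

# Minimal usage sketch (illustrative, not part of the original module):
# exercises the patched helpers on the module-level gpt-3.5-turbo tokenizer.
if __name__ == "__main__":
    ids = tokenizer.encode("hello world")
    print(tokenizer.convert_ids_to_tokens(ids))  # raw token bytes, e.g. [b'hello', b' world']
    print(tokenizer.decode(ids))                 # "hello world"
    vocab = tokenizer.get_vocab()                # {token_str: token_id}, undecodable ids skipped
    print(len(vocab), tokenizer.vocab_size)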