import tiktoken
from tiktoken import Encoding
from utils.log_util import logger

tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")
tokenizer.vocab_size = tokenizer.n_vocab
tokenizer.comments = ("tiktoken is a fast BPE tokeniser for use with OpenAI's models. "
                      "16 token ids in this encoding raise KeyError (unassigned special-token slots).")
tokenizer.reversible = True  # Reversible and lossless: tokens can be converted back into the original text.


def decode(self, tokens, errors="replace"):
    # Patched Encoding.decode(tokens: list[int], errors: str = "replace") -> str
    # that returns "null" instead of raising on undecodable token ids.
    try:
        decode_str = self._core_bpe.decode_bytes(tokens).decode("utf-8", errors=errors)
    except Exception:
        decode_str = "null"
    return decode_str


def convert_ids_to_tokens(self, tokens):
    """Map token ids to their raw byte representations."""
    # Use self rather than the module-level tokenizer so the patch works on any Encoding instance.
    return self.decode_tokens_bytes(tokens)


def get_vocab(self, token_type="str"):
    """Return the vocab as a dict mapping token bytes to token ids.

    :param token_type: one of ["str", "byte"]; currently unused, keys are always bytes
    :return: dict of {token_bytes: token_id}
    """
    vocab = {}
    key_error_list = []
    unicode_decode_error_list = []
    for i in range(self.vocab_size):
        try:
            token_byte = self.convert_ids_to_tokens([i])[0]
            token_byte.decode("utf-8")  # probe for UnicodeDecodeError; the str form is not used as the key
            vocab[token_byte] = i
        except KeyError:  # 16 KeyErrors: ids 100256 and 100261-100275 are unassigned
            key_error_list.append(i)
            # vocab[f"[KeyError]-{i}"] = i
        except UnicodeDecodeError:  # 773 UnicodeDecodeErrors: token bytes that are not valid UTF-8 on their own
            unicode_decode_error_list.append((i, str(token_byte)))
            vocab[token_byte] = i
    # vocab.update(self.added_tokens_encoder)
    logger.info(f"gpt_35_turbo {len(key_error_list)} KeyError: {key_error_list}")
    logger.info(f"gpt_35_turbo {len(unicode_decode_error_list)} UnicodeDecodeError: {unicode_decode_error_list[:5]}")
    return vocab


# tiktoken patch: attach the helpers to Encoding so every instance picks them up
Encoding.decode = decode
Encoding.convert_ids_to_tokens = convert_ids_to_tokens
Encoding.get_vocab = get_vocab
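

# Usage sketch (my addition, not part of the original module): exercises the three
# patched methods on the gpt-3.5-turbo encoding. The ids come from encode(), so
# nothing below depends on hard-coded token ids; printed values are illustrative.
if __name__ == "__main__":
    ids = tokenizer.encode("hello world")
    print(ids)                                   # list of token ids for the input text
    print(tokenizer.convert_ids_to_tokens(ids))  # raw bytes per token, via the patched method
    print(tokenizer.decode(ids))                 # "hello world" round-trips, since the encoding is lossless
    vocab = tokenizer.get_vocab()
    print(len(vocab))                            # vocab_size minus the 16 KeyError ids; UnicodeDecodeError ids keep byte keys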