import tiktoken
from tiktoken import Encoding

tokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")

# Mirror n_vocab under a vocab_size attribute (the name expected by
# Hugging Face-style tokenizer interfaces).
tokenizer.vocab_size = tokenizer.n_vocab


def decode(self, tokens: list[int], errors: str = "replace") -> str:
    # Decode token ids to a string; fall back to "null" if decoding fails.
    try:
        decode_str = self._core_bpe.decode_bytes(tokens).decode("utf-8", errors=errors)
    except Exception:
        decode_str = "null"
    return decode_str


def convert_ids_to_tokens(self, tokens: list[int]) -> list[bytes]:
    # Return the byte-level token piece for each id.
    return self.decode_tokens_bytes(tokens)


# Monkey-patch the methods onto tiktoken's Encoding class so every
# Encoding instance (including tokenizer above) picks them up.
Encoding.decode = decode
Encoding.convert_ids_to_tokens = convert_ids_to_tokens
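
# Usage sketch (an assumption about how the patched tokenizer is meant to be
# called; not part of the original snippet): round-trip a sample string and
# inspect the byte-level pieces via convert_ids_to_tokens.
if __name__ == "__main__":
    ids = tokenizer.encode("hello world")
    print(tokenizer.decode(ids))                 # -> "hello world"
    print(tokenizer.convert_ids_to_tokens(ids))  # -> list of bytes, e.g. [b'hello', b' world']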