import tiktoken
# NOTE(review): alias to the tiktoken Encoding class itself (not an instance);
# unused anywhere in this file — presumably kept for interactive experiments.
# Confirm before removing.
tiktoken_toker = tiktoken.Encoding

from transformers import AutoTokenizer
# NOTE(review): alias to the AutoTokenizer.from_pretrained factory function;
# also unused in this file — verify no external module imports it.
transformers_toker = AutoTokenizer.from_pretrained


import json

class tokenizer:
    """Minimal string->id vocabulary lookup built from a HuggingFace ``tokenizer.json``.

    Loads the base ``model.vocab`` table plus all ``added_tokens`` entries
    (special tokens live outside the base vocab in the file format).  This is
    a plain lookup table, not a full tokenizer: it performs no text splitting
    or BPE merging.
    """

    def __init__(self, path: str):
        """Load the vocabulary from *path*, a ``tokenizer.json`` file.

        Raises:
            OSError: if the file cannot be opened/read.
            json.JSONDecodeError: if the file is not valid JSON.
            KeyError: if the JSON lacks the expected ``model.vocab`` /
                ``added_tokens`` structure.
        """
        self.path = path
        # tokenizer.json is UTF-8; be explicit so loading also works on
        # platforms whose default locale encoding is not UTF-8 (e.g. Windows).
        with open(path, 'r', encoding='utf-8') as f:
            loaded = json.load(f)
        self.vocab = loaded['model']['vocab']
        # Merge added tokens (specials etc.) so lookups see the full table.
        for e in loaded['added_tokens']:
            self.vocab[e['content']] = e['id']

        print('loaded tokenizer!')

    def encoding(self, string):
        """Return the integer id for *string*, or ``None`` if not in the vocab."""
        # dict.get is the idiomatic (and single-lookup) form of the
        # original try/except KeyError -> None.
        return self.vocab.get(string)


if __name__ == '__main__':
    # Load the Llama-3 vocab and dump every entry to a text file, one per line.
    toker = tokenizer("../Meta-Llama-3-8B/tokenizer.json")
    vocab_entries = list(toker.vocab)
    # print(vocab_entries[:150])

    with open('vocab.txt', 'w', encoding='utf-8') as out:
        out.writelines(entry + '\n' for entry in vocab_entries)
    # print(toker.encoding('hello'))