"""
 BPE (byte pair encoding) is the tokenization scheme used to train LLMs such as GPT-2, GPT-3, etc.
"""
from importlib.metadata import version
import tiktoken
print("tiktoken version: ", version("tiktoken"))

# Sample text mixing ordinary words, a special marker, and made-up tokens
# so the BPE tokenizer has to fall back to sub-word pieces.
bpe_text = "Hello, do you like tea? <|endoftext|> In the sunlit terraces of someunknownPlace. dingker Akwirw ier"

tokenizer = tiktoken.get_encoding("gpt2")

# Encode, treating <|endoftext|> as a single allowed special token.
# Expected ids: [15496, 11, 466, 345, 588, 8887, 30, 220, 50256, 554, 262,
#                4252, 18250, 8812, 2114, 286, 617, 34680, 27271, 13, ...]
token_ids = tokenizer.encode(bpe_text, allowed_special={"<|endoftext|>"})
print(token_ids)

# Round-trip back to text to confirm the encoding is lossless.
round_trip = tokenizer.decode(token_ids)
print(round_trip)

def make_voca():
    """Show next-token prediction (context -> target) pairs from a BPE-encoded corpus.

    Reads ``the-verdict.txt``, encodes it with the module-level GPT-2
    ``tokenizer``, and prints growing context windows alongside the token
    each window should predict — first as ids, then as decoded text.
    """
    with open("the-verdict.txt", 'r', encoding="utf-8") as f:
        raw_text = f.read()

    token_ids = tokenizer.encode(raw_text)
    print(len(token_ids))
    # Skip the first 50 tokens purely to get a more interesting sample passage.
    sample = token_ids[50:]

    context_size = 4
    # y is x shifted left by one: each position's target is the next token.
    #   x: [290, 4920, 2241, 287]
    #   y:      [4920, 2241, 287, 257]
    x = sample[:context_size]
    y = sample[1:context_size+1]
    print(f"x: {x}")
    print(f"y:      {y}")

    # For each prefix length, print "context ----> next token", e.g.:
    #   [290] ----> 4920            |   and ---->  established
    #   [290, 4920] ----> 2241      |   and established ---->  himself
    for end in range(1, context_size + 1):
        prefix = sample[:end]
        target = sample[end]
        print(tokenizer.decode(prefix), "---->", tokenizer.decode([target]))

make_voca()