import re
import data
from tokenizer.tokenizer import Tokenizer
from tokenizer.gpt2 import GPT2Tokenizer
from dataset import create_gpt2_dataloader_v1

def simple_tokenizer():
    """Demo: build a whitespace/punctuation vocab over the corpus and encode it.

    Splits the text on punctuation and whitespace, builds a sorted
    token -> id vocabulary, then prints the first 100 token ids.
    """
    text = data.load_text()
    preprocessed_text = re.split(r'([,.:;?!_()"\']|--|\s)', text)
    # re.split keeps the captured separators, so the raw list contains empty
    # strings and bare whitespace; strip and drop those so the vocab holds
    # only real tokens.
    preprocessed_text = [token.strip() for token in preprocessed_text if token.strip()]
    vocab = {token: i for i, token in enumerate(sorted(set(preprocessed_text)))}
    tokenizer = Tokenizer(vocab)
    tokens = tokenizer.encode(text)
    print(tokens[:100])

def gpt2_tokenizer():
    """Demo: encode the corpus with the GPT-2 BPE tokenizer and show a sample."""
    corpus = data.load_text()
    encoder = GPT2Tokenizer()
    token_ids = encoder.encode(corpus)
    # Show only the leading slice to keep the output readable.
    print(token_ids[:100])

def gpt2_tokenize_chinese():
    """Demo: round-trip a Chinese sentence through the GPT-2 tokenizer.

    Encodes the sentence to BPE token ids and prints the decoded text to
    verify the encode/decode round trip preserves the input.
    """
    # Fixed typo: 人工智能 ("artificial intelligence"), not 人工只能.
    text = "人工智能是未来发展的重要方向。"
    tokenizer = GPT2Tokenizer()
    tokens = tokenizer.encode(text)
    print(tokenizer.decode(tokens))

def dataloader():
    """Demo: build a GPT-2 dataloader over the corpus and print one batch.

    Uses a context window of 4 tokens with stride 1 (heavily overlapping
    windows) and a batch size of 2, without shuffling, so the printed batch
    is deterministic.
    """
    corpus = data.load_text()
    loader = create_gpt2_dataloader_v1(
        corpus,
        max_length=4,
        stride=1,
        batch_size=2,
        shuffle=False,
    )
    first_batch = next(iter(loader))
    print(first_batch)

def main():
    """Entry point: run the currently selected demo.

    The other demos are kept commented out as manual toggles — uncomment
    the one you want to run.
    """
    # simple_tokenizer()
    # gpt2_tokenizer()
    # gpt2_tokenize_chinese()
    dataloader()

if __name__ == "__main__":
    main()