import os
import sys

# Make the repository root importable (for `scripts.*` and `zkl_llmpt_iterator`)
# regardless of the working directory this script is launched from.
# abspath normalizes away the trailing "/../.." so the sys.path entry is
# canonical; the membership guard avoids piling up duplicates on re-import.
project_dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
if project_dir_path not in sys.path:
    sys.path.append(project_dir_path)

from scripts.datasets.load_dataset import load_text_dataset
from zkl_llmpt_iterator import SpecialsTextTokenizer, Utf8Tokenizer, bos_name, eos_name, make_llmpt_dataset_numpy, \
    pad_name

print("\nLoading dataset...")

# Raw text dataset from the project's loader; its exact structure is defined
# in scripts/datasets/load_dataset (not visible in this file).
dataset = load_text_dataset()

print("\nLoading tokenizer...")

# Base tokenizer -- presumably byte-level UTF-8 given the name; confirm
# against zkl_llmpt_iterator.  It gets extended with specials below.
tokenizer = Utf8Tokenizer()

# Factories handed to make_llmpt_dataset_numpy so it can extend the base
# tokenizer with the special tokens it needs.  Plain `def`s instead of
# lambda assignments (PEP 8 E731): same callables, but with real names in
# tracebacks and repr.
def tokenizer_add_bos(tok):
    """Wrap *tok* in a SpecialsTextTokenizer with the `bos_name` special added."""
    return SpecialsTextTokenizer(tok, added_specials=[bos_name])


def tokenizer_add_eos(tok):
    """Wrap *tok* in a SpecialsTextTokenizer with the `eos_name` special added."""
    return SpecialsTextTokenizer(tok, added_specials=[eos_name])


def tokenizer_add_pad(tok):
    """Wrap *tok* in a SpecialsTextTokenizer with the `pad_name` special added."""
    return SpecialsTextTokenizer(tok, added_specials=[pad_name])

print("\nPreparing datasets...")

# Small, browsing-friendly sizes: 4 samples per batch, 16 tokens per chunk.
batch_samples_n = 4
chunk_tokens_n = 16

# Wrap the raw text dataset into a chunked, batched LLM-pretraining dataset
# backed by numpy arrays.  The tokenizer_add_* factories let it augment the
# base tokenizer with whichever specials (BOS/EOS/PAD) it requires.
dataset = make_llmpt_dataset_numpy(
    dataset=dataset,
    tokenizer=tokenizer,
    tokenizer_add_bos=tokenizer_add_bos,
    tokenizer_add_eos=tokenizer_add_eos,
    tokenizer_add_pad=tokenizer_add_pad,
    batch_samples_n=batch_samples_n,
    chunk_tokens_n=chunk_tokens_n)
# Rebind to the dataset's (specials-augmented) tokenizer so decoding below
# understands the special token ids.
tokenizer = dataset.tokenizer

print("\nBrowsing...")

# Interactively step through batches, showing raw token ids next to the
# decoded text.  NOTE: the `{expr=}` debug f-strings embed the variable
# names themselves in the output, so renaming `batch`/`batch_text` would
# change what is printed.
for i, batch in enumerate(dataset):
    # Decode each sample's row of token ids back into a string.
    batch_text = [tokenizer.decode(tokens) for tokens in batch.tokens]

    print()
    print(f"batch[{i}]")
    print(f"{batch.head=}")
    print(f"{batch.tail=}")
    print(f"{batch.tokens.shape=}")
    print(f"{batch.tokens=}")
    print(f"{batch_text=}")
    input("(Press Enter to continue)")
