import os
import sys

from transformers import AutoTokenizer, PreTrainedTokenizer

# Make the project root importable so the local packages below resolve
# when this script is run directly from its own directory.
project_dir_path = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(project_dir_path)

from scripts.datasets.load_dataset import load_text_dataset
from zkl_llmpt_iterator import HuggingFaceTokenizer, SpecialsTextTokenizer, TextTokenizer, bos_name, eos_name, \
    make_llmpt_dataset_numpy, pad_name

print("\nLoading dataset...")

# Project-local loader; returns the raw text dataset to be chunked below.
dataset = load_text_dataset()

print("\nLoading tokenizer...")

# Wrap the Hugging Face tokenizer in the project's TextTokenizer adapter.
# NOTE(review): Qwen2.5 tokenizers declare no BOS token — the helpers below
# reuse other special-token ids to compensate.
tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen2.5-0.5B')
tokenizer = HuggingFaceTokenizer(tokenizer)


def tokenizer_get_base(tokenizer: TextTokenizer) -> PreTrainedTokenizer:
    """Return the Hugging Face tokenizer at the bottom of a wrapper stack.

    Peels off any number of ``SpecialsTextTokenizer`` layers, then asserts
    the remaining object is a ``HuggingFaceTokenizer`` and returns the
    ``PreTrainedTokenizer`` it wraps.
    """
    unwrapped = tokenizer
    while isinstance(unwrapped, SpecialsTextTokenizer):
        unwrapped = unwrapped.base
    assert isinstance(unwrapped, HuggingFaceTokenizer)
    return unwrapped.base


def tokenizer_add_bos(tokenizer: TextTokenizer):
    """Wrap *tokenizer* so it exposes a BOS special token.

    Prefers the base tokenizer's own ``bos_token_id`` when one is defined.
    Qwen2.5 declares no BOS token (``bos_token_id is None``), so in that
    case the EOS id is reused as BOS — the behavior the original,
    unconditional ``eos_token_id`` reuse relied on.
    """
    base = tokenizer_get_base(tokenizer)
    bos_id = base.bos_token_id if base.bos_token_id is not None else base.eos_token_id
    return SpecialsTextTokenizer(tokenizer, reused_specials={bos_name: bos_id})


def tokenizer_add_eos(tokenizer: TextTokenizer):
    """Wrap *tokenizer* so it exposes the base tokenizer's EOS token id
    under the ``eos_name`` special."""
    eos_id = tokenizer_get_base(tokenizer).eos_token_id
    return SpecialsTextTokenizer(tokenizer, reused_specials={eos_name: eos_id})


def tokenizer_add_pad(tokenizer: TextTokenizer):
    """Wrap *tokenizer* so it exposes a PAD special token.

    Many Hugging Face tokenizers (GPT-style ones included) define no pad
    token, in which case ``pad_token_id`` is ``None``; previously that
    ``None`` was passed straight into ``reused_specials``. Fall back to the
    EOS id — the standard HF workaround — so padding always gets a valid
    token id. Qwen2.5 defines ``<|endoftext|>`` as pad, so its behavior is
    unchanged.
    """
    base = tokenizer_get_base(tokenizer)
    pad_id = base.pad_token_id if base.pad_token_id is not None else base.eos_token_id
    return SpecialsTextTokenizer(tokenizer, reused_specials={pad_name: pad_id})


print("\nPreparing datasets...")

# Small sizes on purpose: this script is an interactive browser, not training.
batch_samples_n = 4
chunk_tokens_n = 16

# Build the pretraining-style chunked dataset; the factory receives the
# tokenizer-decorating callbacks defined above so it can attach the
# BOS/EOS/PAD specials it needs.
dataset = make_llmpt_dataset_numpy(
    dataset=dataset,
    tokenizer=tokenizer,
    tokenizer_add_bos=tokenizer_add_bos,
    tokenizer_add_eos=tokenizer_add_eos,
    tokenizer_add_pad=tokenizer_add_pad,
    batch_samples_n=batch_samples_n,
    chunk_tokens_n=chunk_tokens_n)
# Use the dataset's (decorated) tokenizer from here on, so decoding below
# understands the added special tokens.
tokenizer = dataset.tokenizer

print("\nBrowsing...")

# Interactively step through batches: dump the raw token ids and their
# decoded text, pausing for Enter between batches.
for i, batch in enumerate(dataset):
    # Decode each sample's token row back to text for inspection.
    batch_text = [tokenizer.decode(tokens) for tokens in batch.tokens]

    print()
    print(f"batch[{i}]")
    print(f"{batch.head=}")
    print(f"{batch.tail=}")
    print(f"{batch.tokens.shape=}")
    print(f"{batch.tokens=}")
    print(f"{batch_text=}")
    input("(Press Enter to continue)")
