import os
import sys

from zkl_aiutils_datasets import load_dataset
from zkl_llmpt_datasets import TokenizedDataset
from zkl_llmpt_iterator import GPTTrainingIterator, VocabForNLP

# Make the project root importable so `scripts.config` resolves when this
# file is run directly (it lives two levels below the project root).
# NOTE: this append must happen BEFORE the `scripts.config` import below.
project_dir_path = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(project_dir_path)

from scripts.config import default_dataset_path

# Demo parameters (kept small so batches are easy to eyeball).
vocab_size = 4096      # total vocab entries, including VocabForNLP specials
batch_samples_n = 4    # samples (rows) per batch tile
chunk_tokens_n = 16    # tokens per sample chunk

print("\nLoading dataset...")

# Load the dataset referenced by the project config; the rest of the script
# relies on TokenizedDataset-specific attributes (e.g. `.vocab`).
dataset = load_dataset(default_dataset_path)
# Validate with an explicit raise rather than `assert`: asserts are stripped
# under `python -O`, which would let a wrong dataset type fail much later
# with a confusing AttributeError.
if not isinstance(dataset, TokenizedDataset):
    raise TypeError(
        f"Expected a TokenizedDataset at {default_dataset_path!r}, "
        f"got {type(dataset).__name__}")

print("\nPreparing iterator...")

# Reserve room for the NLP special tokens, then wrap the truncated base
# vocab. `vocab` is reused below when decoding batches for display.
base_vocab = dataset.vocab
truncated = base_vocab.truncate(vocab_size - len(VocabForNLP.specials))
vocab = VocabForNLP(truncated)

# The iterator takes factories; close over the already-built objects.
iterator = GPTTrainingIterator.create(
    dataset_factory=lambda: dataset,
    vocab_factory=lambda: vocab,
    batch_samples_n=batch_samples_n,
    chunk_tokens_n=chunk_tokens_n)

print("\nBrowsing...")
# Step through batches one at a time, decoding each row back to text so the
# tile contents can be inspected by hand.
# NOTE: the `f"{name=}"` debug specs below embed the variable names in the
# printed output, so those names are part of the script's behavior.
for batch_i, (tile_tokens, tile_offset, doc_index) in enumerate(iterator):
    tile_str = []
    for row_tokens in tile_tokens:
        tile_str.append("".join(map(vocab.get_token, row_tokens)))

    print()
    print(f"batch[{batch_i}]")
    print(f"{doc_index=}")
    print(f"{tile_offset=}")
    print(f"{tile_tokens.shape=}")
    print(f"{tile_tokens=}")
    print(f"{tile_str=}")
    input("(Press Enter to continue)")
