import os
import sys

from zkl_datasets import load_dataset

root_dir_path = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(root_dir_path)

from llmpt.model import VocabForNLP, iterate_dataset
from llmpt.preprocess import TokenizedDataset
from scripts.config import preprocessed_dataset_path

# Browsing hyperparameters: total vocabulary size (including the special
# tokens VocabForNLP adds), context window length in tokens, stride between
# consecutive windows, and samples per batch.
vocab_size, context_size, context_stride, batch_size = 4096, 16, 8, 4

print("\nLoading dataset...")

# Load the preprocessed dataset from disk and validate its type explicitly.
# An `assert` is wrong for this check: asserts are stripped under `python -O`,
# which would let a mismatched dataset type slip through silently.
dataset = load_dataset(preprocessed_dataset_path)
if not isinstance(dataset, TokenizedDataset):
    raise TypeError(
        f"Expected a TokenizedDataset at {preprocessed_dataset_path!r}, "
        f"got {type(dataset).__name__}")

print("\nPreparing iterator...")

# Truncate the raw vocabulary so that, once VocabForNLP adds its special
# tokens, the total vocabulary size equals `vocab_size`.
vocab = dataset.vocab
vocab = vocab.truncate(vocab_size - len(VocabForNLP.specials))
vocab = VocabForNLP(vocab)

# Build the batched iterator over the tokenized dataset. `iterate_dataset`
# returns a pair; the second element is not needed here and is discarded.
iterator_config = dict(
    dataset=dataset,
    vocab=vocab,
    context_tokens_n=context_size,
    striding_tokens_n=context_stride,
    batch_samples_n=batch_size,
    device=None,
)
dataset_iterator, _ = iterate_dataset(**iterator_config)

print("\nBrowsing...")


def _decode(token_rows):
    # Map each row of token ids back to a string through the vocabulary.
    return ["".join(map(vocab.get_token, row)) for row in token_rows]


# Walk the dataset batch by batch, printing token ids alongside their
# decoded text, pausing for a keypress between batches.
for i, batch in enumerate(dataset_iterator):
    in_tokens, out_tokens, out_tokens_mask, offset, progress = batch
    in_str = _decode(in_tokens)
    out_str = _decode(out_tokens)

    print()
    print(f"batch[{i}]")
    print(f"{offset=}")
    print(f"{progress=}")
    print(f"{in_tokens.shape=}")
    print(f"{out_tokens.shape=}")
    print(f"{in_tokens=}")
    print(f"{out_tokens=}")
    print(f"{in_str=}")
    print(f"{out_str=}")
    input("(Press Enter to continue)")
