import numpy as np
from zkl_pyutils_serialization import parse_json_value

from scripts.datasets.loading import DatasetHparams, IteratorHparams, load_llmpt_dataset_from_hparams


def browse_llmpt_dataset(
    dataset_hparams: DatasetHparams,
    iterator_hparams: IteratorHparams,
):
    """Interactively step through an LLM pre-training dataset batch by batch.

    For every batch, the first sample's token ids are decoded back to text
    via the dataset's tokenizer; the raw batch and the decoded string are
    printed, then execution blocks on stdin until the user presses Enter.

    Args:
        dataset_hparams: Hyperparameters selecting/configuring the dataset.
        iterator_hparams: Hyperparameters for batching/chunking iteration.
    """
    dataset = load_llmpt_dataset_from_hparams(
        dataset_hparams=dataset_hparams,
        iterator_hparams=iterator_hparams)
    for batch in dataset:
        first_sample_ids = np.asarray(batch.tokens[0])
        decoded_text = dataset.tokenizer.decode(first_sample_ids)
        print(batch)
        print(decoded_text)
        input()  # pause until the user advances to the next batch


if __name__ == '__main__':
    # Browse the English-Wikipedia dataset tokenized at the byte level
    # ("utf8"). The clipping spec lists two conditions with "multiple": True;
    # presumably every document must satisfy BOTH limits (<= 32 llama3 tokens
    # and <= 128 utf8 tokens) — TODO confirm against the clipping
    # implementation in scripts.datasets.loading.
    dataset_json = {
        "text_dataset_name": "wikien",
        "text_tokenizer_name": "utf8",
        "text_dataset_clipping": {
            "multiple": True,
            "conditions": [
                {
                    "text_tokenizer_name": "llama3",
                    "max_tokens_n": 32
                },
                {
                    "text_tokenizer_name": "utf8",
                    "max_tokens_n": 128
                }
            ]
        }
    }
    iterator_json = {
        "batch_samples_n": 2,
        "chunk_tokens_n": 128
    }
    browse_llmpt_dataset(
        dataset_hparams=parse_json_value(dataset_json, DatasetHparams),
        iterator_hparams=parse_json_value(iterator_json, IteratorHparams))
    # NOTE(review): a llama3-tokenized variant previously lived here as
    # commented-out code; to reproduce it, set "text_tokenizer_name" to
    # "llama3" and "chunk_tokens_n" to 32 above.
