from dataclasses import dataclass
from typing import Callable

import numpy as np
from zkl_aiutils_datasets import Dataset, MappedDataset

from zkl_llmpt_iterator.tokenizer import SpecialsTextTokenizer, TextTokenizer
from .llmpt import LlmptDataset
from .llmpt_raw import make_llmpt_dataset_raw


@dataclass
class LlmptDataBatchNumpy:
    """One batch of LLM-pretraining data, with all fields as numpy arrays.

    Produced by ``make_llmpt_dataset_numpy`` below, which converts each raw
    batch field to ``int64`` via ``np.asarray``.
    """
    # Batched token ids (int64). Presumably shaped
    # (batch_samples_n, chunk_tokens_n) — confirm against make_llmpt_dataset_raw.
    tokens: np.ndarray
    # Per-sample "head" field from the raw batch triple (int64); exact
    # semantics are defined in make_llmpt_dataset_raw — not visible here.
    head: np.ndarray
    # Per-sample "tail" field from the raw batch triple (int64); counterpart
    # of `head`, semantics defined in make_llmpt_dataset_raw.
    tail: np.ndarray


def make_llmpt_dataset_numpy(*,
    dataset: Dataset[str],
    tokenizer: TextTokenizer,
    tokenizer_add_bos: Callable[[TextTokenizer], SpecialsTextTokenizer] | None,
    tokenizer_add_eos: Callable[[TextTokenizer], SpecialsTextTokenizer] | None,
    tokenizer_add_pad: Callable[[TextTokenizer], SpecialsTextTokenizer] | None,
    chunk_tokens_n: int,
    batch_samples_n: int,
    keep_remainder_batches: bool = False,
    total_tokens_n: int | None = None,
) -> LlmptDataset[LlmptDataBatchNumpy]:
    """Build an LLM-pretraining dataset whose batches are numpy arrays.

    Delegates tokenization, chunking, and batching to
    ``make_llmpt_dataset_raw``, then maps each raw batch — a sequence of
    per-sample (tokens, head, tail) triples — into a single
    ``LlmptDataBatchNumpy`` of ``int64`` arrays.

    All arguments are forwarded to ``make_llmpt_dataset_raw`` unchanged,
    except ``total_tokens_n``, which is passed through to the returned
    ``LlmptDataset``.
    """
    raw_dataset, raw_tokenizer = make_llmpt_dataset_raw(
        dataset=dataset,
        tokenizer=tokenizer,
        tokenizer_add_bos=tokenizer_add_bos,
        tokenizer_add_eos=tokenizer_add_eos,
        tokenizer_add_pad=tokenizer_add_pad,
        chunk_tokens_n=chunk_tokens_n,
        batch_samples_n=batch_samples_n,
        keep_remainder_batches=keep_remainder_batches)

    def _transpose(batch):
        # Turn a batch of per-sample triples into per-field tuples:
        # [(t0, h0, l0), (t1, h1, l1), ...] -> ((t0, t1, ...), (h0, ...), ...)
        return tuple(zip(*batch))

    def _to_numpy(fields):
        # fields[0]=tokens, fields[1]=head, fields[2]=tail (see _transpose).
        return LlmptDataBatchNumpy(
            tokens=np.asarray(fields[0], dtype=np.int64),
            head=np.asarray(fields[1], dtype=np.int64),
            tail=np.asarray(fields[2], dtype=np.int64))

    transposed = MappedDataset(raw_dataset, _transpose)
    as_numpy = MappedDataset(transposed, _to_numpy)
    return LlmptDataset(as_numpy, raw_tokenizer, total_tokens_n)
