from dataclasses import dataclass
from typing import Callable, TYPE_CHECKING, Union

import numpy as np
from zkl_aiutils_datasets import Dataset, MappedDataset

from zkl_llmpt_iterator.tokenizer import SpecialsTextTokenizer, TextTokenizer
from .llmpt import LlmptDataset
from .llmpt_numpy import make_llmpt_dataset_raw

if TYPE_CHECKING:
    import torch


@dataclass
class LlmptDataBatchTorch:
    """One batch of LLM pre-training data, materialized as torch tensors.

    Instances are produced by :func:`make_llmpt_dataset_torch`, which creates
    every field as an ``int64`` tensor on the device given to that factory.
    """
    # Batched token ids (one row per sample in the batch).
    tokens: 'torch.Tensor'
    # Per-sample head values from the raw batch triple; exact semantics are
    # defined by make_llmpt_dataset_raw — TODO confirm there.
    head: 'torch.Tensor'
    # Per-sample tail values from the raw batch triple; exact semantics are
    # defined by make_llmpt_dataset_raw — TODO confirm there.
    tail: 'torch.Tensor'


def make_llmpt_dataset_torch(*,
    dataset: Dataset[str],
    tokenizer: TextTokenizer,
    tokenizer_add_bos: Callable[[TextTokenizer], SpecialsTextTokenizer] | None,
    tokenizer_add_eos: Callable[[TextTokenizer], SpecialsTextTokenizer] | None,
    tokenizer_add_pad: Callable[[TextTokenizer], SpecialsTextTokenizer] | None,
    chunk_tokens_n: int,
    batch_samples_n: int,
    keep_remainder_batches: bool = False,
    device: Union[str, 'torch.device', None] = None,
    total_tokens_n: int | None = None,
) -> LlmptDataset[LlmptDataBatchTorch]:
    """Build an LLM pre-training dataset whose batches are torch tensors.

    Delegates tokenization, chunking, and batching to
    :func:`make_llmpt_dataset_raw`, then lazily maps each raw batch — a
    sequence of ``(tokens, head, tail)`` triples — into a
    :class:`LlmptDataBatchTorch` of ``int64`` tensors placed on ``device``.

    All keyword arguments except ``device`` and ``total_tokens_n`` are passed
    straight through to :func:`make_llmpt_dataset_raw`.
    """
    dataset, tokenizer = make_llmpt_dataset_raw(
        dataset=dataset,
        tokenizer=tokenizer,
        tokenizer_add_bos=tokenizer_add_bos,
        tokenizer_add_eos=tokenizer_add_eos,
        tokenizer_add_pad=tokenizer_add_pad,
        chunk_tokens_n=chunk_tokens_n,
        batch_samples_n=batch_samples_n,
        keep_remainder_batches=keep_remainder_batches)

    # Imported here rather than at module top — presumably to keep torch an
    # optional dependency at import time; only TYPE_CHECKING uses it above.
    import torch

    def _transpose(batch):
        # [(tokens, head, tail), ...] -> ((tokens, ...), (head, ...), (tail, ...))
        return tuple(zip(*batch))

    def _as_tensor(column):
        # Stack through numpy first (fixing dtype to int64), then hand the
        # buffer to torch on the requested device.
        return torch.asarray(np.asarray(column, dtype=np.int64), device=device)

    def _to_batch(columns):
        return LlmptDataBatchTorch(
            tokens=_as_tensor(columns[0]),
            head=_as_tensor(columns[1]),
            tail=_as_tensor(columns[2]))

    dataset = MappedDataset(dataset, _transpose)
    dataset = MappedDataset(dataset, _to_batch)
    return LlmptDataset(dataset, tokenizer, total_tokens_n)
