from collections.abc import Iterable
from typing import Callable

import numpy as np
from zkl_aiutils_datasets import Dataset, MappedDataset

from zkl_llmpt_iterator.tokenizer import SpecialsTextTokenizer, TextTokenizer
from .array_sliding_window import ArraySlidingWindowDataset, ArraySlidingWindowSample
from .channels_batched import ChannelsBatchedDataset

# Registry names of the special tokens on a SpecialsTextTokenizer; looked up
# with get_special_wid(name) after the corresponding tokenizer_add_* wrapper
# has been applied.
bos_name = "bos"  # beginning-of-sequence marker
eos_name = "eos"  # end-of-sequence marker
pad_name = "pad"  # filler token for window/batch padding


def make_llmpt_dataset_raw(*,
    dataset: Dataset[str],
    tokenizer: TextTokenizer,
    tokenizer_add_bos: Callable[[TextTokenizer], SpecialsTextTokenizer] | None,
    tokenizer_add_eos: Callable[[TextTokenizer], SpecialsTextTokenizer] | None,
    tokenizer_add_pad: Callable[[TextTokenizer], SpecialsTextTokenizer] | None,
    chunk_tokens_n: int,
    batch_samples_n: int,
    keep_remainder_batches: bool,
) -> tuple[Dataset[tuple[ArraySlidingWindowSample, ...]], TextTokenizer]:
    """Build the full text -> tokenized -> formated -> windowed -> batched
    pipeline for LLM pretraining.

    Stages (each optional wrapper also extends the tokenizer with the
    corresponding special token):
      1. encode raw text into word-id arrays,
      2. optionally prepend a BOS wid and/or append an EOS wid,
      3. cut each sample into sliding windows of ``chunk_tokens_n + 1`` tokens
         (the extra token provides next-token targets),
      4. group windows into batches of ``batch_samples_n`` channels.

    :param dataset: source dataset of raw text samples.
    :param tokenizer: base text tokenizer; may be wrapped by the callbacks.
    :param tokenizer_add_bos: wraps the tokenizer with a BOS special, or None.
    :param tokenizer_add_eos: wraps the tokenizer with an EOS special, or None.
    :param tokenizer_add_pad: wraps the tokenizer with a PAD special, or None.
        Without it, windows are unpadded and remainder batches are dropped.
    :param chunk_tokens_n: tokens per training chunk (window is one longer).
    :param batch_samples_n: number of parallel channels per batch.
    :param keep_remainder_batches: if True (and PAD exists), exhausted channels
        are filled with all-PAD samples instead of dropping the batch.
    :return: the batched dataset and the final (possibly wrapped) tokenizer.
    """
    # tokenized dataset: raw text -> base-vocabulary word-id arrays
    dataset = MappedDataset(dataset, tokenizer.encode)

    # formated dataset.
    # NOTE: `tokenizer` is rebound by each wrapper below while MappedDataset
    # applies its mapper lazily — bind the *current* tokenizer and wid as
    # lambda defaults so a later rebinding (eos/pad wrapping) cannot leak into
    # an earlier stage's closure (Python late-binding pitfall). Each stage's
    # transform() lifts the previous stage's wids into the new vocabulary.
    if tokenizer_add_bos is not None:
        tokenizer = tokenizer_add_bos(tokenizer)
        bos_wid = tokenizer.get_special_wid(bos_name)
        dataset = MappedDataset(dataset,
            lambda sample, _tok=tokenizer, _bos=bos_wid:
                np.concatenate([[_bos], _tok.transform(sample)]))
    if tokenizer_add_eos is not None:
        tokenizer = tokenizer_add_eos(tokenizer)
        eos_wid = tokenizer.get_special_wid(eos_name)
        dataset = MappedDataset(dataset,
            lambda sample, _tok=tokenizer, _eos=eos_wid:
                np.concatenate([_tok.transform(sample), [_eos]]))

    # batched dataset
    if tokenizer_add_pad is not None:
        tokenizer = tokenizer_add_pad(tokenizer)
        pad_wid = tokenizer.get_special_wid(pad_name)
    else:
        pad_wid = ArraySlidingWindowDataset.NoPadding
    dataset = MappedDataset(dataset,
        lambda sample, _tok=tokenizer, _pad=pad_wid: ArraySlidingWindowDataset(
            array=_tok.transform(sample),
            # +1: adjacent windows overlap by one token so every chunk carries
            # its next-token targets
            win_size=chunk_tokens_n + 1,
            win_stride=chunk_tokens_n,
            pad_item=_pad))
    if tokenizer_add_pad is not None and keep_remainder_batches:
        # all-PAD filler sample used to top up exhausted channels
        pad_item = ArraySlidingWindowSample(
            data=np.full(chunk_tokens_n + 1, pad_wid, dtype=np.int64),
            head=0, tail=0)
    else:
        pad_item = ChannelsBatchedDataset.NoPadding
    dataset = ChannelsBatchedDataset(dataset, batch_samples_n, pad_item)

    return dataset, tokenizer


def make_formated_dataset(
    tokenized_dataset: Iterable[np.ndarray],
    formated_tokenizer: SpecialsTextTokenizer
) -> Dataset[np.ndarray]:
    """Surround every tokenized sample with the BOS and EOS special wids.

    :param tokenized_dataset: samples as word-id arrays.
    :param formated_tokenizer: tokenizer carrying the "bos"/"eos" specials;
        its transform() lifts sample wids into the specials vocabulary.
    :return: dataset yielding ``[bos, *transform(sample), eos]`` arrays.
    """
    begin_wid = formated_tokenizer.get_special_wid(bos_name)
    end_wid = formated_tokenizer.get_special_wid(eos_name)

    def _wrap(sample: np.ndarray) -> np.ndarray:
        body = formated_tokenizer.transform(sample)
        return np.concatenate([[begin_wid], body, [end_wid]])

    return MappedDataset(tokenized_dataset, _wrap)


def make_batched_dataset(
    tokenized_dataset: Iterable[np.ndarray],
    windowed_tokenizer: SpecialsTextTokenizer,
    batch_samples_n: int,
    chunk_tokens_n: int,
    pad_empty_channel: bool,
) -> Dataset[tuple[ArraySlidingWindowSample, ...]]:
    """Cut tokenized samples into sliding windows and batch them by channel.

    :param tokenized_dataset: samples as word-id arrays.
    :param windowed_tokenizer: tokenizer carrying the "pad" special; its
        transform() lifts sample wids into the padded vocabulary.
    :param batch_samples_n: number of parallel channels per batch.
    :param chunk_tokens_n: tokens per chunk; each window is one token longer
        so chunks carry next-token targets.
    :param pad_empty_channel: if True, exhausted channels are filled with
        all-PAD samples instead of dropping the remainder batch.
    :return: dataset yielding batches of window samples.
    """
    pad_wid = windowed_tokenizer.get_special_wid(pad_name)
    window_len = chunk_tokens_n + 1

    def _to_windows(sample: np.ndarray) -> ArraySlidingWindowDataset:
        return ArraySlidingWindowDataset(
            array=windowed_tokenizer.transform(sample),
            win_size=window_len,
            win_stride=chunk_tokens_n,
            pad_item=pad_wid)

    windowed = MappedDataset(tokenized_dataset, _to_windows)

    if pad_empty_channel:
        filler = ArraySlidingWindowSample(
            data=np.full(window_len, pad_wid, dtype=np.int64),
            head=0, tail=0)
    else:
        filler = ChannelsBatchedDataset.NoPadding
    return ChannelsBatchedDataset(windowed, batch_samples_n, filler)
