from dataclasses import dataclass
from typing import Callable

import torch
from zkl_aiutils_datasets import Dataset, SlicingShardedDataset, StateSafeThreadedBufferedDataset
from zkl_llmpt_iterator import LlmptDataBatchTorch, LlmptDataset, make_llmpt_dataset_torch

from scripts.datasets.clipping import TextDatasetClippingHparams, make_text_dataset_clipping_transform
from scripts.datasets.presets import load_preset_text_dataset, load_preset_text_tokenizer, \
    make_preset_text_tokenizer_add_bos, make_preset_text_tokenizer_add_eos, make_preset_text_tokenizer_add_pad


@dataclass(kw_only=True)
class DatasetHparams:
    """Hyperparameters selecting a preset text dataset and tokenizer."""
    text_dataset_name: str  # preset name resolved by load_preset_text_dataset
    text_dataset_split_name: str | None = None  # optional split; None lets the loader pick its default
    text_tokenizer_name: str  # preset name resolved by load_preset_text_tokenizer
    # Optional clipping config; when set, clipping is applied and EOS appending
    # is disabled downstream (see load_llmpt_dataset_from_hparams).
    text_dataset_clipping: TextDatasetClippingHparams | None = None


@dataclass(kw_only=True)
class IteratorHparams:
    """Hyperparameters controlling how the dataset is batched and chunked."""
    batch_samples_n: int  # samples per batch
    chunk_tokens_n: int  # tokens per chunk fed to the model


def load_llmpt_dataset_from_hparams(*,
    dataset_hparams: DatasetHparams,
    iterator_hparams: IteratorHparams,
    text_dataset_transform: Callable[[Dataset[str]], Dataset[str]] | None = None,
    device: torch.device | str | None = None,
) -> LlmptDataset[LlmptDataBatchTorch]:
    """Build an LLM-pretraining dataset from hparams objects.

    Unpacks `dataset_hparams`/`iterator_hparams` and delegates to
    `load_llmpt_dataset`. When clipping hparams are present, the clipping
    transform is composed before any caller-supplied transform, and EOS
    appending is disabled (clipped samples are not complete documents).

    :param dataset_hparams: dataset/tokenizer/clipping selection.
    :param iterator_hparams: batching and chunking sizes.
    :param text_dataset_transform: optional extra transform applied after clipping.
    :param device: target device passed through to the torch dataset builder.
    """
    text_dataset_name = dataset_hparams.text_dataset_name
    text_dataset_split_name = dataset_hparams.text_dataset_split_name
    text_tokenizer_name = dataset_hparams.text_tokenizer_name
    text_dataset_clipping = dataset_hparams.text_dataset_clipping
    batch_samples_n = iterator_hparams.batch_samples_n
    chunk_tokens_n = iterator_hparams.chunk_tokens_n

    if text_dataset_clipping is None:
        append_eos = True
    else:
        custom_transform = text_dataset_transform
        clipping_transform = make_text_dataset_clipping_transform(text_dataset_clipping)
        # BUG FIX: previously, when no custom transform was supplied, the
        # clipping transform was built and then discarded (text_dataset_transform
        # stayed None), so clipping silently never ran. Fall back to the
        # clipping transform alone in that case.
        if custom_transform is None:
            text_dataset_transform = clipping_transform
        else:
            text_dataset_transform = lambda dataset: custom_transform(clipping_transform(dataset))
        append_eos = False

    return load_llmpt_dataset(
        text_dataset_name=text_dataset_name,
        text_dataset_split_name=text_dataset_split_name,
        text_dataset_transform=text_dataset_transform,
        text_tokenizer_name=text_tokenizer_name,
        append_eos=append_eos,
        chunk_tokens_n=chunk_tokens_n,
        batch_samples_n=batch_samples_n,
        device=device)


def load_llmpt_dataset(*,
    text_dataset_name: str,
    text_dataset_split_name: str | None = None,
    text_dataset_transform: Callable[[Dataset[str]], Dataset[str]] | None = None,
    text_tokenizer_name: str,
    append_bos: bool = True,
    append_eos: bool = True,
    chunk_tokens_n: int,
    batch_samples_n: int,
    buffer_size: int = 4096,
    total_tokens_n: int | None = None,
    device: torch.device | str | None = None,
) -> LlmptDataset[LlmptDataBatchTorch]:
    """Load a preset text dataset and wrap it as a tokenized, batched LLM dataset.

    Pipeline: load preset text dataset -> (optional) shard per distributed rank
    -> (optional) caller transform -> tokenize/chunk/batch for torch ->
    thread-buffered prefetching.

    :param text_dataset_name: preset dataset name.
    :param text_dataset_split_name: optional split name.
    :param text_dataset_transform: optional transform over the text dataset.
    :param text_tokenizer_name: preset tokenizer name.
    :param append_bos: prepend a BOS token to each sample.
    :param append_eos: append an EOS token to each sample.
    :param chunk_tokens_n: tokens per chunk.
    :param batch_samples_n: samples per batch.
    :param buffer_size: prefetch buffer size for the threaded wrapper.
    :param total_tokens_n: optional cap on total tokens; None means unbounded/derived.
    :param device: target device for the torch batches.
    """
    dataset = load_preset_text_dataset(text_dataset_name, text_dataset_split_name)

    # Shard the dataset across ranks only when a process group is actually
    # running. An explicit check is used instead of try/except around
    # get_rank(): the exception raised without an initialized group differs
    # across torch versions (RuntimeError in older releases, ValueError in
    # newer ones), so catching ValueError alone was fragile.
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        process_rank = torch.distributed.get_rank()
        process_group_size = torch.distributed.get_world_size()
        dataset = SlicingShardedDataset(dataset, process_rank, process_group_size)

    if text_dataset_transform is not None:
        dataset = text_dataset_transform(dataset)

    dataset = make_llmpt_dataset_torch(
        dataset=dataset,
        tokenizer=load_preset_text_tokenizer(text_tokenizer_name),
        tokenizer_add_bos=make_preset_text_tokenizer_add_bos(text_tokenizer_name) if append_bos else None,
        tokenizer_add_eos=make_preset_text_tokenizer_add_eos(text_tokenizer_name) if append_eos else None,
        tokenizer_add_pad=make_preset_text_tokenizer_add_pad(text_tokenizer_name),
        keep_remainder_batches=True,
        chunk_tokens_n=chunk_tokens_n,
        batch_samples_n=batch_samples_n,
        total_tokens_n=total_tokens_n,
        device=device)
    # Capture tokenizer and the (possibly derived) token total before the
    # buffering wrapper hides them.
    tokenizer = dataset.tokenizer
    total_tokens_n = dataset.total_tokens_n

    dataset = StateSafeThreadedBufferedDataset(dataset, buffer_size)
    return LlmptDataset(dataset, tokenizer, total_tokens_n)
