from collections.abc import Iterable, Iterator
from dataclasses import dataclass

import numpy as np
from zkl_aiutils_datasets import AugmentedDataset, MappedDataset
from zkl_llmpt_iterator import TextTokenizer

from scripts.datasets.presets import load_preset_text_tokenizer


@dataclass(kw_only=True)
class TextDatasetClippingCondition:
    """One token-count bound: a text must fit in `max_tokens_n` tokens
    as measured by the named tokenizer."""
    # Preset name, resolved via load_preset_text_tokenizer.
    text_tokenizer_name: str
    # Maximum number of tokens allowed under that tokenizer.
    max_tokens_n: int


@dataclass(kw_only=True)
class TextDatasetClippingHparams:
    """Hyperparameters for the text-clipping dataset transform.

    NOTE: a defaulted field preceding a required one is valid here only
    because ``kw_only=True`` makes every field keyword-only.
    """
    # True: expand each text into all of its clipped chunks.
    # False: replace each text with its first clipped chunk only.
    multiple: bool = False
    # At least one condition is required; the first is the primary one
    # (its tokenizer drives the clipping), the rest act as secondary bounds.
    conditions: list[TextDatasetClippingCondition]


def iter_clip(
    text: str,
    primary_tokenizer: "TextTokenizer",
    primary_bound: int,
    secondary_tokenizers_and_bound: Iterable[tuple["TextTokenizer", int]],
) -> Iterator[tuple[str, np.ndarray]]:
    """Split *text* into consecutive chunks that satisfy every token bound.

    The text is encoded with *primary_tokenizer* and cut into windows of at
    most *primary_bound* tokens. Each candidate window is decoded back to text
    and re-encoded with every secondary tokenizer; while any secondary
    encoding exceeds its bound, the window is shrunk proportionally to the
    worst overflow and re-checked.

    Yields:
        ``(clipped_text, clipped_primary_tokens)`` pairs covering the whole
        input in order. An empty token sequence yields nothing.

    Raises:
        ValueError: if a violating window cannot be shrunk any further
            (the secondary bound is too narrow relative to the primary one).
    """
    # Materialize once: the argument may be a one-shot iterator, yet it is
    # re-iterated for every candidate window below. Without this, a generator
    # would be exhausted after the first window and all later windows would
    # silently skip the secondary bounds.
    secondary_tokenizers_and_bound = list(secondary_tokenizers_and_bound)

    primary_tokens = primary_tokenizer.encode(text)

    head = 0
    while head < len(primary_tokens):
        tail = min(len(primary_tokens), head + primary_bound)
        while True:
            clipped_primary_tokens = primary_tokens[head:tail]
            clipped_text = primary_tokenizer.decode(clipped_primary_tokens)

            # Worst-case overflow ratio across all secondary tokenizers
            # (0 when there are none, i.e. the window is always accepted).
            secondary_ratios = []
            for secondary_tokenizer, secondary_bound in secondary_tokenizers_and_bound:
                clipped_secondary_tokens = secondary_tokenizer.encode(clipped_text)
                secondary_ratios.append(len(clipped_secondary_tokens) / secondary_bound)
            secondary_ratio = max(secondary_ratios, default=0)

            if secondary_ratio <= 1:
                yield clipped_text, clipped_primary_tokens
                head = tail
                break

            # Shrink the window proportionally to the worst overflow; if the
            # shrink makes no progress, the bounds are inconsistent.
            new_tail = head + int((tail - head) / secondary_ratio)
            if new_tail <= head or new_tail >= tail:
                raise ValueError("The bound is too narrow!")
            tail = new_tail


def make_text_dataset_clipping_transform(hparams: TextDatasetClippingHparams):
    """Build a dataset transform that clips texts to the configured token bounds.

    The first condition is the primary one: its tokenizer drives the clipping
    windows. Any further conditions act as secondary constraints that may
    shrink a window (see ``iter_clip``).

    Returns:
        A callable mapping a dataset to a clipped dataset. With
        ``hparams.multiple`` set, every text is expanded into all of its
        clipped chunks (``AugmentedDataset``); otherwise each text is replaced
        by its first chunk only (``MappedDataset``).

    Raises:
        ValueError: if ``hparams.conditions`` is empty.
    """
    # Validate before loading any tokenizer presets.
    if not hparams.conditions:
        raise ValueError("Expected at least one primary condition!")

    tokenizers_and_bound = [
        (load_preset_text_tokenizer(condition.text_tokenizer_name), condition.max_tokens_n)
        for condition in hparams.conditions
    ]
    primary_tokenizer, primary_bound = tokenizers_and_bound[0]
    secondary_tokenizers_and_bound = tokenizers_and_bound[1:]

    if hparams.multiple:
        def augment_func(text: str) -> list[str]:
            iterator = iter_clip(text, primary_tokenizer, primary_bound, secondary_tokenizers_and_bound)
            return [clipped_text for clipped_text, _ in iterator]

        return lambda dataset: AugmentedDataset(dataset, augment_func)

    else:
        def map_func(text: str) -> str:
            iterator = iter_clip(text, primary_tokenizer, primary_bound, secondary_tokenizers_and_bound)
            # A text with no primary tokens yields no chunks; fall back to the
            # empty string instead of letting next() raise StopIteration out
            # of the mapping callback.
            clipped_text, _ = next(iterator, ("", None))
            return clipped_text

        return lambda dataset: MappedDataset(dataset, map_func)
