import warnings
from dataclasses import dataclass
from typing import Optional, TypeVar

import numpy as np
import torch
from tqdm import tqdm
from zkl_aiutils_datasets import BoundedDataset, Dataset
from zkl_aiutils_training import Dock, DockPlugin, ProcessingTaskPlugin
from zkl_aiutils_training.scanning import Postpone
from zkl_llmpt_iterator import LlmptDataBatchTorch, LlmptDataset
from zkl_ptutils_metrics import Metric, SumMetric
from zkl_ptutils_training import DelegatingInferringEngine, EarlyStopByMaxThreshold, MLInferringEngine, \
    MLTrainingEngine, MLValidatingTask, MetricOrValue, MetricsProvider, MetricsRecording, StandardValidator, \
    TqdmProgressBar, ValidationProvider, ZklDatasetProducer
from zkl_ptutils_training.plugins.metrics import MetricScope
from zkl_ptutils_training.utils.attributes import get_entry_or_attribute

from scripts.datasets.loading import DatasetHparams, IteratorHparams, load_llmpt_dataset_from_hparams
from scripts.datasets.presets import load_preset_text_tokenizer
from scripts.utils.distributed import torch_distributed_is_rank0

AnyInput = TypeVar('AnyInput')
AnyOutput = TypeVar('AnyOutput')


class BpbMetric(Metric[Optional[float], tuple[torch.Tensor, torch.Tensor]]):
    """Bits-per-byte (BPB) metric accumulated from per-batch cross-entropy sums.

    Accumulates ``(ce_sum, tokens_n)`` pairs via :meth:`update` and computes
    ``bpb = ce_total / total_bytes_n / ln(2)``, where ``ce_total`` is rescaled
    by ``total_tokens_n / processed_tokens_n`` to compensate for tokens lost or
    duplicated by chunking.
    """

    def __init__(self, *, total_tokens_n: int, total_bytes_n: int):
        # Ground-truth corpus sizes counted ahead of time
        # (see load_llmpt_dataset_for_bpb_validating).
        self.total_tokens_n = total_tokens_n
        self.total_bytes_n = total_bytes_n
        self.ce_sum_metric = SumMetric()  # running sum of cross-entropy (nats)
        self.tn_sum_metric = SumMetric()  # running count of processed tokens

    def warn_error(self, epsilon: float = 0.01):
        """Warn when the processed-token count deviates from the expected total
        by more than ``epsilon`` (relative error)."""
        tn_sum = self.tn_sum_metric.compute()
        if tn_sum is None:
            # Nothing accumulated yet; nothing to check.
            return
        tn_sum = tn_sum.cpu().item()
        ttn_sum = self.total_tokens_n
        tn_err_rate = abs(tn_sum - ttn_sum) / ttn_sum
        if tn_err_rate <= epsilon:
            return
        warnings.warn(
            f"The chunking has caused {tn_err_rate :.2%} error in counting token number. "
            "To fix it, the ce_sum is scaled by ratio total_tokens_n/processed_tokens_n. "
            "To avoid this problem, you may consider setting a larger `total_docs_n`.")

    def compute(self) -> float | None:
        """Return the bits-per-byte value, or None when nothing was accumulated.

        The previous annotation claimed ``torch.Tensor | None``, but the value
        is computed from plain Python floats after ``.item()``; the annotation
        is corrected to ``float | None``.
        """
        ce_sum = self.ce_sum_metric.compute()
        tn_sum = self.tn_sum_metric.compute()
        if ce_sum is None or tn_sum is None:
            return None

        ce_sum = ce_sum.cpu().item()
        tn_sum = tn_sum.cpu().item()
        if tn_sum == 0:
            # All accumulated token counts were zero; the rescaling below would
            # raise ZeroDivisionError, and no meaningful BPB exists.
            return None
        ttn_sum = self.total_tokens_n
        tbn_sum = self.total_bytes_n

        # Rescale so the CE sum corresponds to the full expected token count,
        # compensating for chunking error (see warn_error).
        ce_sum = ce_sum / tn_sum * ttn_sum
        # Normalize by corpus bytes and convert nats -> bits (divide by ln 2).
        return float(ce_sum / tbn_sum / np.log(2))

    def update(self, patch: tuple[torch.Tensor, torch.Tensor]):
        """Accumulate one ``(ce_sum, tokens_n)`` pair."""
        ce, tn = patch
        self.ce_sum_metric.update(ce)
        self.tn_sum_metric.update(tn)

    def reset(self):
        """Clear all accumulated state."""
        self.ce_sum_metric.reset()
        self.tn_sum_metric.reset()


class BpbMetricsProvider(MetricsProvider, ProcessingTaskPlugin[AnyInput, AnyOutput]):
    """Exposes a :class:`BpbMetric` as the epoch-scoped ``'bpb'`` metric and
    feeds it from each processed output's recorded cross-entropy entry."""

    def __init__(self, *, total_tokens_n: int, total_bytes_n: int):
        self._metric = BpbMetric(
            total_tokens_n=total_tokens_n,
            total_bytes_n=total_bytes_n)

    def _get_scoped_metrics(self) -> dict[str, tuple[Metric, MetricScope]]:
        # A single metric, reported once per epoch.
        return {'bpb': (self._metric, 'epoch')}

    def on_after_process(self, output: AnyOutput) -> AnyOutput:
        # The 'ce' metric entry is expected to carry a (ce_sum, tokens_n) pair.
        # If another plugin has not attached it yet, postpone this hook.
        try:
            ce_value, tn_value = get_entry_or_attribute(output, ['metrics', 'ce', 'value'])
        except (KeyError, AttributeError):
            raise Postpone
        self._metric.update((ce_value, tn_value))
        return output

    def on_after_run(self):
        # Surface any token-count drift introduced by chunking.
        self._metric.warn_error()


def load_llmpt_dataset_for_bpb_validating(*,
    dataset_hparams: DatasetHparams,
    iterator_hparams: IteratorHparams,
    stopping_tokens_n: int | None = None,
    device: torch.device | str | None = None,
) -> tuple[LlmptDataset[LlmptDataBatchTorch], int, int]:
    """Load an llmpt dataset while counting the total bytes and tokens of its
    text, for BPB computation.

    The text dataset is iterated eagerly inside the loader's transform hook to
    accumulate byte/token totals (via SumMetric, so counts presumably aggregate
    across distributed ranks — confirm SumMetric semantics). When
    ``stopping_tokens_n`` is reached, the dataset is truncated to the documents
    seen so far; ``None`` disables the limit and the whole dataset is counted.

    Returns:
        ``(dataset, total_bytes_n, total_tokens_n)`` — note bytes before tokens.
    """
    total_bytes_n_metric = SumMetric(torch.asarray(0, dtype=torch.int64, device=device))
    total_tokens_n_metric = SumMetric(torch.asarray(0, dtype=torch.int64, device=device))

    def text_dataset_bpb_transform(dataset: Dataset[str]) -> Dataset[str]:
        is_rank0 = torch_distributed_is_rank0()

        tokenizer = load_preset_text_tokenizer(dataset_hparams.text_tokenizer_name)

        # Only rank 0 renders a progress bar to avoid interleaved output.
        progressbar = tqdm(
            total=stopping_tokens_n,
            unit='token', unit_scale=True,
            desc='Counting total bytes and tokens...') \
            if is_rank0 else None

        try:
            for i, text in enumerate(dataset):
                text_bytes = bytes(text, 'utf8')
                text_tokens = tokenizer.encode(text)

                text_bytes_n = torch.asarray(len(text_bytes), dtype=torch.int64, device=device)
                text_tokens_n = torch.asarray(len(text_tokens), dtype=torch.int64, device=device)
                total_bytes_n_metric.update(text_bytes_n)
                total_tokens_n_metric.update(text_tokens_n)

                total_bytes_n = total_bytes_n_metric.compute().item()
                total_tokens_n = total_tokens_n_metric.compute().item()

                if progressbar is not None:
                    # tqdm.update takes a delta, not an absolute position.
                    progressbar.update(
                        total_tokens_n - progressbar.n)
                    progressbar.set_postfix(
                        total_bytes_n=total_bytes_n,
                        total_tokens_n=total_tokens_n)

                if stopping_tokens_n is not None and total_tokens_n >= stopping_tokens_n:
                    # Truncate to the documents consumed so far.
                    dataset = BoundedDataset(dataset, i + 1)
                    break
        finally:
            # Close the bar even if counting raises, so terminal state is sane.
            if progressbar is not None:
                progressbar.close()

        return dataset

    dataset = load_llmpt_dataset_from_hparams(
        dataset_hparams=dataset_hparams,
        iterator_hparams=iterator_hparams,
        text_dataset_transform=text_dataset_bpb_transform,
        device=device)

    total_bytes_n = total_bytes_n_metric.compute().item()
    total_tokens_n = total_tokens_n_metric.compute().item()
    return dataset, total_bytes_n, total_tokens_n


@dataclass(kw_only=True)
class BpbValidationProviderHparams:
    """Hyper-parameters for BpbValidationProvider."""
    # Forwarded to load_llmpt_dataset_for_bpb_validating.
    # NOTE(review): that loader's signature takes non-optional hparams, so
    # presumably None is never actually passed here — confirm against callers.
    dataset: DatasetHparams | None
    iterator: IteratorHparams | None
    # Token budget for both the byte/token counting pass and the validation
    # early-stop; None disables the limit.
    # NOTE(review): the default of 0 makes the counting loop stop after the
    # first document — confirm callers always set this explicitly.
    stopping_tokens_n: int | None = 0


class BpbValidationProvider(ValidationProvider, DockPlugin):
    """Provides a validation pass that reports bits-per-byte over a dataset
    whose byte/token totals are counted up-front at construction time."""

    def __init__(self, *,
        hparams: BpbValidationProviderHparams,
        engine: MLInferringEngine | None = None,
        device: torch.device | str | None = None,
    ):
        self._hparams = hparams
        self._engine = engine
        self._device = device

        # Counting runs eagerly here, so the totals are fixed before any
        # validate() call.
        loaded = load_llmpt_dataset_for_bpb_validating(
            dataset_hparams=hparams.dataset,
            iterator_hparams=hparams.iterator,
            stopping_tokens_n=hparams.stopping_tokens_n,
            device=device)
        self._dataset, self._total_bytes_n, self._total_tokens_n = loaded

    def validate(self) -> dict[str, MetricOrValue]:
        """Run one validation pass and return its recorded metrics."""
        # Lazily derive an inferring engine from the host dock's training
        # engine when none was supplied; cache it for subsequent calls.
        if self._engine is None:
            trainer = self.dock.get_plugin(MLTrainingEngine)
            self._engine = DelegatingInferringEngine(trainer)

        # Assemble a fresh dock per call; install order preserved.
        validation_dock = Dock()
        validation_dock.install(MLValidatingTask(
            producer=ZklDatasetProducer(self._dataset),
            processor=StandardValidator(engine=self._engine)))
        validation_dock.install(BpbMetricsProvider(
            total_tokens_n=self._total_tokens_n,
            total_bytes_n=self._total_bytes_n))
        validation_dock.install(MetricsRecording())
        validation_dock.install(EarlyStopByMaxThreshold(
            metric_name='tokens',
            threshold=self._hparams.stopping_tokens_n))
        validation_dock.install(TqdmProgressBar(
            progress_metric_name='tokens',
            desc="Validating", unit='tokens', unit_scale=True))

        validating_task = validation_dock.get_plugin(MLValidatingTask)
        return validating_task.run()
