from dataclasses import dataclass
from functools import cached_property
from typing import Callable, Self, TYPE_CHECKING

import torch
import torch.distributed
from zkl_llmpt_datasets import TokenizedDataset
from zkl_llmpt_iterator import GPTTrainingIterator, VocabForNLP
from zkl_metrics import Metric
from zkl_training import ProcessingTask

from llmpt.neural import GPT
from .model import GPTModel, GPTModelHparams
from .plugin_metrics import MetricsPlugin
from .plugin_tqdm import TqdmPlugin
from .progress import ProgressRecorder

if TYPE_CHECKING:
    from .training import GPTTrainingOutput, GPTTrainingInput, GPTTraining


@dataclass(kw_only=True)
class GPTValidatingHparams:
    """Hyperparameters controlling a validation pass over a tokenized dataset."""

    # Number of samples per batch fed to the iterator.
    batch_samples_n: int
    # Number of tokens per chunk fed to the iterator.
    chunk_tokens_n: int

    # Early-stop budget: validation halts once this many tokens have been
    # processed (see GPTValidating._before_step); None means no limit.
    valid_tokens_n: int | None = None
    # NOTE(review): not read anywhere in this module — presumably consumed by
    # a caller; confirm before removing.
    valid_repeats_n: int | None = 1

    # Dropout rates forwarded to the forward kernel; None leaves the
    # kernel's own default in effect.
    ff_dropout: float | None = None
    at_dropout: float | None = None


class GPTValidating(ProcessingTask['GPTTrainingInput', 'GPTTrainingOutput']):
    """Validation task: runs a GPT forward pass over a tokenized dataset
    without gradients, accumulating metrics via the installed plugins.

    Construct via :meth:`create_from_model` (standalone validation of a
    saved model) or :meth:`create_from_training` (validation wired to an
    in-progress training run, reusing its hparams and device).
    """

    @classmethod
    def create_from_model(cls,
        model: GPTModel, *,
        valid_hparams: GPTValidatingHparams,
        dataset: TokenizedDataset,
        compile: bool = False,
    ) -> Self:
        """Build a validating task around an existing model.

        The iterator is created on the model's device with the batch/chunk
        sizes taken from *valid_hparams*; the model's own vocab is reused.
        """
        return cls(
            valid_hparams=valid_hparams,
            model_hparams=model.hparams,
            iterator=GPTTrainingIterator.create(
                dataset_factory=lambda: dataset,
                vocab_factory=lambda: model.vocab,
                batch_samples_n=valid_hparams.batch_samples_n,
                chunk_tokens_n=valid_hparams.chunk_tokens_n,
                device=model.device),
            gpt=model.nn,
            device=model.device,
            compile=compile)

    @classmethod
    def create_from_training(cls,
        training: 'GPTTraining', *,
        dataset_factory: Callable[[], TokenizedDataset],
        vocab_factory: Callable[[], VocabForNLP] | None = None,
    ) -> Self:
        """Build a validating task mirroring an active training run.

        Validation hparams (batch/chunk sizes, token budget, dropout) are
        copied from the training hparams; model, device, and compile flag
        are shared with the training task.  ``valid_repeats_n`` is left at
        its dataclass default.
        """
        # Fixed: was `GPTValidating(...)`, which ignored `cls` and broke the
        # `-> Self` contract for subclasses; now consistent with
        # create_from_model.
        return cls(
            model_hparams=training.model_hparams,
            valid_hparams=GPTValidatingHparams(
                batch_samples_n=training.training_hparams.batch_samples_n,
                chunk_tokens_n=training.training_hparams.chunk_tokens_n,
                valid_tokens_n=training.training_hparams.valid_tokens_n,
                at_dropout=training.training_hparams.at_dropout,
                ff_dropout=training.training_hparams.ff_dropout),
            iterator=GPTTrainingIterator.create(
                dataset_factory=dataset_factory,
                vocab_factory=vocab_factory,
                batch_samples_n=training.training_hparams.batch_samples_n,
                chunk_tokens_n=training.training_hparams.chunk_tokens_n,
                device=training.device),
            gpt=training.gpt,
            device=training.device,
            compile=training.compile)

    def __init__(self, *,
        # hparams
        valid_hparams: GPTValidatingHparams,
        model_hparams: GPTModelHparams,

        # iterator
        iterator: GPTTrainingIterator,

        # model
        gpt: GPT,

        # device
        device: torch.device | str | None,
        compile: bool,
    ):
        super().__init__()

        # hparams
        self.model_hparams = model_hparams
        self.valid_hparams = valid_hparams

        # iterator
        self.iterator = iterator

        # model
        self.gpt = gpt

        # progress: counts tokens processed so far (drives early stop)
        self.progress_recorder = ProgressRecorder()

        # device
        self.device = device

        # compile: whether to torch.compile the forward kernel (see
        # forward_kernel)
        self.compile = compile

        # plugins: progress bar + metric accumulation (read back in run())
        self.install(TqdmPlugin())
        self.install(MetricsPlugin())

    @property
    def progress_tokens_n(self) -> int:
        """Tokens processed so far in this validation run."""
        return self.progress_recorder.get()

    # task

    def run(self) -> dict[str, Metric]:
        """Run the validation loop and return the accumulated metrics
        gathered from every installed MetricsPlugin."""
        super().run()

        metrics = {}
        for plugin in self.plugins:
            if isinstance(plugin, MetricsPlugin):
                metrics.update(plugin.metrics)
        return metrics

    def _before_step(self):
        # Early stop once the token budget is exhausted; StopIteration is
        # the task framework's end-of-run signal (same mechanism as an
        # exhausted iterator).
        if self.valid_hparams.valid_tokens_n is not None:
            if self.progress_tokens_n >= self.valid_hparams.valid_tokens_n:
                raise StopIteration

        super()._before_step()

    def _next(self) -> 'GPTTrainingInput':
        # Local import avoids a circular import with .training.
        from .training import process_data_batch
        return process_data_batch(next(self.iterator))

    def _process(self, input: 'GPTTrainingInput') -> 'GPTTrainingOutput':
        # Validation only: no gradients are needed.
        with torch.no_grad():
            return self.forward(input)

    def _after_process(self, output: 'GPTTrainingOutput'):
        # Advance the token counter by this batch's output-mask count.
        self.progress_recorder.increase(output.tokens_out_mask)
        super()._after_process(output)

    # neural

    @cached_property
    def forward_kernel(self):
        """The (optionally torch.compile-d) forward kernel, resolved once.

        Imported lazily to avoid a circular import with .training.
        """
        from .training import forward_kernel
        return torch.compile(forward_kernel) if self.compile else forward_kernel

    def forward(self, input: 'GPTTrainingInput') -> 'GPTTrainingOutput':
        """Run one forward pass with the validating dropout settings.

        position_period is derived from the model's pos_size; the exact
        positional-encoding semantics live in the kernel.
        """
        return self.forward_kernel(self.gpt, input,
            position_period=2 ** (self.model_hparams.pos_size // 2),
            at_dropout=self.valid_hparams.at_dropout,
            ff_dropout=self.valid_hparams.ff_dropout)