from dataclasses import dataclass
from typing import Callable, Self, TYPE_CHECKING

import torch
import torch.distributed
from zkl_training import ProcessingTask

from llmpt.preprocess import TokenizedDataset
from .iterator import PreprocessedDatasetIterator
from .model import GPTModel
from .plugin_metrics import MetricsPlugin
from .plugin_tqdm import TqdmPlugin
from .progress import ProgressRecorder
from .vocab import VocabForNLP

if TYPE_CHECKING:
    from .training import GPTTraining, Input, Output


class GPTValidating(ProcessingTask):
    """Validation task for a GPT model.

    Iterates a tokenized dataset in fixed-size context windows, runs the
    model forward without gradient tracking, and collects per-token
    cross-entropy and reference-token probability ("accuracy") metrics
    via the installed :class:`MetricsPlugin`.
    """

    @dataclass(kw_only=True)
    class Hyperparams:
        # Samples per validation batch.
        batch_samples_n: int
        # Context window length, in tokens.
        context_tokens_n: int
        # Stride between consecutive windows; None means "full context"
        # (non-overlapping windows), resolved in __post_init__.
        # Fixed: was annotated as plain `int` while defaulting to None.
        striding_tokens_n: int | None = None

        # Optional token budget: validation stops early once this many
        # tokens have been processed (see _before_step). None = no limit.
        valid_tokens_n: int | None = None
        # Maximum number of passes over the dataset made by the iterator.
        valid_repeats_n: int | None = 1

        def __post_init__(self):
            # Default the stride to the context length so windows tile
            # the dataset without overlap.
            if self.striding_tokens_n is None:
                self.striding_tokens_n = self.context_tokens_n

    @classmethod
    def from_training(cls, training: 'GPTTraining') -> Self:
        """Build a validating task that reuses a training task's model,
        validation dataset, vocab, device and forward function.

        NOTE(review): assumes `training.hyperparams` exposes the same
        field names read below — confirm against GPTTraining.
        """
        return cls(
            # `cls.Hyperparams` (not the hard-coded class name) so that
            # subclasses overriding Hyperparams stay consistent.
            hyperparams=cls.Hyperparams(
                batch_samples_n=training.hyperparams.batch_samples_n,
                context_tokens_n=training.hyperparams.context_tokens_n,
                striding_tokens_n=training.hyperparams.striding_tokens_n,
                valid_tokens_n=training.hyperparams.valid_tokens_n),
            dataset=training.valid_dataset,
            vocab=training.vocab,
            device=training.device,
            compile=training.compile,
            forward_logits=training.forward_logits)

    @classmethod
    def from_model(cls, *,
        model: GPTModel,
        hyperparams: Hyperparams,
        dataset: TokenizedDataset,
        compile: bool,
    ) -> Self:
        """Build a validating task for a standalone model (no training task).

        Wraps the model's forward pass so only the logits are exposed to
        this task.
        """
        def forward_logits(in_tokens: torch.Tensor, in_positions: torch.Tensor) -> torch.Tensor:
            # model.nn.forward returns (logits, <extra>); drop the extra.
            out_logits, _ = model.nn.forward(in_tokens, in_positions)
            return out_logits

        return cls(
            hyperparams=hyperparams,
            dataset=dataset,
            vocab=model.vocab,
            device=model.device,
            compile=compile,
            forward_logits=forward_logits)

    def __init__(self, *,
        hyperparams: Hyperparams,
        dataset: TokenizedDataset,
        vocab: VocabForNLP,
        device: torch.device | str | None,
        compile: bool,
        forward_logits: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
    ):
        """
        :param hyperparams: validation hyperparameters (batch/context/stride/limits)
        :param dataset: tokenized dataset to validate on
        :param vocab: vocabulary used by the dataset iterator
        :param device: device batches are moved to (None = iterator default)
        :param compile: if True, wrap forward functions with torch.compile in _before_run
        :param forward_logits: (in_tokens, in_positions) -> logits forward function
        """
        super().__init__()

        self.hyperparams = hyperparams
        self.dataset = dataset
        self.vocab = vocab
        self.device = device
        self.compile = compile
        self.forward_logits = forward_logits

        # iterator: yields batched context windows over the dataset
        self.iterator = PreprocessedDatasetIterator(
            dataset=self.dataset, vocab=self.vocab,
            limit_repeats_n=self.hyperparams.valid_repeats_n,
            context_tokens_n=self.hyperparams.context_tokens_n,
            striding_tokens_n=self.hyperparams.striding_tokens_n,
            batch_samples_n=self.hyperparams.batch_samples_n,
            device=self.device)

        # progress: counts tokens processed so far (drives early stop)
        self.progress_recorder = ProgressRecorder()

        # plugins
        self.install(TqdmPlugin())
        self.install(MetricsPlugin())

    @property
    def progress_tokens_n(self) -> int:
        """Number of tokens processed so far in this validation run."""
        return self.progress_recorder.get()

    # task

    def run(self) -> dict[str, float]:
        """Run the full validation loop and return the merged metric values
        collected by all installed MetricsPlugin instances."""
        super().run()

        metrics_value: dict[str, float] = {}
        for plugin in self.plugins:
            if isinstance(plugin, MetricsPlugin):
                # Later plugins overwrite duplicate keys, if any.
                metrics_value.update(plugin.collect_metrics_value())
        return metrics_value

    def _before_run(self):
        super()._before_run()

        if self.compile:
            # Replace the forward callables with compiled versions; the
            # instance attribute shadows the classmethod forward_metrics.
            self.forward_logits = torch.compile(self.forward_logits)
            self.forward_metrics = torch.compile(self.forward_metrics)

    def _before_step(self):
        # Early stop once the configured token budget has been reached.
        # StopIteration is assumed to end the ProcessingTask loop — confirm
        # against the base class.
        if self.hyperparams.valid_tokens_n is not None:
            if self.progress_tokens_n >= self.hyperparams.valid_tokens_n:
                raise StopIteration

        super()._before_step()

    def _next(self) -> 'Input':
        """Fetch the next batch from the dataset iterator."""
        return next(self.iterator)

    def _process(self, input: 'Input') -> 'Output':
        """Run one validation step on a batch.

        `input` is assumed to be the iterator's 5-tuple
        (in wids, in positions, out wids, out mask, progress token count).
        """
        tokens_in_wid, tokens_in_pos, tokens_out_wid, tokens_out_mask, progress_tokens_n = input

        # forward: no gradients during validation
        with torch.no_grad():
            tokens_ce, tokens_acc = self.forward(tokens_in_wid, tokens_in_pos, tokens_out_wid)

        # progress
        self.progress_recorder.increase(progress_tokens_n)

        return tokens_ce, tokens_acc, tokens_out_mask, progress_tokens_n

    # neural

    def forward(self,
        tokens_in_wid: torch.Tensor,
        tokens_in_pos: torch.Tensor,
        tokens_out_wid: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # Fixed: return annotation previously claimed three tensors, but
        # exactly two (ce, acc) are returned.
        """Compute per-token cross-entropy and reference-token probability
        for one batch."""
        tokens_out_logits = self.forward_logits(tokens_in_wid, tokens_in_pos)
        tokens_ce, tokens_acc = self.forward_metrics(tokens_out_logits, tokens_out_wid)
        return tokens_ce, tokens_acc

    @classmethod
    def forward_metrics(cls,
        tokens_out_logits: torch.Tensor,
        tokens_out_wid: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """From logits and reference token ids, compute per-token
        cross-entropy and the probability assigned to the reference token.

        :param tokens_out_logits: [batch_size, context_size, vocab_size]
        :param tokens_out_wid: [batch_size, context_size] reference token ids
        :return: (tokens_ce, tokens_acc), each [batch_size, context_size], float32
        """
        tokens_out_probs = torch.softmax(tokens_out_logits, dim=-1)
        # [batch_size, context_size, vocab_size]

        # cross_entropy expects the class dim second: swap to
        # [batch, vocab, context]; reduction='none' keeps per-token values.
        tokens_ce = torch.nn.functional.cross_entropy(
            torch.swapaxes(tokens_out_logits, -1, -2),
            tokens_out_wid, reduction='none')
        # [batch_size, context_size]

        # Probability of the reference token — a soft "accuracy" signal.
        tokens_acc = torch.gather(tokens_out_probs, dim=-1, index=tokens_out_wid.unsqueeze(-1)).squeeze(-1)
        # [batch_size, context_size]

        # Detach and upcast so downstream metric accumulation is stable
        # regardless of the model's compute dtype.
        tokens_ce = tokens_ce.detach().to(torch.float32)
        tokens_acc = tokens_acc.detach().to(torch.float32)
        return tokens_ce, tokens_acc
