from dataclasses import dataclass

import torch
from zkl_aiutils_metrics import MeanMetric
from zkl_aiutils_neural import categorical_softmax_cross_entropy, categorical_softmax_soft_accuracy
from zkl_llmpt_training import LlmptStepInput, LlmptStepOutput, LlmptTrainingModel, TypedMetricPatch

from zkl_llmpt_haodar.neural import HaodarCausalLanguageModel, HaodarCausalLanguageModelHparams
from .memory import HaodarTransformerMemory, RandomOffsetMemory


@dataclass(kw_only=True)
class HaodarTrainingModelHparams:
    """Hyperparameters for :class:`HaodarTrainingModel`."""

    # Hyperparameters forwarded to HaodarCausalLanguageModel.construct().
    model: HaodarCausalLanguageModelHparams
    # Capacity handed to HaodarTransformerMemory (as `layers_capacity`);
    # 0 disables the cross-chunk KV memory entirely.
    memory_tokens_n: int = 0
    # When True, a RandomOffsetMemory adds a per-sequence offset to the
    # token positions fed to the model.
    random_offset: bool = False


class HaodarTrainingModel(LlmptTrainingModel):
    """Training wrapper around :class:`HaodarCausalLanguageModel`.

    Optionally maintains two pieces of cross-call state:
      * a KV memory that carries (detached) per-layer key/value tensors
        between consecutive ``forward`` calls, and
      * a random position-offset memory that shifts token positions
        per sequence.

    Both memories are reset for any sequence whose chunk starts at
    position 0 (see ``reset_mask`` in ``forward``).
    """

    def __init__(self,
        hparams: HaodarTrainingModelHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """Construct the underlying model and the optional memories.

        Args:
            hparams: Model and memory configuration.
            dtype: Forwarded to ``HaodarCausalLanguageModel.construct``.
            device: Forwarded to ``HaodarCausalLanguageModel.construct``.
        """
        super().__init__()
        self.hparams = hparams

        self.model = HaodarCausalLanguageModel.construct(
            hparams=hparams.model,
            dtype=dtype, device=device)

        # NOTE(review): memory_tokens_n is passed as `layers_capacity` —
        # confirm both names denote the same quantity in
        # HaodarTransformerMemory's interface.
        self.kv_memory = HaodarTransformerMemory(
            layers_capacity=hparams.memory_tokens_n) \
            if hparams.memory_tokens_n > 0 else None
        self.offset_memory = RandomOffsetMemory() \
            if hparams.random_offset else None

    def forward(self, input: LlmptStepInput) -> LlmptStepOutput:
        """Run one training step: predict next tokens, compute masked loss.

        Returns an ``LlmptStepOutput`` with the scalar ``loss`` and a
        ``metrics_patch`` carrying float64 (ce, weight) / (acc, weight)
        pairs for ``MeanMetric`` accumulation.
        """
        tokens_in_wid = input.tokens_in_wid
        tokens_in_pos = input.tokens_in_pos
        tokens_out_wid = input.tokens_out_wid
        tokens_out_mask = input.tokens_out_mask
        # Sequences whose chunk begins at position 0 start fresh; their
        # memory state is reset below.
        reset_mask = tokens_in_pos[..., 0] == 0  # [batch_size], bool

        # Fetch stored KV from previous chunks (if the memory is enabled)
        # so the model can attend over them as extra tokens.
        if self.kv_memory is not None:
            self.kv_memory.reset(reset_mask)
            layers_extra_tokens_kv, layers_extra_tokens_mask = self.kv_memory.get()
        else:
            layers_extra_tokens_kv, layers_extra_tokens_mask = None, None

        if self.offset_memory is not None:
            # NOTE(review): period = 2 ** (pos_size // 2) — presumably keeps
            # offsets within the positional encoding's usable range; confirm
            # against RandomOffsetMemory / the positional-encoding scheme.
            self.offset_memory.reset(
                reset_mask,
                period=2 ** (self.hparams.model.pos_size // 2),
                dtype=torch.float64,
                device=tokens_in_wid.device)
            in_pos_offset = self.offset_memory.get()
            # [batch_size], float64
            tokens_in_pos = tokens_in_pos.to(torch.float64) + in_pos_offset.unsqueeze(-1)
            # [batch_size, chunk_size], float64

        tokens_out_logits, layers_tokens_kv = self.model.forward(
            tokens_in_wid=tokens_in_wid,
            tokens_in_pos=tokens_in_pos,
            layers_extra_tokens_kv=layers_extra_tokens_kv,
            layers_extra_tokens_mask=layers_extra_tokens_mask)
        # [batch_size, chunk_size, embs_n]

        if self.kv_memory is not None:
            # Detach before storing so the memory does not keep this
            # step's autograd graph alive across iterations.
            layers_tokens_kv = tuple(
                (tokens_k.detach(), tokens_v.detach())
                for tokens_k, tokens_v in layers_tokens_kv)
            self.kv_memory.append(layers_tokens_kv)

        tokens_out_ce = categorical_softmax_cross_entropy(tokens_out_logits, tokens_out_wid, dim=-1)
        tokens_out_acc = categorical_softmax_soft_accuracy(tokens_out_logits, tokens_out_wid, dim=-1)
        # [batch_size, chunk_size]

        # NOTE(review): loss is normalized by numel(mask) (all token slots),
        # not by mask.sum() (valid tokens) — masked-out positions dilute the
        # mean. Looks deliberate (constant per-chunk scale), but confirm.
        loss = torch.masked.sum(tokens_out_ce, mask=tokens_out_mask) / torch.numel(tokens_out_mask)
        # []

        # Metrics: masked sums in float64 plus the mask weight, so
        # MeanMetric can form a correctly-weighted mean over valid tokens.
        ce = torch.masked.sum(tokens_out_ce.detach().to(torch.float64), mask=tokens_out_mask)
        acc = torch.masked.sum(tokens_out_acc.detach().to(torch.float64), mask=tokens_out_mask)
        weight = torch.sum(tokens_out_mask, dtype=torch.float64)
        # []

        metrics_patch = dict(
            ce=TypedMetricPatch((ce, weight), metric=MeanMetric),
            acc=TypedMetricPatch((acc, weight), metric=MeanMetric))

        return LlmptStepOutput(loss=loss, metrics_patch=metrics_patch)
