import torch
from zkl_aiutils_metrics import MeanMetric
from zkl_ptutils_neural import categorical_softmax_cross_entropy, categorical_softmax_soft_accuracy
from zkl_llmpt_training import LlmptStepInput, LlmptStepOutput, LlmptTrainingModel, TypedMetricPatch

from zkl_llmpt_llama3 import Llama3CausalLanguageModel, Llama3CausalLanguageModelHparams

# The training model introduces no hyperparameters of its own: it reuses the
# underlying causal language model's hparams type unchanged.
Llama3TrainingModelHparams = Llama3CausalLanguageModelHparams


class Llama3TrainingModel(LlmptTrainingModel):
    """Training wrapper around :class:`Llama3CausalLanguageModel`.

    One training step runs the language model over the step's input tokens,
    reduces the masked per-token cross-entropy into a scalar loss, and emits
    float64 masked-mean metric patches for cross-entropy and soft accuracy.
    """

    def __init__(self, *,
        hparams: Llama3TrainingModelHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        super().__init__()
        # The wrapped causal language model being trained.
        self.model = Llama3CausalLanguageModel(hparams=hparams, dtype=dtype, device=device)

    def forward(self, input: LlmptStepInput) -> LlmptStepOutput:
        # Run the language model; the second return value is unused here
        # (presumably a KV cache/state — not needed during training).
        logits, _ = self.model.forward(
            tokens_wid=input.tokens_in_wid,
            tokens_pos=input.tokens_in_pos)
        # [batch_size, chunk_size, embs_n]

        targets = input.tokens_out_wid
        mask = input.tokens_out_mask

        # Per-token training signals, reduced over the vocabulary dimension.
        per_token_ce = categorical_softmax_cross_entropy(logits, targets, dim=-1)
        per_token_acc = categorical_softmax_soft_accuracy(logits, targets, dim=-1)
        # [batch_size, chunk_size]

        # NOTE: the loss is normalized by the TOTAL element count of the mask
        # (not the number of unmasked tokens), so its scale is independent of
        # how many positions are masked in.
        loss = torch.masked.sum(per_token_ce, mask=mask) / torch.numel(mask)
        # []

        # Metric accumulators: detached from autograd and widened to float64
        # for numerically stable accumulation across many steps. `weight` is
        # the count of tokens actually contributing under the mask.
        ce_sum = torch.masked.sum(per_token_ce.detach().to(torch.float64), mask=mask)
        acc_sum = torch.masked.sum(per_token_acc.detach().to(torch.float64), mask=mask)
        weight = torch.sum(mask, dtype=torch.float64)
        # []

        return LlmptStepOutput(
            loss=loss,
            metrics_patch={
                'ce': TypedMetricPatch((ce_sum, weight), metric=MeanMetric),
                'acc': TypedMetricPatch((acc_sum, weight), metric=MeanMetric),
            })
