import torch
from zkl_ptutils_neural import categorical_softmax_cross_entropy, categorical_softmax_soft_accuracy
from zkl_llmpt_iterator import LlmptDataBatchTorch
from zkl_llmpt_llama3 import Llama3CausalLanguageModel, Llama3CausalLanguageModelHparams
from zkl_ptutils_metrics import MeanMetric, SumMetric
from zkl_ptutils_training import ScopedTypedMetricPatch, TypedMetricPatch

# The training wrapper introduces no hyperparameters of its own; it reuses
# the causal language model's hparams type directly.
Llama3TrainingModelHparams = Llama3CausalLanguageModelHparams


class Llama3TrainingModel(torch.nn.Module):
    """Training wrapper around ``Llama3CausalLanguageModel``.

    Given an ``LlmptDataBatchTorch``, runs a shifted (teacher-forcing)
    forward pass and returns the scalar training loss together with
    metric patches (token count, cross-entropy, soft accuracy).
    """

    def __init__(self, *,
        hparams: Llama3TrainingModelHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.hparams = hparams
        self.model = Llama3CausalLanguageModel(hparams=hparams, dtype=dtype, device=device)

        # Trigger one tiny forward pass so the wrapped model finishes any
        # lazy initialization before real training begins.
        dummy_wid = torch.zeros((1, 4), dtype=torch.int64, device=device)
        dummy_pos = torch.zeros((1, 4), dtype=torch.int64, device=device)
        self.model(tokens_wid=dummy_wid, tokens_pos=dummy_pos)

    def forward(self, input: LlmptDataBatchTorch):
        # Teacher forcing: predict token t+1 from tokens up to t.
        wid_in = input.tokens[:, :-1]
        wid_out = input.tokens[:, 1:]

        # Absolute positions per sequence, offset by each row's head.
        # NOTE(review): assumes input.head is an integer tensor of shape
        # [batch_size] — confirm against the iterator.
        head = input.head.unsqueeze(dim=-1)  # [batch_size, 1]
        offsets = torch.arange(wid_in.shape[-1], dtype=input.head.dtype, device=input.head.device)
        pos_in = head + offsets
        pos_out = pos_in + 1
        # A target is valid only while its position lies before the row's tail.
        mask = pos_out < input.tail.unsqueeze(dim=-1)

        logits, _ = self.model(
            tokens_wid=wid_in,
            tokens_pos=pos_in,
            tokens_mask=mask)
        # [batch_size, chunk_size, embs_n]

        ce_per_token = categorical_softmax_cross_entropy(logits, wid_out, dim=-1)
        acc_per_token = categorical_softmax_soft_accuracy(logits, wid_out, dim=-1)
        # [batch_size, chunk_size]

        # Metric accumulators over valid positions only; detached so the
        # metrics never contribute gradients.
        token_count = mask.sum(dtype=torch.int64)
        token_weight = mask.sum(dtype=torch.float64)
        ce_total = torch.masked.sum(ce_per_token.detach(), dtype=torch.float64, mask=mask)
        acc_total = torch.masked.sum(acc_per_token.detach(), dtype=torch.float64, mask=mask)
        # Loss is normalized by the total number of positions in the chunk,
        # masked or not (not by the valid-token count).
        loss = torch.masked.sum(ce_per_token, mask=mask) / mask.numel()

        return dict(
            metrics=dict(
                tokens=ScopedTypedMetricPatch(token_count, typ=lambda: SumMetric(0), scope='run'),
                ce=TypedMetricPatch((ce_total, token_weight), typ=MeanMetric),
                acc=TypedMetricPatch((acc_total, token_weight), typ=MeanMetric)),
            loss=loss)
