from dataclasses import dataclass

import torch
from zkl_llmpt_llama3 import compute_periods_cs
from zkl_llmpt_synergy import SynergyTransformerOutput
from zkl_ptutils_neural import RMSNorm

from .synergy import Llama3SynergyTransformerHparams, construct_llama3_synergy_transformer


@dataclass(kw_only=True)
class Llama3SynergyCausalLanguageModelHparams:
    """Hyperparameters for :class:`Llama3SynergyCausalLanguageModel`.

    All fields are keyword-only.
    """
    # Number of token embeddings; also the size of the output logits
    # dimension (used as vocab size by both Embedding and the output Linear).
    embs_n: int
    # Shortest / longest rotary (RoPE) period; forwarded both to the
    # transformer constructor and to compute_periods_cs in forward().
    rope_period_min: float
    rope_period_max: float
    # Nested hyperparameters for the synergy transformer stack.
    transformer: Llama3SynergyTransformerHparams


@dataclass(kw_only=True)
class Llama3SynergyCausalLanguageModelOutput:
    """Result bundle returned by :meth:`Llama3SynergyCausalLanguageModel.forward`.

    All fields are keyword-only.
    """
    # Per-token vocabulary logits produced by the output linear head.
    tokens_logits: torch.Tensor
    # Full output of the underlying synergy transformer, kept so callers
    # can access intermediate results beyond the logits.
    transformer: SynergyTransformerOutput


class Llama3SynergyCausalLanguageModel(torch.nn.Module):
    """Causal language model built around a Llama3-style synergy transformer.

    Pipeline: token-id embedding lookup -> rotary position embeddings
    (periods spanning [rope_period_min, rope_period_max]) -> synergy
    transformer -> RMSNorm -> bias-free linear head -> vocabulary logits.
    """

    def __init__(self, *,
        hparams: Llama3SynergyCausalLanguageModelHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """Construct the embedding table, transformer stack, and output head.

        dtype/device are threaded through to every parameterized submodule.
        """
        super().__init__()
        self.hparams = hparams
        cfg = hparams.transformer

        # Token-id -> vector lookup table, sized to the vocabulary and the
        # encoder's model width.
        self.embedding = torch.nn.Embedding(
            hparams.embs_n, cfg.encoder.m_size,
            dtype=dtype, device=device)
        self.transformer = construct_llama3_synergy_transformer(
            hparams=cfg,
            rope_period_min=hparams.rope_period_min,
            rope_period_max=hparams.rope_period_max,
            dtype=dtype, device=device)
        # NOTE(review): unlike the other submodules, RMSNorm receives no
        # size/dtype/device here — presumably it is parameter-free in
        # zkl_ptutils_neural; confirm against that implementation.
        self.out_norm = RMSNorm()
        # Unembedding head: decoder width -> vocabulary logits, no bias.
        self.out_linear = torch.nn.Linear(
            cfg.decoder.m_size, hparams.embs_n,
            bias=False, dtype=dtype, device=device)

    def forward(self, *,
        tokens_wid: torch.Tensor,
        tokens_pos: torch.Tensor,
        **kwargs
    ) -> Llama3SynergyCausalLanguageModelOutput:
        """Map token ids (plus their positions) to vocabulary logits.

        tokens_wid -- integer token ids fed to the embedding table.
        tokens_pos -- token positions used to build rotary embeddings.
        Extra keyword arguments are forwarded to the transformer call.
        """
        embs = self.embedding(tokens_wid)

        # All three transformer stages must agree on the rotary embedding
        # width, since one positional-embedding tensor is shared by all.
        cfg = self.hparams.transformer
        assert (cfg.encoder.qk_size ==
                cfg.middle.qk_size ==
                cfg.decoder.qk_size)
        pos_embs = compute_periods_cs(
            pos=tokens_pos,
            size=cfg.encoder.qk_size,
            period_min=self.hparams.rope_period_min,
            period_max=self.hparams.rope_period_max,
            dtype=embs.dtype,
            device=embs.device)

        tf_out = self.transformer(
            tokens_emb=embs,
            tokens_pos_emb=pos_embs, **kwargs)

        normed = self.out_norm(tf_out.tokens_emb)
        return Llama3SynergyCausalLanguageModelOutput(
            tokens_logits=self.out_linear(normed),
            transformer=tf_out)
