from collections.abc import Iterable
from dataclasses import dataclass
from typing import Callable

import torch
from zkl_ptutils_neural import QkvInjector, RMSNorm

from .positional_encoding import compute_periods_cs
from .transformer import Llama3Transformer, Llama3TransformerHparams


@dataclass(kw_only=True)
class Llama3CausalLanguageModelHparams:
    """Hyperparameters for :class:`Llama3CausalLanguageModel`."""
    # Number of distinct token ids: row count of the input embedding table
    # and output dimension of the logit projection.
    embs_n: int
    # Lower/upper bounds of the positional-encoding periods forwarded to
    # compute_periods_cs (RoPE-style frequency range — TODO confirm semantics
    # against positional_encoding module).
    rope_period_min: float
    rope_period_max: float
    # Hyperparameters of the inner Llama3Transformer.
    transformer: Llama3TransformerHparams


class Llama3CausalLanguageModel(torch.nn.Module):
    """Causal language model head around a Llama3-style transformer.

    Pipeline: token-id embedding -> Llama3Transformer (with positional
    cos/sin from compute_periods_cs) -> RMSNorm -> tied-free linear
    projection to per-token logits.
    """

    def __init__(self, *,
        hparams: Llama3CausalLanguageModelHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.hparams = hparams

        # Keep dtype/device consistent across all parameterized submodules.
        factory_kwargs = {'dtype': dtype, 'device': device}
        self.embedding = torch.nn.Embedding(
            hparams.embs_n, hparams.transformer.m_size, **factory_kwargs)
        self.transformer = Llama3Transformer(
            hparams=hparams.transformer, **factory_kwargs)
        # NOTE(review): RMSNorm() receives no size/dtype/device unlike the
        # other submodules — presumably it is parameter-free or lazily
        # initialized; confirm against zkl_ptutils_neural.
        self.out_norm = RMSNorm()
        self.out_linear = torch.nn.Linear(
            hparams.transformer.m_size, hparams.embs_n,
            bias=False, **factory_kwargs)

    def forward(self, *,
        tokens_wid: torch.Tensor,
        tokens_pos: torch.Tensor,
        tokens_mask: torch.Tensor | None = None,
        layers_extra_tokens_kv: Iterable[tuple[torch.Tensor, torch.Tensor] | None] | None = None,
        layers_extra_tokens_mask: Iterable[torch.Tensor | None] | None = None,
        qkv_injector_factory: Callable[[QkvInjector | None], QkvInjector | None] | None = None,
    ) -> tuple[torch.Tensor, tuple[tuple[torch.Tensor, torch.Tensor], ...]]:
        """Compute per-token logits and per-layer key/value tensors.

        Args:
            tokens_wid: integer token ids (fed to the embedding table).
            tokens_pos: token positions (fed to compute_periods_cs).
            tokens_mask: optional attention mask, passed through to the
                transformer.
            layers_extra_tokens_kv: optional per-layer extra key/value
                pairs, passed through to the transformer.
            layers_extra_tokens_mask: optional per-layer masks for the
                extra tokens, passed through to the transformer.
            qkv_injector_factory: optional hook forwarded to the
                transformer's attention layers.

        Returns:
            (logits, layers_kv) — logits over `embs_n` classes per token,
            plus the transformer's per-layer (key, value) tuples.
        """
        hidden = self.embedding(tokens_wid)
        # Positional cos/sin table, matched to the embedding's dtype/device
        # so the transformer operates on homogeneous tensors.
        pos_cs = compute_periods_cs(
            pos=tokens_pos,
            size=self.hparams.transformer.qk_size,
            period_min=self.hparams.rope_period_min,
            period_max=self.hparams.rope_period_max,
            dtype=hidden.dtype,
            device=hidden.device)

        hidden, layers_kv = self.transformer.forward(
            tokens_emb=hidden,
            tokens_pos_emb=pos_cs,
            tokens_mask=tokens_mask,
            layers_extra_tokens_kv=layers_extra_tokens_kv,
            layers_extra_tokens_mask=layers_extra_tokens_mask,
            qkv_injector_factory=qkv_injector_factory)

        # Final norm then vocabulary projection (no bias).
        return self.out_linear(self.out_norm(hidden)), layers_kv
