from dataclasses import dataclass
from typing import Callable

import torch
from zkl_ptutils_neural import GroupQuerySelfAttention, QkvInjector, RMSNorm, SwishGluFeedForward, \
    make_causal_attention_mask

from .positional_encoding import apply_rotary_emb


@dataclass(kw_only=True)
class Llama3TransformerLayerHparams:
    """Hyper-parameters for one Llama3TransformerLayer (keyword-only)."""
    queries_n: int  # passed to GroupQuerySelfAttention as queries_n (number of query heads)
    groups_n: int  # passed to GroupQuerySelfAttention as groups_n (number of KV groups, GQA)
    qk_size: int  # per-head query/key projection size (attention qk_size)
    v_size: int  # per-head value projection size (attention v_size)
    h_size: int  # hidden width of the SwiGLU feed-forward (h_size)
    m_size: int  # model/embedding width: input and output size of both attention and FFN


class Llama3TransformerLayer(torch.nn.Module):
    """A single Llama-3-style transformer layer.

    Pre-norm architecture: RMSNorm -> grouped-query self-attention ->
    residual add, then RMSNorm -> SwiGLU feed-forward -> residual add.
    Rotary position embeddings are applied to q/k through a QKV injector.
    """

    def __init__(self, *,
        hparams: Llama3TransformerLayerHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """Build the layer's sub-modules from `hparams`.

        Args:
            hparams: layer dimensions (head/group counts, per-head sizes,
                FFN hidden width, model width).
            dtype: parameter dtype for the attention and FFN weights.
            device: device to allocate parameters on.
        """
        super().__init__()
        # Attention sub-block: pre-norm + grouped-query self-attention,
        # mapping m_size -> m_size so the residual add is shape-safe.
        self.self_attention_norm = RMSNorm()
        self.self_attention = GroupQuerySelfAttention(
            queries_n=hparams.queries_n,
            groups_n=hparams.groups_n,
            i_size=hparams.m_size,
            qk_size=hparams.qk_size,
            v_size=hparams.v_size,
            o_size=hparams.m_size,
            dtype=dtype,
            device=device)
        # Feed-forward sub-block: pre-norm + bias-free SwiGLU MLP,
        # also m_size -> m_size.
        self.feed_forward_norm = RMSNorm()
        self.feed_forward = SwishGluFeedForward(
            i_size=hparams.m_size,
            h_size=hparams.h_size,
            o_size=hparams.m_size,
            bias=False,
            dtype=dtype,
            device=device)

    def forward(self, *,
        tokens_emb: torch.Tensor,
        tokens_pos_emb: torch.Tensor | None,
        tokens_mask: torch.Tensor | None = None,
        extra_tokens_kv: tuple[torch.Tensor, torch.Tensor] | None = None,
        extra_tokens_mask: torch.Tensor | None = None,
        qkv_injector_factory: Callable[[QkvInjector | None], QkvInjector | None] | None = None,
    ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
        """Run one transformer layer.

        Args:
            tokens_emb: token embeddings, [..., seq_len, m_size] —
                presumably; shape is fixed by the sub-modules, confirm there.
            tokens_pos_emb: rotary position embeddings, or None to skip RoPE.
            tokens_mask: optional boolean validity mask over tokens; folded
                into the causal attention mask when given.
            extra_tokens_kv: optional extra (k, v) tensors (e.g. a KV cache)
                attended to with a full (non-causal) mask by default.
            extra_tokens_mask: optional mask for the extra tokens.
            qkv_injector_factory: optional hook that wraps/replaces the
                RoPE QKV injector before attention runs.

        Returns:
            (new token embeddings, (k, v) tensors produced by attention).
        """
        # --- self-attention sub-block (pre-norm + residual) ---
        attention_residual = tokens_emb
        tokens_emb = self.self_attention_norm(tokens_emb)
        # Fold the per-token validity mask into the causal mask; build the
        # causal base on tokens_mask's device so the `&` never crosses
        # devices (previously the helper defaulted to the default device).
        if tokens_mask is not None:
            attention_mask = make_causal_attention_mask_with_tokens_mask(
                tokens_mask, device=tokens_mask.device)
        else:
            attention_mask = 'causal'
        if extra_tokens_mask is None:
            extra_tokens_mask = 'full'
        # RoPE injector, optionally wrapped/replaced by the caller's factory.
        qkv_injector = make_rope_qkv_injector(tokens_pos_emb) \
            if tokens_pos_emb is not None else None
        if qkv_injector_factory is not None:
            qkv_injector = qkv_injector_factory(qkv_injector)
        tokens_emb, (_, *tokens_kv) = self.self_attention(
            x=tokens_emb,
            mask=attention_mask,
            extra_kv=extra_tokens_kv,
            extra_mask=extra_tokens_mask,
            qkv_injector=qkv_injector)
        tokens_emb = tokens_emb + attention_residual

        # --- feed-forward sub-block (pre-norm + residual) ---
        feed_forward_residual = tokens_emb
        tokens_emb = self.feed_forward_norm(tokens_emb)
        tokens_emb = self.feed_forward(tokens_emb)
        tokens_emb = tokens_emb + feed_forward_residual

        # Starred unpacking binds a *list*; convert so the return value
        # actually matches the annotated tuple[Tensor, Tensor].
        return tokens_emb, tuple(tokens_kv)


def make_causal_attention_mask_with_tokens_mask(
    tokens_mask: torch.Tensor, *,
    device: torch.device | str | None = None
) -> torch.Tensor:
    """Build a causal attention mask that also excludes invalid tokens.

    Args:
        tokens_mask: boolean tensor [..., seq_len]; True marks a valid
            token (presumably — confirm against the attention module).
        device: device for the causal base mask. Defaults to
            tokens_mask.device so the `&` below never crosses devices.

    Returns:
        Boolean mask [..., seq_len (q), seq_len (k)], True only where
        attention is allowed: causal position, valid query, valid key.
    """
    if device is None:
        device = tokens_mask.device
    seq_len = tokens_mask.shape[-1]
    mask = make_causal_attention_mask(seq_len, device=device)
    # [..., seq_len (q), seq_len (k)]

    q_mask = tokens_mask.unsqueeze(-1)
    # [..., seq_len (q), 1 (k)] — broadcasts over key columns
    # BUGFIX: was unsqueeze(-1), which duplicated the query mask and
    # never masked out invalid *key* positions.
    k_mask = tokens_mask.unsqueeze(-2)
    # [..., 1 (q), seq_len (k)] — broadcasts over query rows

    mask = mask & q_mask
    mask = mask & k_mask
    return mask


def make_rope_qkv_injector(tokens_pos_emb: torch.Tensor) -> QkvInjector:
    """Create a QKV injector that applies rotary position embeddings.

    The returned injector rotates the query and key projections with
    `tokens_pos_emb` and passes values through untouched.

    Args:
        tokens_pos_emb: precomputed rotary embeddings per position.
            Must not be None (the caller guards for that); the previous
            `torch.Tensor | None` annotation was misleading since the
            value is dereferenced unconditionally.

    Returns:
        A QkvInjector closure `(q, k, v) -> (q', k', v)`.
    """
    # Insert the head axis once here rather than on every attention call;
    # it then broadcasts over the per-head dimension inside the closure.
    pos_emb = tokens_pos_emb.unsqueeze(-2)

    def rope_qkv_injector(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor):
        return apply_rotary_emb(q, pos_emb), apply_rotary_emb(k, pos_emb), v

    return rope_qkv_injector
