from dataclasses import dataclass
from typing import Literal, Self

import torch
from zkl_aiutils_neural import GroupQuerySelfAttention, SwishGluFeedforward, aligning_add, make_causal_attention_mask, \
    rms_norm


@dataclass(kw_only=True)
class HaodarTransformerLayerHparams:
    """Hyperparameters for one :class:`HaodarTransformerLayer` (keyword-only)."""
    # Number of query heads in the grouped-query attention.
    queries_n: int
    # Number of key/value groups shared among the query heads.
    groups_n: int
    # Per-head query/key dimension.
    qk_size: int
    # Per-head value dimension.
    v_size: int
    # Model (residual-stream) embedding size; also the attention output size.
    m_size: int
    # Hidden size of the SwiGLU feedforward.
    h_size: int
    # Feedforward output size; defaults to m_size when left as None
    # (resolved in HaodarTransformerLayer.construct).
    o_size: int | None = None


class HaodarTransformerLayer(torch.nn.Module):
    """Pre-norm transformer layer: grouped-query self-attention followed by a
    SwiGLU feedforward, each preceded by RMS norm and wrapped in a residual
    (width-aligning) addition.
    """

    @classmethod
    def construct(cls, *,
        hparams: HaodarTransformerLayerHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None
    ) -> Self:
        """Build a layer from hyperparameters.

        When ``hparams.o_size`` is None the feedforward output size defaults
        to ``hparams.m_size``. The caller's ``hparams`` object is not
        modified.

        :param hparams: layer hyperparameters
        :param dtype: parameter dtype forwarded to the submodules
        :param device: parameter device forwarded to the submodules
        """
        # Resolve the default locally instead of writing it back into
        # `hparams` — the previous implementation mutated the caller's
        # dataclass as a side effect.
        o_size = hparams.o_size if hparams.o_size is not None else hparams.m_size
        return cls(
            attention=GroupQuerySelfAttention(
                queries_n=hparams.queries_n,
                groups_n=hparams.groups_n,
                qk_size=hparams.qk_size,
                v_size=hparams.v_size,
                o_size=hparams.m_size,
                dtype=dtype, device=device),
            feedforward=SwishGluFeedforward(
                i_size=hparams.m_size,
                h_size=hparams.h_size,
                o_size=o_size,
                dtype=dtype, device=device))

    def __init__(self, *,
        attention: GroupQuerySelfAttention,
        feedforward: SwishGluFeedforward,
    ):
        """
        :param attention: the grouped-query self-attention submodule
        :param feedforward: the SwiGLU feedforward submodule
        """
        super().__init__()
        self.attention = attention
        self.feedforward = feedforward

    def forward(self, *,
        tokens_in_emb: torch.Tensor,
        tokens_in_pos_emb: torch.Tensor | None = None,
        tokens_in_mask: torch.Tensor | None = None,
        tokens_out_mask: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        extra_tokens_kv: tuple[torch.Tensor, torch.Tensor] | None = None,
        extra_tokens_mask: torch.Tensor | None = None,
        extra_attention_mask: torch.Tensor | None = None,
        at_dropout: torch.Tensor | float = 0.0,
        ff_dropout: torch.Tensor | float = 0.0,
    ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
        """
        :param tokens_in_emb: shape=[..., chunk_tokens_n, i_size]
        :param tokens_in_pos_emb: shape=[..., chunk_tokens_n, pos_size]
        :param tokens_in_mask: shape=[..., chunk_tokens_n (k)], dtype=bool
        :param tokens_out_mask: shape=[..., chunk_tokens_n (q)], dtype=bool
        :param attention_mask: shape=[..., chunk_tokens_n (q), chunk_tokens_n (k)], dtype=bool
        :param extra_tokens_kv: (extra_tokens_k, extra_tokens_v)
            extra_tokens_k: shape=[..., extra_tokens_n (k), groups_n, qk_size]
            extra_tokens_v: shape=[..., extra_tokens_n (k), groups_n, v_size]
        :param extra_tokens_mask: shape=[..., extra_tokens_n (k)], dtype=bool
        :param extra_attention_mask: shape=[..., chunk_tokens_n (q), extra_tokens_n (k)], dtype=bool
        :param at_dropout: attention dropout probability
        :param ff_dropout: feedforward dropout probability
        :return: (tokens_out_emb, (tokens_k, tokens_v))
            tokens_out_emb: shape=[..., chunk_tokens_n (q), o_size]
            tokens_k: shape=[..., chunk_tokens_n (q), groups_n, qk_size]
            tokens_v: shape=[..., chunk_tokens_n (q), groups_n, v_size]
        """
        tokens_emb = tokens_in_emb

        # attention (pre-norm + residual)
        tokens_emb_residual = tokens_emb
        tokens_emb = rms_norm(tokens_emb)

        # Positional embeddings, when given, are concatenated onto the feature
        # dimension rather than added.
        tokens_emb = torch.cat([tokens_emb, tokens_in_pos_emb], dim=-1) \
            if tokens_in_pos_emb is not None else tokens_emb

        mask = _make_causal_self_attention_mask(
            attention_mask=attention_mask,
            tokens_in_mask=tokens_in_mask,
            tokens_out_mask=tokens_out_mask,
            chunk_tokens_n=tokens_emb.shape[-2],
            device=tokens_emb.device)

        extra_mask = _make_extra_attention_mask(
            extra_attention_mask=extra_attention_mask,
            extra_tokens_mask=extra_tokens_mask,
            chunk_tokens_n=tokens_emb.shape[-2],
            extra_tokens_n=extra_tokens_kv[0].shape[-3],
            device=tokens_emb.device) \
            if extra_tokens_kv is not None else 'full'

        # NOTE(review): assumes the attention module returns
        # (output_emb, (attn_extra, tokens_k, tokens_v)) — confirm against
        # GroupQuerySelfAttention.forward.
        tokens_emb, (_, *tokens_kv) = self.attention.forward(
            tokens_emb, mask=mask,
            extra_kv=extra_tokens_kv,
            extra_mask=extra_mask,
            dropout=at_dropout)

        tokens_emb = aligning_add(tokens_emb, tokens_emb_residual, dim=-1)

        # feedforward (pre-norm + residual)
        tokens_emb_residual = tokens_emb
        tokens_emb = rms_norm(tokens_emb)
        tokens_emb = self.feedforward.forward(tokens_emb, dropout=ff_dropout)
        tokens_emb = aligning_add(tokens_emb, tokens_emb_residual, dim=-1)

        tokens_out_emb = tokens_emb
        # Return a tuple, as annotated; star-unpacking above binds a list.
        return tokens_out_emb, tuple(tokens_kv)


def _make_causal_self_attention_mask(*,
    attention_mask: torch.Tensor | None = None,
    tokens_in_mask: torch.Tensor | None = None,
    tokens_out_mask: torch.Tensor | None = None,
    chunk_tokens_n: int,
    device: torch.device | str | None,
) -> torch.Tensor | Literal['causal']:
    """Combine the optional masks with a causal mask into one boolean mask.

    :param attention_mask: shape=[..., chunk_tokens_n (q), chunk_tokens_n (k)], dtype=bool
    :param tokens_in_mask: shape=[..., chunk_tokens_n (k)], dtype=bool
    :param tokens_out_mask: shape=[..., chunk_tokens_n (q)], dtype=bool
    :return: 'causal' when no explicit mask is given, otherwise a combined
        mask: shape=[..., chunk_tokens_n (q), chunk_tokens_n (k)], dtype=bool
    """
    # Lift the 1-D masks to (q, k) rank so everything broadcasts uniformly.
    constraints = [
        attention_mask,
        tokens_in_mask.unsqueeze(-2) if tokens_in_mask is not None else None,
        tokens_out_mask.unsqueeze(-1) if tokens_out_mask is not None else None,
    ]
    if all(constraint is None for constraint in constraints):
        return 'causal'

    combined = make_causal_attention_mask(chunk_tokens_n, device=device)
    for constraint in constraints:
        if constraint is not None:
            combined = torch.logical_and(combined, constraint)
    return combined


def _make_extra_attention_mask(*,
    extra_attention_mask: torch.Tensor | None,
    extra_tokens_mask: torch.Tensor | None,
    chunk_tokens_n: int,
    extra_tokens_n: int,
    device: torch.device | str | None,
) -> torch.Tensor | Literal['full']:
    """
    :param extra_tokens_mask: shape=[..., extra_tokens_n (k)], dtype=bool
    :param extra_attention_mask: shape=[..., chunk_tokens_n (q), extra_tokens_n (k)], dtype=bool
    :return: extra_attention_mask: shape=[..., chunk_tokens_n (q), chunk_tokens_n (k)], dtype=bool
    """
    if extra_attention_mask is None and extra_tokens_mask is None:
        return 'full'

    mask = torch.ones(chunk_tokens_n, extra_tokens_n, device=device)
    if extra_attention_mask is not None:
        mask = torch.logical_and(mask, extra_attention_mask)
    if extra_tokens_mask is not None:
        mask = torch.logical_and(mask, extra_tokens_mask.unsqueeze(-2))
    return mask
