from typing import Literal

import torch

from .group_query_self_attention import QkvInjector


class GroupQueryCrossAttention(torch.nn.Module):
    """Group-query cross attention (GQA).

    ``queries_n`` query heads attend over ``groups_n`` shared key/value
    groups; the reshape in ``forward`` splits the query heads evenly across
    groups, so ``queries_n`` must be divisible by ``groups_n``.

    Keys and values are supplied by the caller already projected per group
    (shape ``[..., ref_len, groups_n, *]``); this module only projects the
    query input ``x`` and the attention result.
    """

    def __init__(self, *,
        queries_n: int,
        groups_n: int,
        i_size: int | None = None,
        qk_size: int,
        v_size: int,
        o_size: int,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """
        :param queries_n: number of query heads; must be a multiple of groups_n
        :param groups_n: number of key/value groups
        :param i_size: input feature size of x; None infers it lazily on first call
        :param qk_size: per-head query/key feature size
        :param v_size: per-group value feature size
        :param o_size: output feature size
        :param dtype: parameter dtype
        :param device: parameter device
        """
        super().__init__()
        self.queries_n = queries_n
        self.groups_n = groups_n
        self.i_size = i_size
        self.qk_size = qk_size
        self.v_size = v_size
        self.o_size = o_size

        # LazyLinear defers weight creation until the input size is seen.
        if i_size is None:
            self.q_linear = torch.nn.LazyLinear(queries_n * qk_size, bias=False, dtype=dtype, device=device)
        else:
            self.q_linear = torch.nn.Linear(i_size, queries_n * qk_size, bias=False, dtype=dtype, device=device)
        self.r_linear = torch.nn.Linear(queries_n * v_size, o_size, bias=False, dtype=dtype, device=device)

    def forward(self,
        x: torch.Tensor,
        k: torch.Tensor,
        v: torch.Tensor, *,
        mask: torch.Tensor | Literal['full'] = 'full',
        qkv_injector: QkvInjector | None = None,
        dropout: float | None = None,
    ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
        """
        :param x: shape=[..., seq_len, i_size]
        :param k: shape=[..., ref_len, groups_n, qk_size]
        :param v: shape=[..., ref_len, groups_n, v_size]
        :param mask: shape=[..., seq_len (q), ref_len (k)], or 'full' for no masking
        :param qkv_injector: modify q,k,v with additional info (e.g. RoPE)
        :param dropout: attention dropout probability; None means 0.0.
            NOTE(review): applied unconditionally via scaled_dot_product_attention,
            not gated on self.training — caller is responsible for passing
            None/0.0 in eval mode.
        :return: y, (q, k, v) — q/k/v as returned by qkv_injector (or unmodified),
            before any axis reordering, e.g. for caching
            y.shape=[..., seq_len, o_size]
            q.shape=[..., seq_len, queries_n, qk_size]
            k.shape=[..., ref_len, groups_n, qk_size]
            v.shape=[..., ref_len, groups_n, v_size]
        """
        q = self.q_linear(x)
        # [..., seq_len, queries_n * qk_size]

        q = torch.reshape(q, (*q.shape[:-1], self.queries_n, self.qk_size))
        # [..., seq_len, queries_n, qk_size]

        if qkv_injector is not None:
            q, k, v = qkv_injector(q, k, v)

        # Capture q,k,v for the caller before the head/length axes are swapped.
        qkv = (q, k, v)

        # Move the head/group axis in front of the length axis for attention.
        q = torch.swapaxes(q, axis0=-2, axis1=-3)
        # [..., queries_n, seq_len, qk_size]
        k = torch.swapaxes(k, axis0=-2, axis1=-3)
        # [..., groups_n, ref_len, qk_size]
        v = torch.swapaxes(v, axis0=-2, axis1=-3)
        # [..., groups_n, ref_len, v_size]

        # Split query heads into groups_n groups of queries_n // groups_n heads;
        # give k/v a broadcastable singleton axis in the per-group position.
        q = torch.reshape(q, (*q.shape[:-3], self.groups_n, -1, *q.shape[-2:]))
        # [..., groups_n, group_queries_n, seq_len, qk_size]
        k = torch.reshape(k, (*k.shape[:-3], self.groups_n, 1, *k.shape[-2:]))
        # [..., groups_n, 1, ref_len, qk_size]
        v = torch.reshape(v, (*v.shape[:-3], self.groups_n, 1, *v.shape[-2:]))
        # [..., groups_n, 1, ref_len, v_size]

        mask = make_cross_attention_mask(mask)
        # mask.shape=[(..., seq_len), ref_len], or None for 'full'

        if mask is not None and mask.ndim > 2:
            # A mask with batch dims must be aligned with the two head axes
            # inserted above; a 2-D mask broadcasts without adjustment.
            # [..., seq_len(q), ref_len(k)]
            mask = torch.unsqueeze(mask, dim=-3)
            mask = torch.unsqueeze(mask, dim=-3)
            # [..., 1 (groups_n), 1 (group_queries_n), seq_len (q), ref_len (k)]

        dropout = 0.0 if dropout is None else dropout
        r = torch.nn.functional.scaled_dot_product_attention(q, k, v, mask, dropout)
        # [..., groups_n, group_queries_n, seq_len, v_size]

        # Merge the group and per-group axes back into queries_n heads.
        r = torch.reshape(r, (*r.shape[:-4], -1, *r.shape[-2:]))
        # [..., queries_n, seq_len, v_size]

        r = torch.swapaxes(r, axis0=-2, axis1=-3)
        # [..., seq_len, queries_n, v_size]

        r = r.reshape((*r.shape[:-2], self.queries_n * self.v_size))
        # [..., seq_len, queries_n * v_size]

        y = self.r_linear(r)
        # [..., seq_len, o_size]

        return y, qkv


def make_cross_attention_mask(mask: torch.Tensor | Literal['full'] = 'full') -> torch.Tensor | None:
    if mask == 'full':
        return None
    if isinstance(mask, torch.Tensor):
        return mask
    raise ValueError(f"Unexpected {mask=}")
