from typing import Callable, Literal

import torch

# Hook signature for post-projection modification of (q, k, v) — e.g. applying
# rotary position embeddings (RoPE) before attention (see forward()'s
# qkv_injector parameter). Takes (q, k, v) and returns the modified triple.
QkvInjector = Callable[[torch.Tensor, torch.Tensor, torch.Tensor], tuple[torch.Tensor, torch.Tensor, torch.Tensor]]


class GroupQuerySelfAttention(torch.nn.Module):
    """Grouped-query self-attention (GQA).

    ``queries_n`` query heads are split into ``groups_n`` groups; all query
    heads within a group share one key/value head.  ``groups_n == queries_n``
    gives standard multi-head attention; ``groups_n == 1`` gives multi-query
    attention.
    """

    def __init__(self, *,
        queries_n: int,
        groups_n: int,
        i_size: int | None = None,
        qk_size: int,
        v_size: int,
        o_size: int,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """
        :param queries_n: number of query heads; must be a multiple of groups_n
        :param groups_n: number of key/value head groups
        :param i_size: input feature size; if None, the q/k/v projections are
            lazily initialized from the first input's last dimension
        :param qk_size: per-head query/key feature size
        :param v_size: per-head value feature size
        :param o_size: output feature size
        :param dtype: parameter dtype
        :param device: parameter device
        :raises ValueError: if queries_n is not divisible by groups_n
        """
        super().__init__()
        # forward() reshapes the query heads to
        # [groups_n, queries_n // groups_n]; fail early with a clear message
        # instead of an opaque reshape error at call time.
        if queries_n % groups_n != 0:
            raise ValueError(
                f'queries_n ({queries_n}) must be divisible by groups_n ({groups_n})')
        self.queries_n = queries_n
        self.groups_n = groups_n
        self.i_size = i_size
        self.qk_size = qk_size
        self.v_size = v_size
        self.o_size = o_size

        if i_size is None:
            self.q_linear = torch.nn.LazyLinear(queries_n * qk_size, bias=False, dtype=dtype, device=device)
            self.k_linear = torch.nn.LazyLinear(groups_n * qk_size, bias=False, dtype=dtype, device=device)
            self.v_linear = torch.nn.LazyLinear(groups_n * v_size, bias=False, dtype=dtype, device=device)
        else:
            self.q_linear = torch.nn.Linear(i_size, queries_n * qk_size, bias=False, dtype=dtype, device=device)
            self.k_linear = torch.nn.Linear(i_size, groups_n * qk_size, bias=False, dtype=dtype, device=device)
            self.v_linear = torch.nn.Linear(i_size, groups_n * v_size, bias=False, dtype=dtype, device=device)
        self.r_linear = torch.nn.Linear(queries_n * v_size, o_size, bias=False, dtype=dtype, device=device)

    def forward(self,
        x: torch.Tensor, *,
        mask: torch.Tensor | Literal['full', 'causal'] = 'full',
        extra_kv: tuple[torch.Tensor, torch.Tensor] | None = None,
        extra_mask: torch.Tensor | Literal['full'] = 'full',
        qkv_injector: QkvInjector | None = None,
        dropout: float | None = None,
    ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
        """
        :param x: shape=[..., seq_len, i_size]
        :param mask: shape=[..., seq_len (q), seq_len (k)], bool,
            or 'full' / 'causal'
        :param extra_kv: (extra_k, extra_v), prepended to this call's k/v
            along the sequence axis before attention:
            extra_k.shape=[..., ext_len, groups_n, qk_size]
            extra_v.shape=[..., ext_len, groups_n, v_size]
        :param extra_mask: shape=[..., seq_len (q), ext_len (k)], bool
        :param qkv_injector: modify q,k,v with additional info (e.g. RoPE);
            applied before extra_kv is prepended
        :param dropout: attention dropout probability; only applied while
            self.training is True, so evaluation stays deterministic
        :return: y, (q, k, v) — q/k/v are the projections of x (after
            qkv_injector, without extra_kv prepended):
            y.shape=[..., seq_len, o_size]
            q.shape=[..., seq_len, queries_n, qk_size]
            k.shape=[..., seq_len, groups_n, qk_size]
            v.shape=[..., seq_len, groups_n, v_size]
        """
        q = self.q_linear(x)
        # [..., seq_len, queries_n * qk_size]
        k = self.k_linear(x)
        # [..., seq_len, groups_n * qk_size]
        v = self.v_linear(x)
        # [..., seq_len, groups_n * v_size]

        q = torch.reshape(q, (*q.shape[:-1], self.queries_n, self.qk_size))
        # [..., seq_len, queries_n, qk_size]
        k = torch.reshape(k, (*k.shape[:-1], self.groups_n, self.qk_size))
        # [..., seq_len, groups_n, qk_size]
        v = torch.reshape(v, (*v.shape[:-1], self.groups_n, self.v_size))
        # [..., seq_len, groups_n, v_size]

        if qkv_injector is not None:
            q, k, v = qkv_injector(q, k, v)

        # Captured before extra_kv is concatenated: callers get exactly this
        # step's projections back (e.g. to build their own extra_kv later).
        qkv = (q, k, v)

        extra_len = None
        if extra_kv is not None:
            extra_k, extra_v = extra_kv
            # extra_k.shape=[..., ext_len, groups_n, qk_size]
            # extra_v.shape=[..., ext_len, groups_n, v_size]
            k = torch.cat((extra_k, k), dim=-3)
            # [..., ext_len + seq_len, groups_n, qk_size]
            v = torch.cat((extra_v, v), dim=-3)
            # [..., ext_len + seq_len, groups_n, v_size]
            extra_len = extra_k.shape[-3]

        q = torch.swapaxes(q, axis0=-2, axis1=-3)
        # [..., queries_n, seq_len, qk_size]
        k = torch.swapaxes(k, axis0=-2, axis1=-3)
        # [..., groups_n, (ext_len +) seq_len, qk_size]
        v = torch.swapaxes(v, axis0=-2, axis1=-3)
        # [..., groups_n, (ext_len +) seq_len, v_size]

        # Split query heads into groups; k/v get a broadcast dim of size 1 so
        # each group's single k/v head serves all its query heads.
        q = torch.reshape(q, (*q.shape[:-3], self.groups_n, -1, *q.shape[-2:]))
        # [..., groups_n, group_queries_n, seq_len, qk_size]
        k = torch.reshape(k, (*k.shape[:-3], self.groups_n, 1, *k.shape[-2:]))
        # [..., groups_n, 1, (ext_len +) seq_len, qk_size]
        v = torch.reshape(v, (*v.shape[:-3], self.groups_n, 1, *v.shape[-2:]))
        # [..., groups_n, 1, (ext_len +) seq_len, v_size]

        mask, causal = make_causal_attention_mask_with_extra(
            seq_len=x.shape[-2], seq_mask=mask,
            ext_len=extra_len, ext_mask=extra_mask,
            device=x.device)
        # [(..., seq_len), (ext_len +) seq_len]

        if mask is not None and mask.ndim > 2:
            # [..., seq_len, (ext_len +) seq_len]
            mask = torch.unsqueeze(mask, dim=-3)
            mask = torch.unsqueeze(mask, dim=-3)
            # [..., 1 (groups_n), 1 (group_queries_n), seq_len, (ext_len +) seq_len]

        # Dropout is a training-time regularizer: disable it in eval mode so
        # inference is deterministic even when a rate is passed.
        dropout = 0.0 if dropout is None or not self.training else dropout
        r = torch.nn.functional.scaled_dot_product_attention(q, k, v, mask, dropout, causal)
        # [..., groups_n, group_queries_n, seq_len, v_size]

        r = torch.reshape(r, (*r.shape[:-4], -1, *r.shape[-2:]))
        # [..., queries_n, seq_len, v_size]

        r = torch.swapaxes(r, axis0=-2, axis1=-3)
        # [..., seq_len, queries_n, v_size]

        r = r.reshape((*r.shape[:-2], self.queries_n * self.v_size))
        # [..., seq_len, queries_n * v_size]

        y = self.r_linear(r)
        # [..., seq_len, o_size]

        return y, qkv


def make_causal_attention_mask(
    seq_len: int, *,
    local_len: int | None = None,
    device: torch.device | str | None = None
) -> torch.Tensor:
    """ make causal mask for self-attention

    :return: mask
        shape=[seq_len (query), seq_len (key)]
    """
    mask = torch.ones([seq_len, seq_len], dtype=torch.bool, device=device)
    mask = torch.tril(mask, diagonal=0)
    if local_len is not None:
        mask = torch.logical_and(mask, torch.logical_not(torch.tril(mask, diagonal=-local_len)))
    return mask


def make_causal_attention_mask_with_extra(*,
    seq_len: int,
    seq_mask: torch.Tensor | Literal['full', 'causal'] = 'full',
    ext_len: int | None = None,
    ext_mask: torch.Tensor | Literal['full'] = 'full',
    device: torch.device | None = None,
) -> tuple[torch.Tensor | None, bool]:
    """
    :param seq_len: int
    :param seq_mask: shape=[(..., seq_len), seq_len]
    :param ext_len: int | None
    :param ext_mask: shape=[(..., seq_len), ext_len]
    :param device: torch.device | None
    :return: mask.shape=[(..., seq_len), (ext_len +) seq_len]
    """
    if ext_len is None:
        if seq_mask == 'full':
            mask = None
            causal = False
        elif seq_mask == 'causal':
            mask = None
            causal = True
        elif isinstance(seq_mask, torch.Tensor):
            mask = seq_mask
            causal = False
        else:
            raise ValueError(f'Unexpected seq_mask: {seq_mask}')
    else:
        causal = False
        if seq_mask == 'full':
            if ext_mask == 'full':
                mask = None
            elif isinstance(ext_mask, torch.Tensor):
                # [(..., seq_len), ext_len]

                seq_mask = torch.ones([*ext_mask.shape[:-1], seq_len], dtype=torch.bool, device=device)
                # [(..., seq_len), seq_len]

                mask = torch.cat([ext_mask, seq_mask], dim=-1)
                # [(..., seq_len), ext_len + seq_len]
            else:
                raise ValueError(f'Unexpected ext_mask: {ext_mask}')
        elif seq_mask == 'causal':
            seq_mask = make_causal_attention_mask(seq_len, device=device)
            # [seq_len, seq_len]

            if ext_mask == 'full':
                ext_mask = torch.ones([*seq_mask.shape[:-1], ext_len], dtype=torch.bool, device=device)
                # [seq_len, ext_len]

                mask = torch.cat([ext_mask, seq_mask], dim=-1)
                # [seq_len, ext_len + seq_len]
            elif isinstance(ext_mask, torch.Tensor):
                # [(..., seq_len), ext_len]

                batch_shape = torch.broadcast_shapes(seq_mask.shape[:-1], ext_mask.shape[:-1])
                # [(...), seq_len]

                seq_mask = torch.broadcast_to(seq_mask, [*batch_shape, seq_mask.shape[-1]])
                # [(..., seq_len), seq_len]

                ext_mask = torch.broadcast_to(ext_mask, [*batch_shape, ext_mask.shape[-1]])
                # [(..., seq_len), ext_len]

                mask = torch.cat([ext_mask, seq_mask], dim=-1)
                # [(..., seq_len), ext_len + seq_len]
            else:
                raise ValueError(f'Unexpected ext_mask: {ext_mask}')
        elif isinstance(seq_mask, torch.Tensor):
            # [(..., seq_len), seq_len]

            if ext_mask == 'full':
                ext_mask = torch.ones([*seq_mask.shape[:-1], ext_len], dtype=torch.bool, device=device)
                # [(..., seq_len), ext_len]

                mask = torch.cat([ext_mask, seq_mask], dim=-1)
                # [(..., seq_len), ext_len + seq_len]
            elif isinstance(ext_mask, torch.Tensor):
                # [(..., seq_len), ext_len]

                batch_shape = torch.broadcast_shapes(seq_mask.shape[:-1], ext_mask.shape[:-1])
                # [(...), seq_len]

                seq_mask = torch.broadcast_to(seq_mask, [*batch_shape, seq_mask.shape[-1]])
                # [(..., seq_len), seq_len]

                ext_mask = torch.broadcast_to(ext_mask, [*batch_shape, ext_mask.shape[-1]])
                # [(..., seq_len), ext_len]

                mask = torch.cat([ext_mask, seq_mask], dim=-1)
                # [(..., seq_len), ext_len + seq_len]
            else:
                raise ValueError(f'Unexpected ext_mask: {ext_mask}')
        else:
            raise ValueError(f'Unexpected seq_mask: {seq_mask}')
    return mask, causal
