from typing import Literal

import torch.nn
from zkl_ptutils_neural import GroupQueryCrossAttention, QkvInjector


class NbpttAttention(torch.nn.Module):
    """Group-query cross-attention over an external memory sequence.

    Keys and values are projected from the memory ``mem_x`` with bias-free
    linear layers (lazily shape-inferred when ``i_size`` is ``None``); the
    queries are produced inside the wrapped ``GroupQueryCrossAttention``.
    (NOTE(review): "Nbptt" presumably refers to the memory not being
    backpropagated through time — confirm against the calling code.)

    :param queries_n: number of queries, forwarded to ``GroupQueryCrossAttention``.
    :param groups_n: number of key/value groups (GQA-style grouping).
    :param i_size: input feature size of ``x``/``mem_x``; ``None`` defers
        weight-shape inference to ``torch.nn.LazyLinear`` on first use.
    :param qk_size: per-group query/key feature size.
    :param v_size: per-group value feature size.
    :param o_size: output feature size.
    :param dtype: optional parameter dtype.
    :param device: optional parameter device.
    """

    def __init__(self, *,
        queries_n: int,
        groups_n: int,
        i_size: int | None = None,
        qk_size: int,
        v_size: int,
        o_size: int,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.queries_n = queries_n
        self.groups_n = groups_n
        self.i_size = i_size
        self.qk_size = qk_size
        self.v_size = v_size
        self.o_size = o_size

        if i_size is None:
            # Input width unknown: let LazyLinear infer it at first forward.
            self.k_linear = torch.nn.LazyLinear(groups_n * qk_size, bias=False, dtype=dtype, device=device)
            self.v_linear = torch.nn.LazyLinear(groups_n * v_size, bias=False, dtype=dtype, device=device)
        else:
            self.k_linear = torch.nn.Linear(i_size, groups_n * qk_size, bias=False, dtype=dtype, device=device)
            self.v_linear = torch.nn.Linear(i_size, groups_n * v_size, bias=False, dtype=dtype, device=device)

        self.attention = GroupQueryCrossAttention(
            queries_n=queries_n,
            groups_n=groups_n,
            i_size=i_size,
            qk_size=qk_size,
            v_size=v_size,
            o_size=o_size,
            dtype=dtype,
            device=device)

    def forward(self, *,
        x: torch.Tensor,
        mem_x: torch.Tensor,
        mem_mask: torch.Tensor | Literal['full'] = 'full',
        qkv_injector: QkvInjector | None = None,
        dropout: float | None = None,
    ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
        """
        :param x:        shape = [..., seq_len, i_size]
        :param mem_x:    shape = [..., mem_len, i_size]
        :param mem_mask: shape = [..., mem_len (k)], bool; or 'full' to attend everywhere
        :param qkv_injector: modify q,k,v with additional info (e.g. RoPE)
        :param dropout: float
        :return: y, (q, k, v)
            y.shape = [..., seq_len, o_size]
            q.shape = [..., seq_len, queries_n, qk_size]
            k.shape = [..., mem_len, groups_n, qk_size]
            v.shape = [..., mem_len, groups_n, v_size]
        """
        k = self.k_linear(mem_x)
        # [..., mem_len, groups_n * qk_size]
        v = self.v_linear(mem_x)
        # [..., mem_len, groups_n * v_size]

        # Split the flat projection into per-group heads.
        k = torch.reshape(k, (*k.shape[:-1], self.groups_n, self.qk_size))
        # [..., mem_len, groups_n, qk_size]
        v = torch.reshape(v, (*v.shape[:-1], self.groups_n, self.v_size))
        # [..., mem_len, groups_n, v_size]

        mask = make_nbptt_memory_mask(mem_mask)
        # [..., 1 (q), mem_len (k)]

        # Call the module (not .forward) so nn.Module.__call__ runs and any
        # registered forward/pre-forward hooks are honored.
        y, qkv = self.attention(
            x=x, k=k, v=v, mask=mask,
            qkv_injector=qkv_injector,
            dropout=dropout)

        return y, qkv


def make_nbptt_memory_mask(mask: torch.Tensor | Literal['full']) -> torch.Tensor | Literal['full']:
    if mask == 'full':
        return 'full'

    if mask.ndim == 0:
        mask = mask.unsqueeze(dim=-1)
        # [1 (k)], bool

    mask = mask.unsqueeze(dim=-2)
    # [..., 1 (q), mem_len (k)], bool

    return mask
