from typing import Iterable, Literal

import torch
from zkl_neural import GroupQuerySelfAttention, RMSNorm, SwishGluFeedforward, \
    geometric_periods_sinusoidal_positional_encoding, pad_or_crop_add, rms_norm

from .utils import make_causal_attention_mask

KV = tuple[torch.Tensor, torch.Tensor]


class GPTTransformerLayer(torch.nn.Module):
    """A single pre-norm GPT transformer layer.

    Grouped-query self-attention followed by a SwiGLU feedforward, each
    preceded by an RMS-norm and wrapped in a residual connection via
    ``pad_or_crop_add`` (which tolerates input/output size mismatches).
    """

    def __init__(self, *,
        queries_n: int,
        groups_n: int,
        qk_size: int,
        v_size: int,
        m_size: int,
        h_size: int,
        o_size: int | None = None,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """
        :param queries_n: number of attention query heads
        :param groups_n: number of key/value groups shared across query heads
        :param qk_size: per-head query/key feature size
        :param v_size: per-head value feature size
        :param m_size: model (residual-stream) feature size
        :param h_size: feedforward hidden size
        :param o_size: layer output size; defaults to m_size
        :param dtype: parameter dtype
        :param device: parameter device
        """
        super().__init__()
        o_size = m_size if o_size is None else o_size

        self.queries_n = queries_n
        self.groups_n = groups_n
        self.qk_size = qk_size
        self.v_size = v_size
        self.m_size = m_size
        self.h_size = h_size
        self.o_size = o_size

        self.attention = GroupQuerySelfAttention(
            queries_n=queries_n,
            groups_n=groups_n,
            qk_size=qk_size,
            v_size=v_size,
            o_size=m_size,
            dtype=dtype,
            device=device)
        self.feedforward = SwishGluFeedforward(
            i_size=m_size,
            h_size=h_size,
            o_size=o_size,
            dtype=dtype,
            device=device)

    def forward(self,
        tokens_emb: torch.Tensor,
        tokens_pos_emb: torch.Tensor | None = None, *,
        tokens_mask: torch.Tensor | None = None,
        extra_tokens_kv: KV | None = None,
        extra_tokens_mask: torch.Tensor | Literal['full'] = 'full',
        at_dropout: torch.Tensor | float = 0.0,
        ff_dropout: torch.Tensor | float = 0.0,
    ) -> tuple[torch.Tensor, KV]:
        """
        :param tokens_emb: shape=[..., seq_len, i_size]
        :param tokens_pos_emb: shape=[..., seq_len, pos_size]; when given, it
            is concatenated to the normalized embeddings before attention
        :param tokens_mask: shape=[(..., seq_len), seq_len], bool; combined
            element-wise with the causal mask
        :param extra_tokens_kv: (extra_tokens_k, extra_tokens_v)
            extra_tokens_k.shape=[..., ext_len, groups_n, qk_size]
            extra_tokens_v.shape=[..., ext_len, groups_n, v_size]
        :param extra_tokens_mask: shape = [(..., seq_len), ext_len]
        :param at_dropout: attention dropout probability
        :param ff_dropout: feedforward dropout probability
        :return: (tokens_emb, (tokens_k, tokens_v))
            tokens_emb.shape=[..., seq_len, o_size]
            tokens_k.shape=[..., seq_len, groups_n, qk_size]
            tokens_v.shape=[..., seq_len, groups_n, v_size]
        """

        # attention
        tokens_emb_residual = tokens_emb
        tokens_emb = rms_norm(tokens_emb)
        x = torch.cat([tokens_emb, tokens_pos_emb], dim=-1) \
            if tokens_pos_emb is not None else tokens_emb

        if tokens_mask is not None:
            causal_mask = make_causal_attention_mask(tokens_emb.shape[-2], device=tokens_emb.device)
            # BUGFIX: was `causal_mask and tokens_mask` — the Python `and`
            # operator calls Tensor.__bool__, which raises for multi-element
            # tensors and never combines the masks. `&` is the element-wise
            # boolean AND intended here.
            mask = causal_mask & tokens_mask
        else:
            mask = 'causal'

        tokens_emb, (_, *tokens_kv) = self.attention.forward(
            x, mask=mask,
            extra_kv=extra_tokens_kv,
            extra_mask=extra_tokens_mask,
            dropout=at_dropout,
            return_qkv=True)
        tokens_emb = pad_or_crop_add(tokens_emb, tokens_emb_residual, dim=-1)

        # feedforward
        tokens_emb_residual = tokens_emb
        tokens_emb = rms_norm(tokens_emb)
        tokens_emb = self.feedforward.forward(tokens_emb,
            dropout=ff_dropout)
        tokens_emb = pad_or_crop_add(tokens_emb, tokens_emb_residual, dim=-1)

        # Starred unpacking above yields a list; return a tuple to match the
        # declared KV return type.
        tokens_k, tokens_v = tokens_kv
        return tokens_emb, (tokens_k, tokens_v)


class GPTTransformer(torch.nn.Module):
    """A stack of ``GPTTransformerLayer`` blocks.

    Every layer shares the same internal sizes; only the final layer may
    project to a different output size (``o_size``).
    """

    def __init__(self, *,
        layers_n: int,
        pos_size: int,
        queries_n: int,
        groups_n: int,
        qk_size: int,
        v_size: int,
        m_size: int,
        h_size: int,
        o_size: int | None = None,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """
        :param layers_n: number of transformer layers; must be positive
        :param pos_size: positional-encoding feature size (recorded only)
        :param o_size: output size of the final layer; defaults to m_size
        :raises ValueError: if layers_n is not positive
        """
        super().__init__()
        o_size = m_size if o_size is None else o_size

        self.layers_n = layers_n
        self.pos_size = pos_size
        self.queries_n = queries_n
        self.groups_n = groups_n
        self.qk_size = qk_size
        self.v_size = v_size
        self.m_size = m_size
        self.h_size = h_size
        self.o_size = o_size

        if not layers_n > 0:
            raise ValueError(f"Expected layers_n>0, found layers_n={layers_n}")

        # All layers share these settings; the final layer also overrides
        # o_size so the stack emits the requested output size.
        shared = dict(
            queries_n=queries_n,
            groups_n=groups_n,
            qk_size=qk_size,
            v_size=v_size,
            m_size=m_size,
            h_size=h_size,
            dtype=dtype,
            device=device)
        self.layers = torch.nn.ModuleList(
            [GPTTransformerLayer(**shared) for _ in range(layers_n - 1)]
            + [GPTTransformerLayer(o_size=o_size, **shared)])

    def forward(self,
        tokens_emb: torch.Tensor,
        tokens_pos_emb: torch.Tensor | None = None, *,
        tokens_mask: torch.Tensor | None = None,
        layers_extra_tokens_kv: Iterable[KV] | None = None,
        layers_extra_tokens_mask: Iterable[torch.Tensor] | None = None,
        at_dropout: torch.Tensor | float = 0.0,
        ff_dropout: torch.Tensor | float = 0.0,
    ) -> tuple[torch.Tensor, tuple[KV, ...]]:
        """
        :param tokens_emb: shape=[..., seq_len, i_size]
        :param tokens_pos_emb: shape=[..., seq_len, pos_size]; forwarded to
            every layer unchanged
        :param tokens_mask: shape=[(..., seq_len), seq_len], bool
        :param layers_extra_tokens_kv: layers_n * (extra_tokens_k, extra_tokens_v)
            extra_tokens_k.shape=[..., ext_len, groups_n, qk_size]
            extra_tokens_v.shape=[..., ext_len, groups_n, v_size]
        :param layers_extra_tokens_mask: layers_n * extra_tokens_mask
            extra_tokens_mask.shape=[(..., seq_len), ext_len]
        :param at_dropout: attention dropout probability
        :param ff_dropout: feedforward dropout probability
        :return: (tokens_emb, layers_n * (tokens_k, tokens_v))
            tokens_emb.shape=[..., seq_len, o_size]
            tokens_k.shape=[..., seq_len, groups_n, qk_size]
            tokens_v.shape=[..., seq_len, groups_n, v_size]
        """
        # Materialize the per-layer iterables once so they can be indexed.
        if layers_extra_tokens_kv is not None:
            layers_extra_tokens_kv = tuple(layers_extra_tokens_kv)
        if layers_extra_tokens_mask is not None:
            layers_extra_tokens_mask = tuple(layers_extra_tokens_mask)

        layers_tokens_kv: list[KV] = []
        for layer_i, layer in enumerate(self.layers):
            assert isinstance(layer, GPTTransformerLayer)

            extra_tokens_kv = None
            if layers_extra_tokens_kv is not None:
                extra_tokens_kv = layers_extra_tokens_kv[layer_i]
            extra_tokens_mask = None
            if layers_extra_tokens_mask is not None:
                extra_tokens_mask = layers_extra_tokens_mask[layer_i]

            tokens_emb, tokens_kv = layer.forward(
                tokens_emb, tokens_pos_emb,
                tokens_mask=tokens_mask,
                extra_tokens_kv=extra_tokens_kv,
                extra_tokens_mask=extra_tokens_mask,
                at_dropout=at_dropout,
                ff_dropout=ff_dropout)
            layers_tokens_kv.append(tokens_kv)

        return tokens_emb, tuple(layers_tokens_kv)


class GPT(torch.nn.Module):
    """Decoder-only GPT language model.

    Token embeddings are tied: the same embedding matrix maps word ids to
    vectors on the way in and scores output vectors against the vocabulary
    on the way out (see :meth:`match`).
    """

    def __init__(self, *,
        vocab_size: int,
        emb_size: int = 256,
        layers_n: int = 4,
        pos_size: int = 16,
        queries_n: int = 8,
        groups_n: int = 1,
        qk_size: int = 32,
        v_size: int = 128,
        m_size: int = 256,
        h_size: int = 512,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """
        :param vocab_size: number of word ids in the vocabulary
        :param emb_size: token-embedding size (also the transformer output size)
        :param layers_n: number of transformer layers
        :param pos_size: positional-encoding feature size
        """
        super().__init__()
        self.pos_size = pos_size

        self.embedding = torch.nn.Embedding(
            vocab_size,
            emb_size,
            dtype=dtype,
            device=device)
        self.transformer = GPTTransformer(
            layers_n=layers_n,
            pos_size=pos_size,
            queries_n=queries_n,
            groups_n=groups_n,
            qk_size=qk_size,
            v_size=v_size,
            m_size=m_size,
            h_size=h_size,
            o_size=emb_size,
            dtype=dtype,
            device=device)
        self.output_norm = RMSNorm(
            emb_size,
            dtype=dtype,
            device=device)

    def forward(self,
        tokens_wid: torch.Tensor,
        tokens_pos: torch.Tensor | None = None, *,
        layers_extra_tokens_kv: Iterable[KV] | None = None,
        layers_extra_tokens_mask: Iterable[torch.Tensor] | None = None,
        at_dropout: torch.Tensor | float = 0.0,
        ff_dropout: torch.Tensor | float = 0.0,
    ) -> tuple[torch.Tensor, tuple[KV, ...]]:
        """
        :param tokens_wid: shape=[..., seq_len], dtype=int64
        :param tokens_pos: shape=[..., seq_len]; defaults to 0..seq_len-1
        :param layers_extra_tokens_kv: layers_n * (extra_tokens_k, extra_tokens_v)
            extra_tokens_k.shape=[..., ext_len, groups_n, qk_size]
            extra_tokens_v.shape=[..., ext_len, groups_n, v_size]
        :param layers_extra_tokens_mask: layers_n * extra_tokens_mask
            extra_tokens_mask.shape=[(..., seq_len), ext_len]
        :param at_dropout: attention dropout probability
        :param ff_dropout: feedforward dropout probability
        :return: (tokens_logits, layers_n * (tokens_k, tokens_v))
            tokens_logits.shape=[..., seq_len, vocab_size]
            tokens_k.shape=[..., seq_len, groups_n, qk_size]
            tokens_v.shape=[..., seq_len, groups_n, v_size]
        """
        # [..., seq_len, emb_size]
        tokens_emb = rms_norm(self.embedding(tokens_wid))

        # Positions as float64 on the input device, broadcast to the batch
        # shape. [..., seq_len]
        if tokens_pos is None:
            tokens_pos = torch.arange(
                tokens_wid.shape[-1], dtype=torch.float64, device=tokens_wid.device)
        else:
            tokens_pos = tokens_pos.to(dtype=torch.float64, device=tokens_wid.device)
        tokens_pos = torch.broadcast_to(tokens_pos, tokens_wid.shape)

        # [..., seq_len, pos_size] — cast to the embedding dtype/device.
        tokens_pos_emb = geometric_periods_sinusoidal_positional_encoding(
            tokens_pos, periods_n=self.pos_size // 2).to(tokens_emb)

        tokens_emb, layers_tokens_kv = self.transformer.forward(
            tokens_emb, tokens_pos_emb,
            layers_extra_tokens_kv=layers_extra_tokens_kv,
            layers_extra_tokens_mask=layers_extra_tokens_mask,
            at_dropout=at_dropout,
            ff_dropout=ff_dropout)

        # [..., seq_len, vocab_size]
        tokens_logits = self.match(self.output_norm(tokens_emb))

        return tokens_logits, layers_tokens_kv

    def match(self, tokens_emb: torch.Tensor):
        """Score embeddings against the RMS-normalized tied vocabulary matrix.

        :param tokens_emb: shape=[..., emb_size]
        :return: shape=[..., vocab_size]
        """
        # [vocab_size, emb_size]
        vocab_emb = rms_norm(self.embedding.weight)

        # scalar vocab size, matching the query dtype/device
        vocab_n = torch.asarray(
            vocab_emb.shape[0], dtype=tokens_emb.dtype, device=tokens_emb.device)

        # Dot product of each token embedding with every vocabulary
        # embedding via broadcasting: [..., 1, emb_size] * [vocab_size, emb_size]
        # summed over the feature axis -> [..., vocab_size].
        tokens_logits = torch.sum(
            torch.unsqueeze(tokens_emb, dim=-2) * vocab_emb, dim=-1)

        # Fixed logit scale 4/sqrt(vocab_size) — presumably a temperature
        # tuned for the tied-embedding match; value kept as-is.
        scale = torch.rsqrt(vocab_n) * 4.0
        return tokens_logits * scale
