from typing import Iterable, Literal

import torch
from zkl_neural import GroupQuerySelfAttention, RMSNorm, SwishGluFeedforward, \
    geometric_periods_sinusoidal_positional_encoding, pad_or_crop_add, rms_norm


class GPTTransformerLayer(torch.nn.Module):
    """One pre-norm GPT block: causal group-query self-attention followed by a
    SwiGLU feedforward. Each sub-block applies ``rms_norm`` first and joins its
    result to the residual with ``pad_or_crop_add``, so the input/output widths
    need not match ``m_size``/``o_size`` exactly.
    """

    def __init__(self, *,
        queries_n: int,
        groups_n: int,
        qk_size: int,
        v_size: int,
        m_size: int,
        h_size: int,
        o_size: int | None = None,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """
        :param queries_n: number of query heads
        :param groups_n: number of key/value groups (group-query attention)
        :param qk_size: per-head query/key width
        :param v_size: per-head value width
        :param m_size: model width (attention output / feedforward input)
        :param h_size: feedforward hidden width
        :param o_size: feedforward output width; defaults to m_size
        :param dtype: parameter dtype forwarded to the sub-modules
        :param device: parameter device forwarded to the sub-modules
        """
        super().__init__()
        o_size = m_size if o_size is None else o_size

        self.queries_n = queries_n
        self.groups_n = groups_n
        self.qk_size = qk_size
        self.v_size = v_size
        self.m_size = m_size
        self.h_size = h_size
        self.o_size = o_size

        self.attention = GroupQuerySelfAttention(
            queries_n=queries_n,
            groups_n=groups_n,
            qk_size=qk_size,
            v_size=v_size,
            o_size=m_size,
            dtype=dtype,
            device=device)
        self.feedforward = SwishGluFeedforward(
            i_size=m_size,
            h_size=h_size,
            o_size=o_size,
            dtype=dtype,
            device=device)

    def forward(self,
        x: torch.Tensor,
        pos: torch.Tensor | None = None, *,
        extra_kv: tuple[torch.Tensor, torch.Tensor] | None = None,
        extra_mask: torch.Tensor | Literal['full'] = 'full',
        at_dropout: float = 0.0,
        ff_dropout: float = 0.0,
    ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
        """
        :param x: shape=[..., seq_len, i_size]
        :param pos: optional positional encoding, shape=[..., seq_len, pos_size];
            concatenated onto the normalized input before attention
        :param extra_kv: (extra_k, extra_v) prefix keys/values to attend over
            extra_k.shape=[..., ext_len, groups_n, qk_size]
            extra_v.shape=[..., ext_len, groups_n, v_size]
        :param extra_mask: shape = [(..., seq_len), ext_len], or 'full'
        :param at_dropout: attention dropout probability
        :param ff_dropout: feedforward dropout probability
        :return: (x, (k, v))
            x.shape=[..., seq_len, o_size]
            k.shape=[..., seq_len, groups_n, qk_size]
            v.shape=[..., seq_len, groups_n, v_size]
        """

        # self attention: pre-norm, causal mask; positional features are
        # concatenated (not added) so the attention sees them as extra channels
        x_residual = x
        x = rms_norm(x)
        x = x if pos is None else torch.cat([x, pos], dim=-1)
        x, (_, *kv) = self.attention.forward(x,
            mask='causal',
            extra_kv=extra_kv,
            extra_mask=extra_mask,
            dropout=at_dropout,
            return_qkv=True)
        x = pad_or_crop_add(x, x_residual, dim=-1)

        # feedforward: pre-norm, residual joined with pad/crop as above
        x_residual = x
        x = rms_norm(x)
        x = self.feedforward.forward(x,
            dropout=ff_dropout)
        x = pad_or_crop_add(x, x_residual, dim=-1)

        # fix: star-unpacking made kv a list; convert so the return value
        # matches the annotated tuple[torch.Tensor, torch.Tensor]
        return x, tuple(kv)


class GPTTransformer(torch.nn.Module):
    """A stack of ``layers_n`` GPTTransformerLayer blocks sharing one geometry.
    Only the final layer projects to ``o_size`` (which defaults to ``m_size``).
    """

    def __init__(self, *,
        layers_n: int,
        pos_size: int,
        queries_n: int,
        groups_n: int,
        qk_size: int,
        v_size: int,
        m_size: int,
        h_size: int,
        o_size: int | None = None,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """
        :param layers_n: number of transformer layers; must be >= 1
        :param pos_size: positional-encoding width the caller appends via ``pos``
        :param queries_n: number of query heads per layer
        :param groups_n: number of key/value groups per layer (GQA)
        :param qk_size: per-head query/key width
        :param v_size: per-head value width
        :param m_size: model width between layers
        :param h_size: feedforward hidden width
        :param o_size: output width of the final layer; defaults to m_size
        :param dtype: parameter dtype forwarded to the layers
        :param device: parameter device forwarded to the layers
        """
        super().__init__()
        # fix: with layers_n == 0 the previous construction still built one
        # (never-run) layer; reject invalid depths explicitly instead
        if layers_n < 1:
            raise ValueError(f'layers_n must be >= 1, got {layers_n}')
        o_size = m_size if o_size is None else o_size

        self.layers_n = layers_n
        self.pos_size = pos_size
        self.queries_n = queries_n
        self.groups_n = groups_n
        self.qk_size = qk_size
        self.v_size = v_size
        self.m_size = m_size
        self.h_size = h_size
        self.o_size = o_size

        # all layers are identical except the last, which projects to o_size;
        # o_size=None lets intermediate layers default to m_size
        self.layers = torch.nn.ModuleList([
            GPTTransformerLayer(
                queries_n=queries_n,
                groups_n=groups_n,
                qk_size=qk_size,
                v_size=v_size,
                m_size=m_size,
                h_size=h_size,
                o_size=o_size if layer_i == layers_n - 1 else None,
                dtype=dtype, device=device)
            for layer_i in range(layers_n)])

    def forward(self,
        x: torch.Tensor,
        pos: torch.Tensor | None = None, *,
        layers_extra_kv: Iterable[tuple[torch.Tensor, torch.Tensor]] | None = None,
        layers_extra_mask: Iterable[torch.Tensor] | None = None,
        at_dropout: float = 0.0,
        ff_dropout: float = 0.0,
    ) -> tuple[torch.Tensor, tuple[tuple[torch.Tensor, torch.Tensor], ...]]:
        """
        :param x: shape=[..., seq_len, i_size]
        :param pos: shape=[..., seq_len, pos_size]; passed unchanged to every layer
        :param layers_extra_kv: layers_n * (extra_k, extra_v)
            extra_k.shape=[..., ext_len, groups_n, qk_size]
            extra_v.shape=[..., ext_len, groups_n, v_size]
        :param layers_extra_mask: layers_n * extra_mask
            extra_mask.shape=[(..., seq_len), ext_len]
        :param at_dropout: attention dropout probability
        :param ff_dropout: feedforward dropout probability
        :return: (x, layers_n * (k, v))
            x.shape=[..., seq_len, o_size]
            k.shape=[..., seq_len, groups_n, qk_size]
            v.shape=[..., seq_len, groups_n, v_size]
        """
        # materialize once so single-pass iterables can be indexed per layer
        layers_extra_kv = tuple(layers_extra_kv) if layers_extra_kv is not None else None
        layers_extra_mask = tuple(layers_extra_mask) if layers_extra_mask is not None else None

        layers_kv = []
        for layer_i, layer in enumerate(self.layers):
            assert isinstance(layer, GPTTransformerLayer)  # narrow for type-checkers
            extra_kv = layers_extra_kv[layer_i] if layers_extra_kv is not None else None
            # fix: fall back to 'full' (the layer's declared default) rather than
            # passing None, which is outside the layer's annotated extra_mask type
            extra_mask = layers_extra_mask[layer_i] if layers_extra_mask is not None else 'full'

            x, kv = layer.forward(x, pos,
                extra_kv=extra_kv, extra_mask=extra_mask,
                at_dropout=at_dropout, ff_dropout=ff_dropout)
            layers_kv.append(kv)

        return x, tuple(layers_kv)


class GPT(torch.nn.Module):
    """Decoder-only language model: tied token embedding, sinusoidal positional
    features fed to a GPTTransformer stack, RMS output norm, and logits computed
    by matching against the (rms-normed) embedding table.
    """

    def __init__(self, *,
        vocab_size: int,
        emb_size: int = 256,
        layers_n: int = 4,
        pos_size: int = 16,
        queries_n: int = 8,
        groups_n: int = 1,
        qk_size: int = 32,
        v_size: int = 128,
        m_size: int = 256,
        h_size: int = 512,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """
        :param vocab_size: number of distinct tokens
        :param emb_size: embedding width; also the transformer output width,
            so the embedding table can be reused for output matching
        :param layers_n: number of transformer layers
        :param pos_size: positional-encoding width (pos_size // 2 sinusoid periods)
        :param queries_n: query heads per layer
        :param groups_n: key/value groups per layer (GQA)
        :param qk_size: per-head query/key width
        :param v_size: per-head value width
        :param m_size: model width between layers
        :param h_size: feedforward hidden width
        :param dtype: parameter dtype
        :param device: parameter device
        """
        super().__init__()
        self.pos_size = pos_size

        self.embedding = torch.nn.Embedding(
            vocab_size,
            emb_size,
            dtype=dtype,
            device=device)
        self.transformer = GPTTransformer(
            layers_n=layers_n,
            pos_size=pos_size,
            queries_n=queries_n,
            groups_n=groups_n,
            qk_size=qk_size,
            v_size=v_size,
            m_size=m_size,
            h_size=h_size,
            o_size=emb_size,
            dtype=dtype,
            device=device)
        self.output_norm = RMSNorm(
            emb_size,
            dtype=dtype,
            device=device)

    def forward(self,
        tokens: torch.Tensor,
        positions: torch.Tensor | None = None, *,
        layers_extra_kv: Iterable[tuple[torch.Tensor, torch.Tensor]] | None = None,
        at_dropout: float = 0.0,
        ff_dropout: float = 0.0,
    ) -> tuple[torch.Tensor, tuple[tuple[torch.Tensor, torch.Tensor], ...]]:
        """
        :param tokens: shape=[..., seq_len], dtype=int64
        :param positions: shape=[(...)]; defaults to arange(seq_len)
        :param layers_extra_kv: layers_n * (extra_k, extra_v)
            extra_k.shape=[..., ext_len, groups_n, qk_size]
            extra_v.shape=[..., ext_len, groups_n, v_size]
        :param at_dropout: attention dropout probability
        :param ff_dropout: feedforward dropout probability
        :return: (logits, layers_n * (k, v))
            logits.shape=[..., seq_len, vocab_size]
            k.shape=[..., seq_len, groups_n, qk_size]
            v.shape=[..., seq_len, groups_n, v_size]
        """
        tokens = self.embedding(tokens)
        tokens = rms_norm(tokens)
        # [..., seq_len, emb_size]

        # positions are built in float64 so the sinusoidal encoding stays
        # precise for large indices, then cast back to the model dtype
        positions = torch.arange(tokens.shape[-2], dtype=torch.float64, device=tokens.device) \
            if positions is None else positions.to(dtype=torch.float64, device=tokens.device)
        positions = torch.broadcast_to(positions, tokens.shape[:-1])
        # [..., seq_len]

        positions = geometric_periods_sinusoidal_positional_encoding(positions, periods_n=self.pos_size // 2)
        positions = positions.to(tokens)
        # [..., seq_len, pos_size]

        x, layers_kv = self.transformer.forward(
            tokens, positions,
            layers_extra_kv=layers_extra_kv,
            at_dropout=at_dropout,
            ff_dropout=ff_dropout)
        x = self.output_norm(x)
        # [..., seq_len, emb_size]

        logits = self.match(x)
        # [..., seq_len, vocab_size]

        return logits, layers_kv

    def match(self, x: torch.Tensor):
        """Score x against the rms-normed embedding table (tied output weights).

        :param x: shape=[..., emb_size]
        :return: logits, shape=[..., vocab_size]
        """
        embeddings = self.embedding.weight
        embeddings = rms_norm(embeddings)
        # [vocab_size, emb_size]

        x = torch.unsqueeze(x, dim=-2)
        # [..., 1, emb_size]

        logits = torch.sum(x * embeddings, dim=-1)
        # [..., vocab_size]

        # NOTE(review): this local was previously named `vocab_size`, but
        # x.shape[-1] here is emb_size (x is [..., 1, emb_size]). Scaling by
        # rsqrt(emb_size) keeps dot products of unit-RMS vectors O(1), so the
        # computed value looks intentional — only the name was misleading.
        # The 4.0 temperature factor's rationale isn't visible here; confirm
        # against training configs before changing it.
        emb_size = torch.asarray(x.shape[-1], dtype=x.dtype, device=x.device)
        scale = torch.rsqrt(emb_size) * 4.0
        logits = logits * scale
        # [..., vocab_size]

        return logits
