from collections.abc import Iterable
from dataclasses import dataclass
from typing import Self

import torch

from .transformer_layer import HaodarTransformerLayer, HaodarTransformerLayerHparams


@dataclass(kw_only=True)
class HaodarTransformerHparams:
    """Hyper-parameters for a HaodarTransformer stack.

    Everything except ``layers_n`` and ``o_size`` is forwarded verbatim to
    each layer's HaodarTransformerLayerHparams; see that class for the exact
    semantics of the per-layer fields.
    """
    # number of stacked HaodarTransformerLayer blocks
    layers_n: int
    # per-layer attention parameters, forwarded to every layer
    # (presumably query-head count, KV-group count, and per-head
    # query/key and value sizes — defined in HaodarTransformerLayerHparams)
    queries_n: int
    groups_n: int
    qk_size: int
    v_size: int
    # inter-layer embedding size: every layer but the last is constructed
    # with o_size=m_size so its output feeds the next layer
    m_size: int
    # hidden size forwarded to every layer (semantics defined in the layer)
    h_size: int
    # output size of the final layer only; None defers to the layer's
    # own default handling — not visible from this file
    o_size: int | None = None


class HaodarTransformer(torch.nn.Module):
    """A stack of HaodarTransformerLayer blocks applied in sequence.

    Every layer except the last projects its output to ``m_size`` so the next
    layer can consume it; only the final layer projects to ``o_size``.
    """

    @classmethod
    def construct(cls, *,
        hparams: HaodarTransformerHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None
    ) -> Self:
        """Build a transformer with ``hparams.layers_n`` freshly constructed layers.

        :param hparams: stack-level hyper-parameters
        :param dtype: parameter dtype forwarded to every layer
        :param device: parameter device forwarded to every layer
        :return: a new instance of ``cls``
        """
        last_i = hparams.layers_n - 1
        return cls(
            HaodarTransformerLayer.construct(
                hparams=HaodarTransformerLayerHparams(
                    queries_n=hparams.queries_n,
                    groups_n=hparams.groups_n,
                    qk_size=hparams.qk_size,
                    v_size=hparams.v_size,
                    m_size=hparams.m_size,
                    h_size=hparams.h_size,
                    # intermediate layers must emit m_size embeddings so the
                    # next layer can consume them; only the last layer emits
                    # the requested o_size
                    o_size=hparams.o_size if i == last_i else hparams.m_size),
                dtype=dtype, device=device,
            ) for i in range(hparams.layers_n))

    def __init__(self, layers: Iterable[HaodarTransformerLayer]):
        """:param layers: the transformer layers to apply, in order"""
        super().__init__()
        self.layers = torch.nn.ModuleList(layers)

    def forward(self, *,
        tokens_in_emb: torch.Tensor,
        tokens_in_pos_emb: torch.Tensor | None = None,
        tokens_in_mask: torch.Tensor | None = None,
        tokens_out_mask: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        layers_extra_tokens_kv: Iterable[tuple[torch.Tensor, torch.Tensor] | None] | None = None,
        layers_extra_tokens_mask: Iterable[torch.Tensor | None] | None = None,
        layers_extra_attention_mask: Iterable[torch.Tensor | None] | None = None,
        at_dropout: torch.Tensor | float = 0.0,
        ff_dropout: torch.Tensor | float = 0.0,
    ) -> tuple[torch.Tensor, tuple[tuple[torch.Tensor, torch.Tensor], ...]]:
        """
        Run the input embeddings through every layer in order, threading each
        layer's output embeddings into the next layer's input.

        :param tokens_in_emb: shape=[..., chunk_tokens_n, i_size]
        :param tokens_in_pos_emb: shape=[..., chunk_tokens_n, pos_size]
        :param tokens_in_mask: shape=[..., chunk_tokens_n (k)], dtype=bool
        :param tokens_out_mask: shape=[..., chunk_tokens_n (q)], dtype=bool
        :param attention_mask: shape=[..., chunk_tokens_n (q), chunk_tokens_n (k)], dtype=bool
        :param layers_extra_tokens_kv: layers_n * (extra_tokens_k, extra_tokens_v)
            extra_tokens_k: shape=[..., extra_tokens_n (k), groups_n, qk_size]
            extra_tokens_v: shape=[..., extra_tokens_n (k), groups_n, v_size]
        :param layers_extra_tokens_mask: layers_n * extra_tokens_mask
            extra_tokens_mask: shape=[..., extra_tokens_n (k)], dtype=bool
        :param layers_extra_attention_mask: layers_n * extra_attention_mask
            extra_attention_mask: shape=[..., chunk_tokens_n (q), extra_tokens_n (k)], dtype=bool
        :param at_dropout: float
        :param ff_dropout: float
        :raises ValueError: if any per-layer iterable does not yield exactly
            one item per layer
        :return: (tokens_out_emb, layers_n * (tokens_k, tokens_v))
            tokens_out_emb: shape=[..., chunk_tokens_n (q), o_size]
            tokens_k: shape=[..., chunk_tokens_n (q), groups_n, qk_size]
            tokens_v: shape=[..., chunk_tokens_n (q), groups_n, v_size]
        """
        layers_n = len(self.layers)

        # Default any per-layer argument that was not supplied to one
        # None per layer.
        if layers_extra_tokens_kv is None:
            layers_extra_tokens_kv = [None] * layers_n
        if layers_extra_tokens_mask is None:
            layers_extra_tokens_mask = [None] * layers_n
        if layers_extra_attention_mask is None:
            layers_extra_attention_mask = [None] * layers_n

        tokens_emb = tokens_in_emb
        layers_tokens_kv = []
        # strict=True: a plain zip would silently truncate on a wrong-length
        # per-layer argument, skipping the remaining transformer layers.
        for layer, extra_tokens_kv, extra_tokens_mask, extra_attention_mask \
            in zip(self.layers, layers_extra_tokens_kv, layers_extra_tokens_mask,
                   layers_extra_attention_mask, strict=True):
            assert isinstance(layer, HaodarTransformerLayer)
            tokens_emb, tokens_kv = layer.forward(
                tokens_in_emb=tokens_emb,
                tokens_in_pos_emb=tokens_in_pos_emb,
                tokens_in_mask=tokens_in_mask,
                tokens_out_mask=tokens_out_mask,
                attention_mask=attention_mask,
                extra_tokens_kv=extra_tokens_kv,
                extra_tokens_mask=extra_tokens_mask,
                extra_attention_mask=extra_attention_mask,
                at_dropout=at_dropout,
                ff_dropout=ff_dropout)
            layers_tokens_kv.append(tokens_kv)

        return tokens_emb, tuple(layers_tokens_kv)
