from collections.abc import Iterable
from dataclasses import dataclass

import torch
from zkl_aiutils_neural import geometric_periods_sinusoidal_positional_encoding

from .matcher import EmbeddingMatcher
from .transformer import HaodarTransformer, HaodarTransformerHparams


@dataclass(kw_only=True)
class HaodarCausalLanguageModelHparams:
    """Hyperparameters for :class:`HaodarCausalLanguageModel`."""
    # Vocabulary size: number of distinct token ids the embedding table holds.
    embs_n: int
    # Dimensionality of each token embedding vector.
    emb_size: int
    # Size of the sinusoidal positional encoding; the model derives
    # periods_n = pos_size // 2 from it, so an even value is expected.
    pos_size: int
    # Nested hyperparameters forwarded to HaodarTransformer.construct.
    transformer: HaodarTransformerHparams


class HaodarCausalLanguageModel(torch.nn.Module):
    """Causal language model head-to-tail pipeline.

    Token ids are embedded, combined with a geometric-period sinusoidal
    positional encoding, pushed through a :class:`HaodarTransformer`, and
    finally scored against the embedding table by an
    :class:`EmbeddingMatcher` to produce per-token logits.
    """

    @classmethod
    def construct(cls, *,
        hparams: HaodarCausalLanguageModelHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        """Alternate constructor: build all submodules from hyperparameters.

        :param hparams: model hyperparameters (vocab size, embedding size,
            positional-encoding size, transformer config)
        :param dtype: parameter dtype for the new submodules, or torch default
        :param device: parameter device for the new submodules, or torch default
        """
        embedding = torch.nn.Embedding(
            hparams.embs_n,
            hparams.emb_size,
            dtype=dtype,
            device=device)
        transformer = HaodarTransformer.construct(
            hparams=hparams.transformer,
            dtype=dtype,
            device=device)
        return cls(
            pos_size=hparams.pos_size,
            embedding=embedding,
            transformer=transformer)

    def __init__(self, *,
        pos_size: int,
        embedding: torch.nn.Embedding,
        transformer: HaodarTransformer,
        matcher: EmbeddingMatcher | None = None,
    ):
        """Assemble the model from pre-built components.

        :param pos_size: size of the positional encoding (periods_n = pos_size // 2)
        :param embedding: token-id -> embedding lookup table
        :param transformer: transformer stack applied to the embedded sequence
        :param matcher: scorer mapping output embeddings to vocabulary logits;
            defaults to an EmbeddingMatcher tied to ``embedding``'s weights
        """
        super().__init__()
        if matcher is None:
            # Default: tie output scoring to the input embedding table.
            matcher = EmbeddingMatcher(embedding)
        self.pos_size = pos_size
        self.embedding = embedding
        self.transformer = transformer
        self.matcher = matcher

    def forward(self, *,
        tokens_in_wid: torch.Tensor,
        tokens_in_pos: torch.Tensor | None = None,
        tokens_in_mask: torch.Tensor | None = None,
        tokens_out_mask: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        layers_extra_tokens_kv: Iterable[tuple[torch.Tensor, torch.Tensor] | None] | None = None,
        layers_extra_tokens_mask: Iterable[torch.Tensor | None] | None = None,
        layers_extra_attention_mask: Iterable[torch.Tensor | None] | None = None,
        at_dropout: torch.Tensor | float = 0.0,
        ff_dropout: torch.Tensor | float = 0.0,
    ) -> tuple[torch.Tensor, tuple[tuple[torch.Tensor, torch.Tensor], ...]]:
        """Embed, encode positions, run the transformer, and score logits.

        :param tokens_in_wid: shape=[..., chunk_tokens_n], dtype=int64
        :param tokens_in_pos: shape=[..., chunk_tokens_n], dtype=int/float;
            defaults to 0..chunk_tokens_n-1 when omitted
        :param tokens_in_mask: shape=[..., chunk_tokens_n (k)], dtype=bool
        :param tokens_out_mask: shape=[..., chunk_tokens_n (q)], dtype=bool
        :param attention_mask: shape=[..., chunk_tokens_n (q), chunk_tokens_n (k)], dtype=bool
        :param layers_extra_tokens_kv: layers_n * (extra_tokens_k, extra_tokens_v)
            extra_tokens_k: shape=[..., extra_tokens_n (k), groups_n, qk_size]
            extra_tokens_v: shape=[..., extra_tokens_n (k), groups_n, v_size]
        :param layers_extra_tokens_mask: layers_n * extra_tokens_mask
            extra_tokens_mask: shape=[..., extra_tokens_n (k)], dtype=bool
        :param layers_extra_attention_mask: layers_n * extra_attention_mask
            extra_attention_mask: shape=[..., chunk_tokens_n (q), extra_tokens_n (k)], dtype=bool
        :param at_dropout: attention dropout probability
        :param ff_dropout: feed-forward dropout probability
        :return: (tokens_out_logits, layers_tokens_kv) where
            tokens_out_logits: shape=[..., chunk_tokens_n, embs_n], and
            layers_tokens_kv is the per-layer (k, v) cache emitted by the
            transformer (one (tensor, tensor) pair per layer)
        """
        # [..., chunk_tokens_n, emb_size]
        emb_in = self.embedding(tokens_in_wid)

        # Positions are handled in float64 for precision, then broadcast to
        # match the token grid: [..., chunk_tokens_n].
        if tokens_in_pos is None:
            positions = torch.arange(
                tokens_in_wid.shape[-1],
                dtype=torch.float64, device=tokens_in_wid.device)
        else:
            positions = tokens_in_pos.to(
                dtype=torch.float64, device=tokens_in_wid.device)
        positions = torch.broadcast_to(positions, tokens_in_wid.shape)

        # [..., chunk_tokens_n, pos_size]; cast to the embedding's dtype/device.
        pos_emb = geometric_periods_sinusoidal_positional_encoding(
            positions, periods_n=self.pos_size // 2)
        pos_emb = pos_emb.to(emb_in)

        # [..., chunk_tokens_n, emb_size]
        emb_out, layers_tokens_kv = self.transformer.forward(
            tokens_in_emb=emb_in,
            tokens_in_pos_emb=pos_emb,
            tokens_in_mask=tokens_in_mask,
            tokens_out_mask=tokens_out_mask,
            attention_mask=attention_mask,
            layers_extra_tokens_kv=layers_extra_tokens_kv,
            layers_extra_tokens_mask=layers_extra_tokens_mask,
            layers_extra_attention_mask=layers_extra_attention_mask,
            at_dropout=at_dropout,
            ff_dropout=ff_dropout)

        # [..., chunk_tokens_n, embs_n]
        logits = self.matcher.forward(emb_out)

        return logits, layers_tokens_kv
