from dataclasses import dataclass
from typing import Literal

import torch
from zkl_ptutils_neural import RMSNorm, SwishGluFeedForward

from .nbptt_attention import NbpttAttention
from .nbptt_attention_memory import NbpttAttentionWithMemory
from .nbptt_attention_memory_deque import DequeNbpttAttentionMemory
from .neural_switch import BinaryNeuralSwitch

@dataclass(kw_only=True)
class NbpttTransformerLayerHparams:
    """Hyperparameters for :class:`NbpttTransformerLayer`.

    ``kw_only=True`` makes all fields keyword-only, which is also what
    allows the defaulted ``i_size`` to precede the required fields.
    """
    # Input embedding size passed to NbpttAttention as i_size; None
    # presumably lets the attention module infer it — TODO confirm.
    i_size: int | None = None
    # Capacity of the DequeNbpttAttentionMemory (number of retained entries).
    memory_n: int
    # Number of attention queries (NbpttAttention queries_n).
    queries_n: int
    # Number of attention groups (NbpttAttention groups_n).
    groups_n: int
    # Per-head query/key size (NbpttAttention qk_size).
    qk_size: int
    # Per-head value size (NbpttAttention v_size).
    v_size: int
    # Attention output size AND feed-forward input size (they must match,
    # since the attention's o_size and the feed-forward's i_size both use it).
    m_size: int
    # Feed-forward hidden size (SwishGluFeedForward h_size).
    h_size: int
    # Feed-forward / layer output size (SwishGluFeedForward o_size).
    o_size: int

class NbpttTransformerLayer(torch.nn.Module):
    """Pre-norm transformer layer with memory-backed attention.

    Two residual sub-layers are applied in sequence: RMS-normed
    memory attention, then an RMS-normed SwiGLU feed-forward.
    """

    def __init__(self, *,
        hparams: NbpttTransformerLayerHparams,
        dtype: torch.dtype | None = None,
        device: torch.device | None = None,
    ):
        super().__init__()
        self.hparams = hparams

        # Assemble the memory-attention sub-layer from its three parts.
        attention = NbpttAttention(
            queries_n=hparams.queries_n,
            groups_n=hparams.groups_n,
            i_size=hparams.i_size,
            qk_size=hparams.qk_size,
            v_size=hparams.v_size,
            o_size=hparams.m_size,
            dtype=dtype,
            device=device)
        memory = DequeNbpttAttentionMemory(capacity=hparams.memory_n)
        switch = BinaryNeuralSwitch(dtype=dtype, device=device)

        self.mem_attention_norm = RMSNorm()
        self.mem_attention = NbpttAttentionWithMemory(
            attention=attention,
            memory=memory,
            switch=switch)

        self.feed_forward_norm = RMSNorm()
        self.feed_forward = SwishGluFeedForward(
            i_size=hparams.m_size,
            h_size=hparams.h_size,
            o_size=hparams.o_size,
            bias=False,
            dtype=dtype,
            device=device)

    def forward(self, *,
        emb: torch.Tensor,
        reset_mask: torch.Tensor | Literal['all'] | None = None,
    ) -> torch.Tensor:
        """Apply both residual sub-layers to ``emb``.

        ``reset_mask`` is forwarded to the memory attention untouched;
        its semantics live in NbpttAttentionWithMemory.
        """
        # NOTE(review): the residual adds require the attention output size
        # (m_size) to equal emb's feature size, and likewise o_size for the
        # feed-forward — confirm i_size == m_size == o_size upstream.

        # Residual around the pre-normed memory-attention sub-layer.
        attn_out = self.mem_attention(
            x=self.mem_attention_norm(emb),
            reset_mask=reset_mask)
        emb = attn_out + emb

        # Residual around the pre-normed feed-forward sub-layer.
        ff_out = self.feed_forward(self.feed_forward_norm(emb))
        return ff_out + emb
