import abc
from typing import Optional

import torch.nn

from .nbptt_attention import NbpttAttention
from .neural_switch import BinaryNeuralSwitch
from .positional_encoding import apply_rotary_emb, compute_periods_cs

class NbpttAttentionMemory(abc.ABC):
    @abc.abstractmethod
    def append(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None):
        """
        :param x: shape=[..., seq_len, i_size]
        """
        pass

    @abc.abstractmethod
    def read(self) -> tuple[torch.Tensor, torch.Tensor] | None:
        """
        :return: (mem_x, mem_mask)
            mem_x:    shape=[..., mem_size, i_size]
            mem_mask: shape=[..., mem_size], dtype=bool
        """
        pass

    @abc.abstractmethod
    def reset(self, mask: Optional[torch.Tensor] = None):
        """
        :param mask: shape=[...], dtype=bool
        """
        pass

class NbpttAttentionWithMemory(torch.nn.Module):
    """Cross-attention from incoming tokens to an external token memory.

    Per forward pass:
      1. reset the memory where ``reset_mask`` says so,
      2. snapshot the memory (keys/values for this step),
      3. gate the incoming tokens through ``switch`` and append the selected
         ones (detached) for *future* steps,
      4. attend from ``x`` to the snapshot taken in step 2.

    Because the snapshot is taken before the append, tokens memorized in this
    call only become visible to subsequent calls.
    """

    def __init__(self,
        attention: NbpttAttention,
        memory: NbpttAttentionMemory,
        switch: BinaryNeuralSwitch,
    ):
        """
        :param attention: cross-attention module; called with
            (x, mem_x, mem_mask, qkv_injector) and expected to expose
            ``o_size``
        :param memory: token store implementing NbpttAttentionMemory
        :param switch: binary gate deciding which tokens enter the memory
        """
        super().__init__()
        self.attention = attention
        self.memory = memory
        self.switch = switch

    def forward(self, *,
        x: torch.Tensor,
        # fixed: was `Optional[torch.Tensor] | None` (redundant double-optional)
        reset_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        :param x: shape=[..., seq_len (q), i_size]
        :param reset_mask: shape=[...], dtype=bool; entries to clear in the
            memory before this step
        :return: y: shape=[..., seq_len (q), o_size]; all-zeros when the
            memory is empty (nothing to attend to)
        """
        self.memory.reset(reset_mask)
        # Snapshot BEFORE appending: this step must not attend to itself.
        memory = self.memory.read()

        # Gate output has shape [..., seq_len, 1]; squeeze to a per-token
        # mask of shape [..., seq_len].
        memorize_mask, _ = self.switch(x)
        memorize_mask = memorize_mask.squeeze(-1)
        # Detach so gradients never flow into earlier steps through the
        # memory (no-backprop-through-time).
        self.memory.append(x.detach(), memorize_mask)

        if memory is None:
            # Empty memory: skip attention entirely and emit zeros of the
            # expected output shape.
            return torch.zeros(
                size=x.shape[:-1] + (self.attention.o_size,),
                dtype=x.dtype, device=x.device)

        mem_x, mem_mask = memory
        # x:        shape=[..., seq_len (q), i_size]
        # mem_x:    shape=[..., mem_len (k), i_size]
        # mem_mask: shape=[..., mem_len (k)], dtype=bool

        def qkv_injector(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor):
            """Apply rotary positional embeddings to q and k (v untouched).

            Positions run over the concatenation [memory tokens, current
            tokens] and are flipped so the newest token sits at position 0 —
            presumably to keep encodings stable as the memory grows; confirm
            against compute_periods_cs.
            """
            qk_size = q.shape[-1]
            q_tokens_n = q.shape[-3]
            kv_tokens_n = k.shape[-3]

            periods_cs = compute_periods_cs(
                pos=torch.arange(
                    kv_tokens_n + q_tokens_n,
                    dtype=torch.float64,
                    device=q.device
                ).flip(0),
                size=qk_size,
                period_min=2,
                period_max=1024,
                dtype=q.dtype,
                device=q.device)

            # The trailing q_tokens_n positions belong to the queries; the
            # leading kv_tokens_n positions belong to the memory keys.
            q_periods_cs = periods_cs[-q_tokens_n:]
            k_periods_cs = periods_cs[:-q_tokens_n]

            q = apply_rotary_emb(q, q_periods_cs.unsqueeze(-2))
            k = apply_rotary_emb(k, k_periods_cs.unsqueeze(-2))
            return q, k, v

        # Call the module (not .forward directly) so nn.Module hooks fire.
        y, _ = self.attention(
            x=x,
            mem_x=mem_x,
            mem_mask=mem_mask,
            qkv_injector=qkv_injector)
        # [..., seq_len (q), o_size]

        return y
