from dataclasses import dataclass

import torch

from .positioner import Positioner
from .router import SynergyRouter, SynergyRouterOutput
from .transformer import Transformer, TransformerOutput
from .utils import gather, scatter_add


@dataclass(kw_only=True)
class SynergyTransformerOutput(TransformerOutput):
    """Result of a SynergyTransformer forward pass.

    Extends TransformerOutput with the raw outputs of every sub-module,
    so callers can inspect intermediate results (e.g. the router's
    selection) in addition to the final embeddings.
    """
    tokens_out_emb: torch.Tensor  # final embeddings produced by the decoder, [..., chunk_tokens_n, emb_size]
    encoder: TransformerOutput  # full output of the encoder stage
    decoder: TransformerOutput  # full output of the decoder stage
    middle: TransformerOutput  # middle-transformer output; covers only the router-selected tokens
    router: SynergyRouterOutput  # routing result (selected indices and sigma gates)


class SynergyTransformer(torch.nn.Module):
    """Encoder → routed middle transformer → decoder.

    The router selects ``middle_tokens_n`` tokens from the encoded
    sequence; only those pass through the middle transformer, and their
    sigma-gated residual update is scattered back into the full sequence
    before decoding.
    """

    def __init__(self, *,
        encoder: Transformer,
        decoder: Transformer,
        middle: Transformer,
        router: SynergyRouter,
        positioner: Positioner | None = None,
    ):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.middle = middle
        self.router = router
        # Optional: produces positional embeddings for the routed subset.
        self.positioner = positioner

    def forward(self, *,
        middle_tokens_n: int,
        tokens_in_emb: torch.Tensor,
        tokens_in_pos_emb: torch.Tensor | None = None,
        tokens_in_mask: torch.Tensor | None = None,
        tokens_out_mask: torch.Tensor | None = None,
        router_temperature: torch.Tensor | float | None = None,
        encoder_kwargs: dict | None = None,
        decoder_kwargs: dict | None = None,
        middle_kwargs: dict | None = None,
    ) -> SynergyTransformerOutput:
        """Run the full encode / route / refine / decode pipeline.

        Args:
            middle_tokens_n: how many tokens the router selects for the
                middle transformer.
            tokens_in_emb: input embeddings, [..., chunk_tokens_n, emb_size].
            tokens_in_pos_emb: optional positional embeddings for the
                full sequence.
            tokens_in_mask / tokens_out_mask: optional per-token masks,
                [..., chunk_tokens_n]; routed subsets are gathered from
                them when present.
            router_temperature: forwarded to the router as ``temperature``.
            encoder_kwargs / decoder_kwargs / middle_kwargs: extra keyword
                arguments forwarded to the respective sub-module.

        Returns:
            SynergyTransformerOutput with the decoded embeddings and all
            intermediate sub-module outputs.
        """
        # Stage 1: encode the whole sequence.
        enc_out = self.encoder(
            tokens_in_emb=tokens_in_emb,
            tokens_in_pos_emb=tokens_in_pos_emb,
            tokens_in_mask=tokens_in_mask,
            tokens_out_mask=tokens_out_mask,
            **(encoder_kwargs or {}))
        encoded = enc_out.tokens_out_emb
        # [..., chunk_tokens_n, emb_size]

        # Stage 2: score the encoded tokens and pick the routed subset.
        routed = self.router(
            tokens_in_emb=encoded,
            tokens_out_mask=tokens_out_mask,
            middle_tokens_n=middle_tokens_n,
            temperature=router_temperature)
        sel_index = routed.middle_tokens_index  # [..., middle_tokens_n]
        sel_sigma = routed.middle_tokens_sigma  # [..., middle_tokens_n]

        # Stage 3: gather the selected tokens and the matching slices of
        # the positional embeddings / masks (only where provided).
        sel_emb = gather(encoded, sel_index, dim=-2)
        # [..., middle_tokens_n, emb_size]
        sel_pos_emb = None
        if self.positioner is not None:
            sel_pos_emb = self.positioner(
                router_output=routed, tokens_in_pos_emb=tokens_in_pos_emb)
            # [..., middle_tokens_n, pos_size]
        sel_in_mask = None
        if tokens_in_mask is not None:
            sel_in_mask = gather(tokens_in_mask, sel_index, dim=-1)
        sel_out_mask = None
        if tokens_out_mask is not None:
            sel_out_mask = gather(tokens_out_mask, sel_index, dim=-1)

        # Stage 4: refine the routed subset with the middle transformer.
        mid_out = self.middle(
            tokens_in_emb=sel_emb,
            tokens_in_pos_emb=sel_pos_emb,
            tokens_in_mask=sel_in_mask,
            tokens_out_mask=sel_out_mask,
            **(middle_kwargs or {}))

        # Stage 5: sigma-gated residual update, scattered back into the
        # full sequence at the routed positions.
        delta = (mid_out.tokens_out_emb - sel_emb) * sel_sigma.unsqueeze(-1)
        # [..., middle_tokens_n, emb_size]
        merged = scatter_add(encoded, delta, sel_index, dim=-2)
        # [..., chunk_tokens_n, emb_size]

        # Stage 6: decode the merged sequence.
        dec_out = self.decoder(
            tokens_in_emb=merged,
            tokens_in_pos_emb=tokens_in_pos_emb,
            tokens_in_mask=tokens_in_mask,
            tokens_out_mask=tokens_out_mask,
            **(decoder_kwargs or {}))

        return SynergyTransformerOutput(
            tokens_out_emb=dec_out.tokens_out_emb,
            encoder=enc_out,
            decoder=dec_out,
            middle=mid_out,
            router=routed)
