from typing import Callable, Optional, TypedDict, Unpack

import torch

from .router import SynergyRouterOutput
from .utils import gather


class PositionerKwargs(TypedDict, total=False):
    """Keyword arguments accepted by a :data:`Positioner` callable.

    ``total=False`` makes every key optional, so a positioner can declare
    only the arguments it needs and swallow the rest with ``**_``.
    """

    # Router output; positioners below read its middle_tokens_index,
    # middle_tokens_sigma, and tokens_sigma attributes.
    router_output: SynergyRouterOutput
    # Precomputed positional embeddings of the input tokens, if available.
    # NOTE(review): presumably shaped [..., tokens_n, pos_emb_size] — the
    # dim=-2 gather below is consistent with that, but confirm at the caller.
    tokens_in_pos_emb: Optional[torch.Tensor]


# A Positioner maps (a subset of) PositionerKwargs to positional embeddings
# for the router-selected middle tokens, or None when none are produced.
Positioner = Callable[[Unpack[PositionerKwargs]], torch.Tensor | None]


def none_positioner(**_) -> torch.Tensor | None:
    return None


def original_positioner(
    router_output: SynergyRouterOutput,
    tokens_in_pos_emb: torch.Tensor | None,
    **_,
) -> torch.Tensor | None:
    """Select the precomputed embedding rows of the routed middle tokens.

    Returns ``None`` when no token embeddings were supplied.
    """
    if tokens_in_pos_emb is None:
        return None
    # Pick, along the token axis, the rows belonging to the middle tokens
    # chosen by the router.
    return gather(tokens_in_pos_emb, router_output.middle_tokens_index, dim=-2)


def make_sigma_positioner(
    positional_encoder: Callable[[torch.Tensor], torch.Tensor],
    keep_gradient: bool = False,
    only_middle: bool = True,
) -> torch.Tensor | None:
    def middle_tokens_sigma_positioner(
        router_output: SynergyRouterOutput,
        **_,
    ) -> torch.Tensor | None:
        if only_middle:
            middle_tokens_sigma = router_output.middle_tokens_sigma
            if not keep_gradient:
                middle_tokens_sigma = middle_tokens_sigma.detach()
            middle_tokens_pos = torch.cumsum(middle_tokens_sigma, dim=-1)
        else:
            tokens_sigma = router_output.tokens_sigma
            if not keep_gradient:
                tokens_sigma = tokens_sigma.detach()
            tokens_pos = torch.cumsum(tokens_sigma, dim=-1)
            middle_tokens_pos = gather(tokens_pos, router_output.middle_tokens_index, dim=-1)
        # [..., middle_tokens_n]

        middle_tokens_pos_emb = positional_encoder(middle_tokens_pos)
        # [..., middle_tokens_n, pos_emb_size]

        return middle_tokens_pos_emb

    return middle_tokens_sigma_positioner
