"""YOCO's cross attention modules for the decoder block."""

from typing import Dict, Optional
import torch

from megatron.core.jit import jit_fuser
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.attention import Attention
from megatron.core.transformer.attention import CrossAttentionSubmodules
from megatron.core.transformer.enums import AttnMaskType
from megatron.core.transformer.transformer_config import TransformerConfig
from megatron.core.transformer.spec_utils import build_module
from megatron.core.tensor_parallel import split_tensor_along_last_dim
from megatron.core.fusions.fused_bias_dropout import _bias_dropout_add_func


class YOCOCrossAttention(Attention):
    """YOCO Cross-attention layer class

    take key-value directly from entry layer, returns output with size of [s, b, h]
    """

    def __init__(
        self,
        config: TransformerConfig,
        submodules: CrossAttentionSubmodules,
        layer_number: int,
        attn_mask_type: AttnMaskType,
    ):
        # Initialize the generic cross-attention machinery (core attention,
        # output projection, etc.) provided by the Megatron base class.
        super().__init__(
            config=config,
            submodules=submodules,
            layer_number=layer_number,
            attn_mask_type=attn_mask_type,
            attention_type="cross",
        )

        # Query projection only: keys/values are not projected here — they
        # arrive pre-computed from the entry layer (the YOCO design).
        self.linear_q = build_module(
            submodules.linear_q,
            self.config.hidden_size,
            self.query_projection_size,
            config=self.config,
            init_method=self.config.init_method,
            gather_output=False,
            bias=self.config.add_bias_linear,
            skip_bias_add=False,
            is_expert=False,
        )

    def get_query_key_value_tensors(self, hidden_states, key_value_states):
        """Get query, key, and value tensors."""
        assert (
            key_value_states is not None
        ), "Cross-attention requires key-value states."

        # The entry layer packs key and value along the last dimension;
        # split them apart.  key, value: [s, b, g, h/n]
        key, value = split_tensor_along_last_dim(key_value_states, 2)

        # Project hidden states to queries.  query: [s, b, h]
        query, _ = self.linear_q(hidden_states)

        # Expose the head dimension: [s, b, h] -> [s, b, n, h/n]
        query = query.view(
            *query.size()[:-1],
            self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head,
        )

        return query, key, value


def _retrieve_x_with_bias(x_with_bias):
    return (x_with_bias["output"], x_with_bias["bias"])


def _bias_dropout_add_unfused(training):
    """Build an unfused bias-dropout-add callable bound to the given
    training/eval mode (controls dropout semantics)."""

    def _apply(x_with_bias, residual, prob):
        # Convert the {"output": ..., "bias": ...} mapping to the tuple
        # form the shared implementation expects, then run it unfused.
        pair = _retrieve_x_with_bias(x_with_bias)
        return _bias_dropout_add_func(pair, residual, prob, training)

    return _apply


@jit_fuser
def _bias_dropout_add_fused_train(
    x_with_bias: Dict[str, Optional[torch.Tensor]],
    residual: torch.Tensor,
    prob: float,
) -> torch.Tensor:
    """Fused bias-dropout-add for training (dropout active).

    Unpacks the {"output", "bias"} mapping into the tuple form expected by
    the shared fused implementation and runs it with training=True.
    """
    x_with_bias = _retrieve_x_with_bias(x_with_bias)
    return _bias_dropout_add_func(x_with_bias, residual, prob, True)


@jit_fuser
def _bias_dropout_add_fused_inference(
    x_with_bias: Dict[str, Optional[torch.Tensor]],
    residual: torch.Tensor,
    prob: float,
) -> torch.Tensor:
    """Fused bias-dropout-add for inference (dropout disabled).

    Unpacks the {"output", "bias"} mapping into the tuple form expected by
    the shared fused implementation and runs it with training=False.
    """
    x_with_bias = _retrieve_x_with_bias(x_with_bias)
    return _bias_dropout_add_func(x_with_bias, residual, prob, False)


def get_yoco_bias_dropout_add(training, fused):
    """Select the bias-dropout-add implementation for YOCO layers.

    Returns a fused kernel variant when ``fused`` is set, otherwise a
    closure over the unfused path bound to the given ``training`` mode.
    """
    if not fused:
        return _bias_dropout_add_unfused(training)
    # jit scripting for a nn.module (with dropout) is not
    # triggering the fusion kernel. For now, we use two
    # different nn.functional routines to account for varying
    # dropout semantics during training and inference phases.
    return (
        _bias_dropout_add_fused_train
        if training
        else _bias_dropout_add_fused_inference
    )
