from mindspeed.core.models.yoco.config import YOCOConfig
from mindspeed.core.models.yoco.decoder_layer import YOCOTransformerLayer
from mindspeed.core.models.yoco.decoder_attention import YOCOCrossAttention
from mindspeed.core.models.yoco.decoder_attention import get_yoco_bias_dropout_add
from mindspeed.core.transformer.custom_layers.transformer_engine import PTNorm

from megatron.core.fusions.fused_bias_dropout import get_bias_dropout_add
from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear
import megatron.core.parallel_state as parallel_state
from megatron.core.transformer.identity_op import IdentityOp
from megatron.core.transformer.transformer_block import TransformerBlockSubmodules
from megatron.core.transformer.attention import SelfAttention, SelfAttentionSubmodules
from megatron.core.transformer.transformer_layer import TransformerLayerSubmodules
from megatron.core.transformer.attention import CrossAttentionSubmodules
from megatron.core.transformer.dot_product_attention import DotProductAttention
from megatron.core.transformer.enums import AttnMaskType
from megatron.core.transformer.spec_utils import ModuleSpec
from megatron.core.transformer.mlp import MLP, MLPSubmodules
from megatron.core.transformer.moe.moe_layer import MoELayer


# NPU mlp layer
def _get_mlp_module_spec(
    num_experts: int = None, moe_grouped_gemm: bool = False
) -> ModuleSpec:
    """Return the MLP ModuleSpec: a dense MLP when ``num_experts`` is None,
    otherwise an MoE layer (with no linear submodules when grouped GEMM is on).
    """
    # Shared dense FC pair built from megatron tensor-parallel linears.
    dense_fc_pair = MLPSubmodules(
        linear_fc1=ColumnParallelLinear,
        linear_fc2=RowParallelLinear,
    )

    if num_experts is None:
        # Dense MLP w/ or w/o TE modules.
        return ModuleSpec(module=MLP, submodules=dense_fc_pair)

    # Mixture of experts with modules in megatron core; grouped GEMM
    # variants take no per-expert linear submodules here.
    expert_submodules = None if moe_grouped_gemm else dense_fc_pair
    return ModuleSpec(module=MoELayer, submodules=expert_submodules)


def get_yoco_self_layer_spec(
    num_experts: int = None, moe_grouped_gemm: bool = False, qk_layernorm: bool = False
) -> ModuleSpec:
    """Build the ModuleSpec for a YOCO self-attention transformer layer.

    The layer uses causal self-attention with optional Q/K layernorm and an
    MLP (dense or MoE depending on ``num_experts``).
    """
    # Q/K norms are only instantiated when requested; IdentityOp is a no-op.
    qk_norm = PTNorm if qk_layernorm else IdentityOp

    attention_spec = ModuleSpec(
        module=SelfAttention,
        params={"attn_mask_type": AttnMaskType.causal},
        submodules=SelfAttentionSubmodules(
            linear_qkv=ColumnParallelLinear,
            core_attention=DotProductAttention,
            linear_proj=RowParallelLinear,
            q_layernorm=qk_norm,
            k_layernorm=qk_norm,
        ),
    )

    layer_submodules = TransformerLayerSubmodules(
        input_layernorm=PTNorm,
        self_attention=attention_spec,
        self_attn_bda=get_bias_dropout_add,
        pre_mlp_layernorm=PTNorm,
        mlp=_get_mlp_module_spec(
            num_experts=num_experts, moe_grouped_gemm=moe_grouped_gemm
        ),
        mlp_bda=get_bias_dropout_add,
        # Map standalone layernorm keys onto the fused-linear checkpoint names.
        sharded_state_dict_keys_map={
            "input_layernorm.": "self_attention.linear_qkv.layer_norm_",
            "pre_mlp_layernorm.": "mlp.linear_fc1.layer_norm_",
        },
    )

    return ModuleSpec(module=YOCOTransformerLayer, submodules=layer_submodules)


def get_yoco_cross_layer_spec(
    num_experts: int = None, moe_grouped_gemm: bool = False
) -> ModuleSpec:
    """Build the ModuleSpec for a YOCO cross-attention transformer layer.

    The layer uses YOCO cross-attention (query projection only — the KV cache
    comes from the self-attention stack) followed by an MLP that is dense or
    MoE depending on ``num_experts``.
    """
    cross_attention_spec = ModuleSpec(
        module=YOCOCrossAttention,
        params={"attn_mask_type": AttnMaskType.causal},
        submodules=CrossAttentionSubmodules(
            linear_q=ColumnParallelLinear,
            core_attention=DotProductAttention,
            linear_proj=RowParallelLinear,
        ),
    )

    layer_submodules = TransformerLayerSubmodules(
        cross_attention=cross_attention_spec,
        pre_cross_attn_layernorm=PTNorm,
        cross_attn_bda=get_yoco_bias_dropout_add,
        # Dense MLP variants fuse the pre-MLP norm elsewhere; only MoE needs it.
        pre_mlp_layernorm=PTNorm if num_experts else IdentityOp,
        mlp=_get_mlp_module_spec(
            num_experts=num_experts, moe_grouped_gemm=moe_grouped_gemm
        ),
        mlp_bda=get_bias_dropout_add,
    )

    return ModuleSpec(module=YOCOTransformerLayer, submodules=layer_submodules)


def get_yoco_decoder_block_spec(args, config: YOCOConfig) -> TransformerBlockSubmodules:
    """Build the full YOCO decoder block spec.

    The block consists of ``config.num_self_attn_layers`` self-attention
    layers followed by ``config.num_layers - config.num_self_attn_layers``
    cross-attention layers.

    Args:
        args: parsed command-line arguments; ``use_yoco`` must be enabled and
            ``num_self_attn_layers`` must be a positive integer.
        config: YOCO configuration supplying ``num_layers`` and
            ``num_self_attn_layers`` used to size the block.

    Returns:
        TransformerBlockSubmodules listing the per-layer specs in order.
    """
    assert args.use_yoco, "YOCO model is not enabled, please set `--use-yoco` to True."
    assert (
        args.num_self_attn_layers is not None and args.num_self_attn_layers > 0
    ), "Number of self attention layers must be > 0."
    # Without this bound, a too-large num_self_attn_layers makes the
    # cross-layer count negative and silently produces a block with fewer
    # than num_layers layers ([spec] * negative == []).
    assert (
        config.num_self_attn_layers <= config.num_layers
    ), "Number of self attention layers must be <= total number of layers."
    assert (
        parallel_state.get_pipeline_model_parallel_world_size() == 1
    ), "YOCO model does not support pipeline parallelism."
    assert (
        parallel_state.get_virtual_pipeline_model_parallel_world_size() is None
    ), "YOCO model does not support virtual pipeline parallelism."

    self_layer_spec = get_yoco_self_layer_spec(
        args.num_experts, args.moe_grouped_gemm, args.qk_layernorm
    )
    # Self-attention layers also use the YOCO bias-dropout-add variant.
    self_layer_spec.submodules.self_attn_bda = get_yoco_bias_dropout_add

    cross_layer_spec = get_yoco_cross_layer_spec(
        args.num_experts, args.moe_grouped_gemm
    )

    num_self_layers = config.num_self_attn_layers
    num_cross_layers = config.num_layers - num_self_layers
    layer_specs = (
        [self_layer_spec] * num_self_layers + [cross_layer_spec] * num_cross_layers
    )

    return TransformerBlockSubmodules(layer_specs=layer_specs)
