from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Dict, List
from ..common.base_simulator_builder import GeesTransformerLayerSimulatorBuilder
from ..common.language_model_embedding import RotaryEmbeddingSimulator
from ..common.base_simulator import RMSNormSimulator, IdentityOpSimulator
from ..common.mlp_layer_simulator import MLPLayer
from ..common.output_layer_simulator import OutputLayer, OutputLayerSimulator
from ..llama.simulator import *
from ..llama.llama_self_attention_simulator import LlamaSelfAttentionSimulator
from ..llama.llama_language_embedding import LLamaLanguageModelEmbedding

'''
    The only architectural changes in the Llama paper are:
    1. normalization uses RMSNorm
    2. the ReLU non-linearity is replaced by the SwiGLU activation function
    3. rotary positional embeddings are added
'''
class GeesllamaSimulatorBuilder(GeesTransformerLayerSimulatorBuilder):
    """Builder for the Llama-style transformer-layer simulator.

    Relative to the vanilla transformer, the Llama paper changes:
      1. normalization uses RMSNorm,
      2. the ReLU non-linearity is replaced by SwiGLU,
      3. rotary positional embeddings are added.
    """

    def __init__(self):
        super().__init__()
        # Maps a layer-spec module name to the builder method(s) that can
        # handle it; when one key lists several methods, their relative
        # order decides which one is invoked.
        self.TransformerLayer_methond_mapping = dict(
            # embedding
            LanguageModelEmbedding=[self.build_language_embeddings],
            VocabParallelEmbedding=[self.build_word_embeddings],
            RotaryEmbedding=[self.build_rotary_embedding],  # Llama rotary embedding
            # attention — relative order decides which builder is called
            SelfAttention=[self.build_self_attention],
            # TEDotProductAttention is the Transformer Engine (TE) base
            # attention class; it selects FlashAttention, FusedAttention, or
            # UnfusedDotProductAttention from configuration and hardware.
            TEDotProductAttention=[self.build_core_attention],
            FusedScaleMaskSoftmax=[self.build_scale_mask_softmax],
            Dropout=[self.build_embedding_dropout, self.build_attention_dropout],
            TELayerNormColumnParallelLinear=[self.build_linear_qkv, self.build_linear_fc1],
            TERowParallelLinear=[self.build_linear_proj, self.build_linear_fc2],
            IdentityOp=[
                self.build_input_layernorm,
                self.build_q_layernorm,
                self.build_k_layernorm,
                self.build_pre_cross_attn_layernorm,
                self.build_cross_attention,
                self.build_pre_mlp_layernorm,
            ],
            IdentityFuncOp=[self.build_self_attn_bda],
            MLP=[self.build_mlp],
            ColumnParallelLinear=[self.build_output_layer],
            # final layer norm after the layer_spec; Llama replaces it with RMSNorm
            RMSNorm=[self.build_layer_norm],
        )

    def build_language_embeddings(self, args):
        """Attach the Llama language-model embedding; returns self (fluent)."""
        self.TransformerLayer.language_model_embedding = LLamaLanguageModelEmbedding()
        return self

    # Llama change 3: rotary positional embedding.
    def build_rotary_embedding(self, args):
        self.TransformerLayer.language_model_embedding.rotary_embedding = RotaryEmbeddingSimulator(args)
        return self

    # Llama change 2.1: normalization is applied to the input rather than the
    # output, so nothing is needed here — an IdentityOp placeholder suffices.
    def build_input_layernorm(self, args):
        self.TransformerLayer.input_layernorm = IdentityOpSimulator(args)
        return self

    def build_self_attention(self, args):
        """Install the Llama self-attention simulator; returns self (fluent)."""
        self.TransformerLayer.self_attention = LlamaSelfAttentionSimulator()
        return self

    def build_core_attention(self, args: Dict[str, object]):
        """Install the Llama core-attention simulator under self_attention."""
        self.TransformerLayer.self_attention.core_attention = LlamaCoreAttentionSimulator(args)
        return self

    def build_pre_mlp_layernorm(self, args: Dict[str, object]):
        """Pre-MLP norm is a no-op here (input-side norm) — IdentityOp."""
        self.TransformerLayer.pre_mlp_layernorm = IdentityOpSimulator(args)
        return self

    # Llama change 2: the MLP uses the SwiGLU activation function.
    def build_mlp(self, args):
        mlp_layer = MLPLayer()
        mlp_layer.mlp = LlamaMLPSimulator(args)
        self.TransformerLayer.mlp = mlp_layer
        return self

    # Llama change 1: RMSNorm normalization.
    def build_layer_norm(self, args):
        self.TransformerLayer.layer_norm = RMSNormSimulator(args)
        return self
    