from dataclasses import dataclass, field
from abc import ABC, abstractmethod
from typing import Dict, List
from .output_layer_simulator import OutputLayer, OutputLayerSimulator
from .mlp_layer_simulator import MLPLayer, MLPSimulator, MLPLayerAbstractBuilder
from .transfomer_layer_simulator import TransformerLayer
from .self_attention_simulator import SelfAttentionAbstructBuilder, SelfAttentionSimulator
from .base_simulator import(
    LNImplSimulator,
    DropoutSimulator,
    DotProductAttentionSimulator,
    FusedScaleMaskSoftmaxSimulator,
    TELayerNormSimulator,
    IdentityFuncOpSimulator,
    IdentityOpSimulator,
    ColumnParallelLinearSimulator,
    RowParallelLinearSimulator
)
from .language_model_embedding import (
    LanguageModelEmbeddingAbstractBuilder,
    LanguageModelEmbedding, 
    VocabParallelEmbeddingSimulator,
    EmbeddingSimulator,
    EmbeddingDropoutSimulator
)

# Interface definition
class TransformerLayerSimulatorBuilder(ABC):
    """Abstract builder interface for assembling a transformer-layer simulator.

    Concrete builders implement each ``build_*`` hook to attach one simulated
    component to the layer under construction.
    """

    def __init__(self):
        super().__init__()

    @abstractmethod
    def build_input_layernorm(self, args: Dict[str, object]):
        """Build the input layer-norm component from *args*."""

    @abstractmethod
    def build_self_attention(self, args: Dict[str, object]):
        """Build the self-attention component from *args*."""

    @abstractmethod
    def build_self_attn_bda(self, args: Dict[str, object]):
        """Build the self-attention bias/dropout/add component from *args*."""

    @abstractmethod
    def build_pre_mlp_layernorm(self, args: Dict[str, object]):
        """Build the pre-MLP layer-norm component from *args*."""

    @abstractmethod
    def build_layer_norm(self, args: Dict[str, object]):
        """Build the final layer-norm component from *args*."""

    @abstractmethod
    def build_output_layer(self, args: Dict[str, object]):
        """Build the output-layer component from *args*."""


# Concrete builder
class GeesTransformerLayerSimulatorBuilder(TransformerLayerSimulatorBuilder, LanguageModelEmbeddingAbstractBuilder, SelfAttentionAbstructBuilder, MLPLayerAbstractBuilder):
    """Concrete builder that assembles a :class:`TransformerLayer` simulator.

    Every ``build_*`` method attaches one simulator component to the
    ``TransformerLayer`` under construction and returns ``self`` so calls can
    be chained (fluent interface).  ``invork_method_mapping`` dispatches build
    calls by name through ``TransformerLayer_methond_mapping``, which maps a
    name to an ordered list of builder methods and is expected to be populated
    by the caller.

    NOTE(review): the misspelled public names (``invork_method_mapping``,
    ``relative_methond_call_order``, ``TransformerLayer_methond_mapping``) are
    kept as-is because external code depends on them.
    """

    def __init__(self):
        super().__init__()
        self.TransformerLayer = TransformerLayer()
        # Per mapped name: how many times it has been dispatched, so repeated
        # calls walk through its method list in order.
        self.relative_methond_call_order: Dict[str, int] = {}
        # name -> ordered list of bound build_* methods; filled in externally.
        self.TransformerLayer_methond_mapping = {}

    def build_language_embeddings(self, args: Dict[str, object]):
        """Create the (initially empty) language-model embedding container."""
        self.TransformerLayer.language_model_embedding = LanguageModelEmbedding()
        return self

    def build_word_embeddings(self, args: Dict[str, object]):
        """Attach the vocab-parallel word-embedding simulator."""
        self.TransformerLayer.language_model_embedding.word_embeddings = VocabParallelEmbeddingSimulator(args)
        return self

    def build_tokentype_embeddings(self, args: Dict[str, object]):
        """Attach the token-type embedding simulator."""
        self.TransformerLayer.language_model_embedding.tokentype_embeddings = EmbeddingSimulator(args)
        return self

    def build_position_embeddings(self, args: Dict[str, object]):
        """Attach the position-embedding simulator."""
        self.TransformerLayer.language_model_embedding.position_embeddings = EmbeddingSimulator(args)
        return self

    def build_embedding_dropout(self, args: Dict[str, object]):
        """Attach the embedding-dropout simulator."""
        self.TransformerLayer.language_model_embedding.embedding_dropout = EmbeddingDropoutSimulator(args)
        return self

    def build_input_layernorm(self, args: Dict[str, object]):
        """Attach the input layer-norm simulator."""
        self.TransformerLayer.input_layernorm = LNImplSimulator(args)
        return self

    def build_self_attention(self, args: Dict[str, object]):
        """Create the (initially empty) self-attention container."""
        self.TransformerLayer.self_attention = SelfAttentionSimulator()
        return self

    def build_linear_qkv(self, args: Dict[str, object]):
        """Attach the fused QKV projection (column-parallel linear)."""
        self.TransformerLayer.self_attention.linear_qkv = ColumnParallelLinearSimulator(args)
        return self

    def build_cross_attention(self, args):
        """Cross-attention is not simulated for this model; use an identity op."""
        self.TransformerLayer.self_attention.cross_attention = IdentityOpSimulator(args)
        return self

    def build_pre_cross_attn_layernorm(self, args):
        """Pre-cross-attention layer-norm is a no-op here; use an identity op."""
        self.TransformerLayer.self_attention.pre_cross_attn_layernorm = IdentityOpSimulator(args)
        return self

    def build_core_attention(self, args: Dict[str, object]):
        """Attach the dot-product attention simulator."""
        self.TransformerLayer.self_attention.core_attention = DotProductAttentionSimulator(args)
        return self

    def build_scale_mask_softmax(self, args: Dict[str, object]):
        """Attach the fused scale-mask-softmax simulator."""
        self.TransformerLayer.self_attention.scale_mask_softmax = FusedScaleMaskSoftmaxSimulator(args)
        return self

    def build_attention_dropout(self, args: Dict[str, object]):
        """Attach the attention-dropout simulator."""
        self.TransformerLayer.self_attention.attention_dropout = DropoutSimulator(args)
        return self

    def build_linear_proj(self, args: Dict[str, object]):
        """Attach the attention output projection (row-parallel linear)."""
        self.TransformerLayer.self_attention.linear_proj = RowParallelLinearSimulator(args)
        return self

    def build_q_layernorm(self, args: Dict[str, object]):
        """Q layer-norm is disabled for this model; use an identity op."""
        self.TransformerLayer.self_attention.q_layernorm = IdentityOpSimulator(args)
        return self

    def build_k_layernorm(self, args: Dict[str, object]):
        """K layer-norm is disabled for this model; use an identity op."""
        self.TransformerLayer.self_attention.k_layernorm = IdentityOpSimulator(args)
        return self

    def build_mlp(self, args: Dict[str, object]):
        """Create the MLP container and attach the MLP simulator."""
        # TODO: decouple the MLP layer so both a plain MLP and an expert (MoE)
        # layer can be driven through mlp.calculate_workload.
        self.TransformerLayer.mlp = MLPLayer()
        self.TransformerLayer.mlp.mlp = MLPSimulator(args)
        return self

    def build_linear_fc1(self, args: Dict[str, object]):
        """Attach the first MLP projection (column-parallel linear)."""
        self.TransformerLayer.mlp.linear_fc1 = ColumnParallelLinearSimulator(args)
        return self

    def build_linear_fc2(self, args: Dict[str, object]):
        """Attach the second MLP projection (row-parallel linear)."""
        self.TransformerLayer.mlp.linear_fc2 = RowParallelLinearSimulator(args)
        return self

    def build_pre_mlp_layernorm(self, args: Dict[str, object]):
        """Attach the pre-MLP layer-norm simulator."""
        self.TransformerLayer.pre_mlp_layernorm = LNImplSimulator(args)
        return self

    def build_self_attn_bda(self, args: Dict[str, object]):
        """Attach the self-attention bias/dropout/add step (identity here)."""
        self.TransformerLayer.self_attn_bda = IdentityFuncOpSimulator(args)
        return self

    def build_layer_norm(self, args: Dict[str, object]):
        """Attach the final (TE) layer-norm simulator."""
        self.TransformerLayer.layer_norm = TELayerNormSimulator(args)
        return self

    def build_output_layer(self, args):
        """Attach the output-layer simulator.

        Derives the layer's activation memory as the difference between the
        measured ``memory_allocated`` and the value recorded by the final
        layer-norm, and reuses the word-embedding weight shape (weight tying).

        NOTE(review): assumes ``build_layer_norm`` and ``build_word_embeddings``
        already ran — verify the call order against the model definition.
        """
        if self.TransformerLayer.output_layer is None:
            self.TransformerLayer.output_layer = OutputLayer()
        active_memory_allocated = args['memory_allocated'] - self.TransformerLayer.layer_norm.mp['memory_allocated']
        args['active_memory_allocated'] = active_memory_allocated

        # The output layer shares its weights with the embedding.
        args['weight_shape'] = self.TransformerLayer.language_model_embedding.word_embeddings.mp['weight_shape']
        self.TransformerLayer.output_layer.output_layer = OutputLayerSimulator(args)
        return self

    def build_rotary_embedding(self, args):
        """No-op: rotary embeddings carry no simulated component here.

        Fixed to return ``self`` (was implicitly ``None``) so it chains like
        every other ``build_*`` method and keeps the value returned by
        ``invork_method_mapping`` consistent.
        """
        return self

    def invork_method_mapping(self, model_name, name, args: Dict[str, object]):
        """Dispatch the next builder method registered under *name*.

        Looks up *name* in ``TransformerLayer_methond_mapping`` and invokes the
        method at the current call-order index, then advances the index.  If no
        methods are registered for *name*, returns ``self`` unchanged.
        ``model_name`` is currently unused but kept for interface stability.
        """
        method_list = self.TransformerLayer_methond_mapping.get(name, [])
        if len(method_list) != 0:
            idx = self.relative_methond_call_order.get(name, 0)
            # NOTE(review): stripped under `python -O`; consider raising instead.
            assert idx < len(method_list), f'model defined error, {name}, idx = {idx},length = {len(method_list)}, method_list={ method_list}'

            method = method_list[idx]
            self.relative_methond_call_order[name] = idx + 1
            return method(args)
        else:
            return self

    def build(self) -> TransformerLayer:
        """Return the assembled :class:`TransformerLayer`."""
        return self.TransformerLayer
