from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Dict, List
from ..common.base_simulator import RMSNormSimulator
from .llama_simulator_builder import GeesllamaSimulatorBuilder
from ..moe.moe_layer import MOELayerAbstractBuilder, MOELayer, MoeTransformerLayer
from ..moe.moe_simulator import TopKRouterSimulator, TokenDispatcherSimulator, ExpertsSimulator, MoeMlpSimulator, ExpertColumnParallelLinearSimulator, ExpertRowParallelLinearSimulator

class GeesMixtralSimulatorBuilder(GeesllamaSimulatorBuilder, MOELayerAbstractBuilder):
    """Builder for Mixtral-style (MoE) transformer-layer simulators.

    Extends the dense Llama builder by swapping the layer container for a
    ``MoeTransformerLayer`` and registering MoE-specific components (router,
    token dispatcher, experts, expert linear layers) in the module-name ->
    builder-method dispatch table inherited from ``GeesllamaSimulatorBuilder``.

    Each ``build_*`` method consumes an ``args`` dict of traced runtime stats
    (e.g. ``'max_memory_allocated'``, ``'input_shape'``) and returns ``self``
    so calls can be chained.
    """

    def __init__(self) -> None:
        super().__init__()
        # Replace the dense layer container with its MoE counterpart.
        self.TransformerLayer = MoeTransformerLayer()
        # NOTE: the attribute name (with its "methond" spelling) is defined by
        # the parent builder; it is kept as-is for compatibility.
        # Keys are traced module class names; values are the ordered list of
        # builder methods applied to successive occurrences of that module.
        self.TransformerLayer_methond_mapping.update({
            'MoELayer': [self.build_mlp],
            'RMSNorm': [self.build_pre_mlp_layernorm, self.build_layer_norm],
            'ColumnParallelLinear': [self.build_linear_fc1, self.build_output_layer],
            'RowParallelLinear': [self.build_linear_fc2],
            'TELayerNormColumnParallelLinear': [self.build_linear_qkv],  # 1
            'TERowParallelLinear': [self.build_linear_proj],  # 2
            'IdentityOp': [self.build_input_layernorm, self.build_q_layernorm, self.build_k_layernorm, self.build_pre_cross_attn_layernorm, self.build_cross_attention],
            'TopKRouter': [self.build_topk_router],
            # NOTE(review): mapping 'MLP' -> experts and 'SequentialMLP' ->
            # token dispatcher looks inverted relative to Megatron naming
            # (SequentialMLP is usually the experts container) — confirm
            # against the traced module names before changing.
            'MLP': [self.build_experts],
            'SequentialMLP': [self.build_token_dispatcher],
        })

    def build_mlp(self, args: Dict[str, object]) -> 'GeesMixtralSimulatorBuilder':
        """Install an empty ``MOELayer`` container; sub-builders populate it."""
        self.TransformerLayer.mlp = MOELayer()
        return self

    def build_pre_mlp_layernorm(self, args: Dict[str, object]) -> 'GeesMixtralSimulatorBuilder':
        """Build the pre-MLP RMSNorm simulator.

        Subtracts the embedding dropout's recorded peak memory from the traced
        ``max_memory_allocated`` to isolate this layer's own contribution, and
        passes the result to the simulator as
        ``'max_memory_allocated_processed'``.
        """
        max_memory_allocated = args['max_memory_allocated'] - self.TransformerLayer.language_model_embedding.embedding_dropout.mp['max_memory_allocated']
        args['max_memory_allocated_processed'] = max_memory_allocated
        self.TransformerLayer.pre_mlp_layernorm = RMSNormSimulator(args)
        return self

    def build_topk_router(self, args: Dict[str, object]) -> 'GeesMixtralSimulatorBuilder':
        """Build the top-k router simulator for the MoE layer."""
        self.TransformerLayer.mlp.topk_router = TopKRouterSimulator(args)
        return self

    def build_token_dispatcher(self, args: Dict[str, object]) -> 'GeesMixtralSimulatorBuilder':
        """Build the token dispatcher simulator for the MoE layer."""
        self.TransformerLayer.mlp.token_dispatcher = TokenDispatcherSimulator(args)
        return self

    def build_experts(self, args: Dict[str, object]) -> 'GeesMixtralSimulatorBuilder':
        """Build the experts container and its shared MLP simulator."""
        self.TransformerLayer.mlp.experts = ExpertsSimulator()
        self.TransformerLayer.mlp.experts.mlp = MoeMlpSimulator(args)
        return self

    # Linear layers inside the experts.
    def build_linear_fc1(self, args: Dict[str, object]) -> 'GeesMixtralSimulatorBuilder':
        """Build the experts' first (column-parallel) linear layer simulator.

        NOTE(review): this reads ``args['memory_allocated']`` (not
        ``'max_memory_allocated'`` as in ``build_pre_mlp_layernorm``) yet
        stores the difference under ``'max_memory_allocated_processed'`` —
        verify which key is intended against the tracing side.
        """
        max_memory_allocated = args['memory_allocated'] - self.TransformerLayer.mlp.token_dispatcher.mp['memory_allocated']
        args['max_memory_allocated_processed'] = max_memory_allocated
        self.TransformerLayer.mlp.experts.linear_fc1 = ExpertColumnParallelLinearSimulator(args)
        return self

    # Inside the experts.
    def build_linear_fc2(self, args: Dict[str, object]) -> 'GeesMixtralSimulatorBuilder':
        """Build the experts' second (row-parallel) linear layer simulator."""
        self.TransformerLayer.mlp.experts.linear_fc2 = ExpertRowParallelLinearSimulator(args)
        return self