from dataclasses import dataclass, field
from typing import Dict, List, Tuple
from abc import ABC, abstractmethod
from ..common.common import MoeCostCalculator
from ..common.mlp_layer_simulator import MLPLayer
from ..common.transfomer_layer_simulator import TransformerLayer
from .moe_simulator import TopKRouterSimulator, TokenDispatcherSimulator, ExpertsSimulator


@dataclass
class MOELayer(MLPLayer, MoeCostCalculator):
    """MoE feed-forward layer simulator: a top-k router, a bank of experts,
    and a token dispatcher. Aggregates per-component cost statistics under
    tensor parallelism (tp) and expert parallelism (ep)."""
    topk_router: TopKRouterSimulator = None
    experts: ExpertsSimulator = None
    token_dispatcher: TokenDispatcherSimulator = None

    def statistic_single_layer_calculate_workload_with_ep(self, tp_size, ep_size, experts_num):
        """Return summed operation counts [add, sub, mul, div, exp] across all
        non-None components of this layer."""
        totals = [0, 0, 0, 0, 0]
        for name, component in self.__dict__.items():
            if component is None:
                continue
            if name == 'experts':
                # Experts are sharded across the expert-parallel group.
                part = component.statistic_single_layer_calculate_workload_with_ep(tp_size, ep_size, experts_num)
            else:
                # NOTE(review): router/dispatcher workload is keyed by the local
                # expert count (experts_num // ep_size) here, while the sibling
                # MoeTransformerLayer uses tp_size for non-expert parts — confirm intended.
                part = component.get_calculate_workload(experts_num // ep_size)
            totals = [acc + inc for acc, inc in zip(totals, part)]
        return totals

    def statistic_single_layer_tp_communication_workload_with_ep(self, tp_size, ep_size, experts_num):
        """Return total tensor-parallel communication volume across all
        non-None components of this layer."""
        total = 0
        for name, component in self.__dict__.items():
            if component is None:
                continue
            if name == 'experts':
                total += component.statistic_single_layer_tp_communication_workload_with_ep(tp_size, ep_size, experts_num)
            else:
                total += component.get_communication_workload(tp_size)
        return total

    def statistic_single_layer_dp_communication_workload_with_ep(self, tp_size, dp_size, ep_size, experts_num):
        """Return total data-parallel (gradient) communication volume across
        all non-None components of this layer."""
        total = 0
        for name, component in self.__dict__.items():
            if component is None:
                continue
            if name == 'experts':
                total += component.statistic_single_layer_dp_communication_workload_with_ep(tp_size, dp_size, ep_size, experts_num)
            else:
                total += component.get_dp_communication_workload_with_ep(tp_size, dp_size, ep_size, experts_num)
        return total



class MOELayerAbstractBuilder(ABC):
    """Abstract builder declaring the construction steps for a MOELayer.

    Concrete builders must produce each of the three MoE components from a
    dictionary of configuration arguments.
    """

    @abstractmethod
    def build_topk_router(self, args: Dict[str, object]):
        """Construct the top-k router component from *args*."""

    @abstractmethod
    def build_experts(self, args: Dict[str, object]):
        """Construct the experts component from *args*."""

    @abstractmethod
    def build_token_dispatcher(self, args: Dict[str, object]):
        """Construct the token dispatcher component from *args*."""


@dataclass
class MoeTransformerLayer(TransformerLayer, MoeCostCalculator):
    """Transformer layer whose MLP block is an MoE layer.

    Aggregates calculation, memory, and communication statistics per layer
    under tensor parallelism (tp), data parallelism (dp) and expert
    parallelism (ep). Embedding and the output projection are excluded from
    per-layer totals.
    """

    def statistic_single_layer_calculate_workload_with_ep(self, tp_size, ep_size, experts_num):
        """Return operation counts [add, sub, mul, div, exp] for one layer."""
        totals = [0, 0, 0, 0, 0]
        for name, module in self.__dict__.items():
            # Embedding / output head are not part of a single transformer layer.
            if name in ('language_model_embedding', 'output_layer'):
                continue
            if module is None:
                continue
            if name == 'self_attention':
                part = module.statistic_single_layer_calculate_workload(tp_size)
            elif name == 'mlp':
                # The MoE MLP spreads its experts over the ep group.
                part = module.statistic_single_layer_calculate_workload_with_ep(tp_size, ep_size, experts_num)
            else:
                part = module.get_calculate_workload(tp_size)
            totals = [acc + inc for acc, inc in zip(totals, part)]
        return totals

    def statistic_single_layer_memory_workload_with_ep(self, tp_size, ep_size, experts_num) -> Tuple[float, float]:
        """Return (static layer memory, activation memory) in GiB for one layer."""
        measured = self.mlp.experts.mlp.mp
        init_allocated = measured['init_memory_allocated']
        peak_allocated = measured['max_memory_allocated']
        embed = self.language_model_embedding.word_embeddings
        # 4 bytes per 32-bit parameter.
        embedding_bytes = embed.weight_hidden_size * embed.hidden_size * 4
        fc1_shape = self.mlp.experts.linear_fc1.mp['weight_shape']
        fc2_shape = self.mlp.experts.linear_fc2.mp['weight_shape']
        # fc1 weights + fc2 weights + an extra fc1_shape[0] entries
        # (presumably the fc1 bias — confirm), 4 bytes each.
        per_expert_bytes = (fc1_shape[0] * fc1_shape[1]
                            + fc2_shape[0] * fc2_shape[1]
                            + fc1_shape[0]) * 4
        # Measured memory covers one expert; add weights for the remaining
        # local experts, and drop the embedding which is counted elsewhere.
        layer_memory = (init_allocated - embedding_bytes + (experts_num // ep_size - 1) * per_expert_bytes) / tp_size / 1024 / 1024 / 1024
        active_memory = (peak_allocated - init_allocated) / tp_size / 1024 / 1024 / 1024
        return layer_memory, active_memory

    def statistic_single_layer_tp_communication_workload_with_ep(self, tp_size, ep_size, experts_num):
        """Return total tensor-parallel communication volume for one layer."""
        total = 0
        for name, module in self.__dict__.items():
            if name in ('language_model_embedding', 'output_layer'):
                continue
            if module is None:
                continue
            if name == 'self_attention':
                total += module.statistic_single_layer_communication_workload(tp_size)
            elif name == 'mlp':
                total += module.statistic_single_layer_tp_communication_workload_with_ep(tp_size, ep_size, experts_num)
            else:
                total += module.get_communication_workload(tp_size)
        return total

    def statistic_single_layer_dp_communication_workload_with_ep(self, tp_size, dp_size, ep_size, experts_num):
        """Return total data-parallel communication volume for one layer."""
        total = 0
        for name, module in self.__dict__.items():
            if name in ('language_model_embedding', 'output_layer'):
                continue
            if module is None:
                continue
            if name == 'self_attention':
                total += module.statistic_single_layer_dp_communication_workload(tp_size, dp_size)
            elif name == 'mlp':
                total += module.statistic_single_layer_dp_communication_workload_with_ep(tp_size, dp_size, ep_size, experts_num)
            else:
                total += module.get_dp_communication_workload(tp_size, dp_size)
        return total

    def statistic_single_layer_pp_communication_workload(self, tp_size):
        """Pipeline-parallel traffic: one fp32 activation tensor (GiB), split by tp_size."""
        core = self.mlp.experts.mlp
        return core.batch_size * core.hidden_size * 4 / 1024 / 1024 / 1024 / tp_size