from dataclasses import dataclass, field
from typing import Dict, List
from ..common.common import Simulator, MoeCostCalculator
from ..common.base_simulator import ColumnParallelLinearSimulator, RowParallelLinearSimulator
from ..common.mlp_layer_simulator import MLPLayer
from ..llama.simulator import LlamaMLPSimulator

@dataclass
class TopKRouterSimulator(Simulator):
    """Cost model for a top-k MoE router (gating projection + softmax + top-k).

    NOTE(review): ``@dataclass`` on a field-less class with a hand-written
    ``__init__`` still generates ``__eq__``/``__repr__``; the decorator is
    kept so existing behavior (all instances compare equal) is unchanged.
    """

    def __init__(self, mp: Dict[str, object]):
        """
        Args:
            mp: layer metadata dict; ``input_shape[0]`` is
                (seq_len, batch_size, hidden_size) and ``weight_shape[1]``
                is the router weight's hidden dimension.
        """
        super().__init__()
        self.mp = mp
        self.hidden_size = mp['input_shape'][0][2]
        self.batch_size = mp['input_shape'][0][1]
        self.seq_len = mp['input_shape'][0][0]
        self.weight_hidden_size = mp['weight_shape'][1]
        # Deprecated misspelled alias; kept so existing callers that read
        # `wieght_hidden_size` keep working.
        self.wieght_hidden_size = self.weight_hidden_size

    def get_calculate_workload(self, experts_num: int) -> List[int]:
        """Return the 5-slot op-count vector for routing all tokens.

        Slot layout (inferred from usage): [0] additions, [2] multiplications,
        [3] divisions, [4] exponentials — TODO confirm against the base class.
        """
        calculator = [0, 0, 0, 0, 0]
        token_num = self.batch_size * self.seq_len

        # Gating projection (hidden_size -> experts_num); this part does not
        # depend on tp_size.
        calculator[0] += token_num * (self.hidden_size - 1) * experts_num
        calculator[2] += token_num * experts_num * self.hidden_size

        # Softmax over the router logits.
        # NOTE(review): these terms use hidden_size, but the logits vector has
        # length experts_num — confirm whether experts_num was intended here.
        calculator[0] += token_num * (self.hidden_size - 1)
        calculator[4] += token_num * self.hidden_size
        calculator[3] += token_num

        # Top-k selection is a sort/scan over experts_num entries; its cost is
        # negligible and ignored.
        return calculator

    def get_dp_communication_workload(self, tp_size, dp_size):
        return 0

    def get_communication_workload(self, tp_size):
        return 0

    def get_dp_communication_workload_with_ep(self, tp_size, dp_size, ep_size, experts_num):
        """Router-weight gradient sync volume in GB across the DP group.

        Factor 2 covers weight + gradient; 4 bytes per element (fp32 assumed
        — TODO confirm dtype).
        """
        if dp_size == 1:
            return 0

        return 2 * experts_num * self.weight_hidden_size * 4 / 1024 / 1024 / 1024
    
class TokenDispatcherSimulator(Simulator):
    """Cost model for the MoE token dispatcher (permute / un-permute and the
    all-to-all style communication around the experts)."""

    def __init__(self, mp: Dict[str, object]):
        """
        Args:
            mp: layer metadata dict; ``input_shape[0]`` is
                (token_nums, hidden_size).
        """
        super().__init__()
        self.mp = mp
        self.hidden_size = mp['input_shape'][0][1]
        self.token_nums = mp['input_shape'][0][0]

    def get_calculate_workload(self, experts_num: int) -> List[int]:
        """Return the 5-slot op-count vector for token permute/un-permute.

        Token counts here must be multiplied by tp_size by the caller.
        """
        calculator = [0, 0, 0, 0, 0]
        # Permutation bookkeeping:
        # 1. num_local_tokens_per_expert — negligible, not counted here.
        # 2. input_splits — approximately experts_num additions.
        calculator[0] += experts_num

        return calculator

    def get_dp_communication_workload(self, tp_size, dp_size):
        # No DP-group traffic attributed to the dispatcher; return 0 for
        # consistency with the sibling simulators (the previous bare `pass`
        # returned None, which breaks arithmetic in callers).
        return 0

    # Two all-to-alls, one all-gather and one reduce-scatter.
    # NOTE(review): this makes MoE communication heavier than the dense case,
    # lowering the compute/communication ratio and discouraging EP — the
    # factor 10 is a rough estimate; confirm against the real dispatcher.
    def get_communication_workload(self, tp_size):
        """Dispatcher traffic in GB (4 bytes per element assumed)."""
        return 10 * self.token_nums * self.hidden_size * 4 / 1024 / 1024 / 1024

    def get_dp_communication_workload_with_ep(self, tp_size, dp_size, ep_size, experts_num):
        return 0
    
class MoeMlpSimulator(LlamaMLPSimulator):
    """Cost model for the SwiGLU expert MLPs of a MoE layer."""

    def __init__(self, mp: Dict[str, object]):
        # input_shape[0] is (batch_size, hidden_size) here — note this
        # differs from the (seq, batch, hidden) layout used by the router.
        self.mp = mp
        self.hidden_size = mp['input_shape'][0][1]
        self.batch_size = mp['input_shape'][0][0]

    def get_calculate_workload_with_ep(self, tp_size, ep_size, experts_num):
        """Return the 5-slot op-count vector ([adds, -, muls, -, -]) for the
        experts resident on one device under expert + tensor parallelism."""
        out_per_rank = self.hidden_size // tp_size
        # Tokens processed per device: total / ep_size, times tp_size because
        # each expert's weights are also sharded across TP ranks.
        local_tokens = self.batch_size // ep_size * tp_size
        local_experts = experts_num // ep_size
        # SwiGLU intermediate width: 8/3 of the per-partition hidden size.
        inter = out_per_rank * 8 // 3

        adds = 0
        muls = 0
        # 1. Gated linear pair (up and gate projections).
        adds += local_tokens * out_per_rank * inter * local_experts
        muls += local_tokens * out_per_rank * inter * local_experts
        # 2. Element-wise product of the gate and up activations.
        muls += local_tokens * inter * local_experts
        # 3. Output projection (down_proj).
        adds += local_tokens * out_per_rank * inter * local_experts
        muls += local_tokens * out_per_rank * inter * local_experts

        return [adds, 0, muls, 0, 0]

    def get_communication_workload_with_ep(self, tp_size, ep_size, experts_num):
        return 0

    # Effective DP degree for the experts is dp_size / ep_size.
    def get_dp_communication_workload_with_ep(self, tp_size, dp_size, ep_size, experts_num):
        return 0
    
    
class ExpertColumnParallelLinearSimulator(ColumnParallelLinearSimulator):
    """Cost model for an expert's column-parallel linear layer (fc1)."""

    def __init__(self, mp: Dict[str, object]):
        # input_shape[0] is (batch_size, hidden_size); weight_shape[0] is the
        # output dimension of this linear layer.
        self.mp = mp
        self.hidden_size = mp['input_shape'][0][1]
        self.batch_size = mp['input_shape'][0][0]
        self.weight_hidden_size = mp['weight_shape'][0]

    def get_calculate_workload_with_ep(self, tp_size, ep_size, experts_num):
        """Return [adds, -, muls, -, -] for the local experts' fc1 matmuls."""
        out_per_rank = self.weight_hidden_size // tp_size
        # Tokens per device: total / ep_size, times tp_size (experts sharded).
        local_tokens = self.batch_size // ep_size * tp_size
        local_experts = experts_num // ep_size

        adds = local_tokens * (self.hidden_size - 1) * out_per_rank * local_experts
        muls = local_tokens * self.hidden_size * out_per_rank * local_experts
        return [adds, 0, muls, 0, 0]

    # TODO
    def get_communication_workload_with_ep(self, tp_size, ep_size, experts_num):
        return 0

    # Effective DP degree for the experts is dp_size / ep_size.
    def get_dp_communication_workload_with_ep(self, tp_size, dp_size, ep_size, experts_num):
        """Weight + gradient sync volume in GB for the local experts' fc1."""
        if dp_size // ep_size == 1:
            return 0
        # Factor 2 = weight + gradient; 4 bytes per element; divided by
        # tp_size because each TP rank owns only its shard.
        local_params = 2 * self.weight_hidden_size * self.hidden_size * experts_num // ep_size
        return local_params * 4 / 1024 / 1024 / 1024 / tp_size
    
    
class ExpertRowParallelLinearSimulator(RowParallelLinearSimulator):
    """Cost model for an expert's row-parallel linear layer (fc2)."""

    def __init__(self, mp: Dict[str, object]):
        # input_shape[0] is (batch_size, hidden_size); weight_shape[0] is the
        # output (projection) dimension of this linear layer.
        self.mp = mp
        self.hidden_size = mp['input_shape'][0][1]
        self.batch_size = mp['input_shape'][0][0]
        self.query_projection_size = mp['weight_shape'][0]

    def get_calculate_workload_with_ep(self, tp_size, ep_size, experts_num):
        """Return [adds, -, muls, -, -] for the local experts' fc2 matmuls.

        The input dimension is split across TP ranks (row parallelism), hence
        hidden_size // tp_size in both terms.
        """
        out_per_rank = self.query_projection_size // tp_size
        local_tokens = self.batch_size // ep_size * tp_size
        local_experts = experts_num // ep_size

        adds = local_tokens * (self.hidden_size // tp_size - 1) * out_per_rank * local_experts
        muls = local_tokens * self.hidden_size // tp_size * out_per_rank * local_experts
        return [adds, 0, muls, 0, 0]

    # TODO
    def get_communication_workload_with_ep(self, tp_size, ep_size, experts_num):
        # No per-expert all-reduce here: the reduction happens via the
        # dispatcher's reduce-scatter.
        return 0

    # Effective DP degree for the experts is dp_size / ep_size.
    def get_dp_communication_workload_with_ep(self, tp_size, dp_size, ep_size, experts_num):
        """Weight + gradient sync volume in GB for the local experts' fc2."""
        if dp_size // ep_size == 1:
            return 0
        local_params = 2 * self.query_projection_size * self.hidden_size * experts_num // ep_size
        return local_params * 4 / 1024 / 1024 / 1024 / tp_size

 
class ExpertsSimulator(MLPLayer, MoeCostCalculator):
    """Aggregates the cost simulators that make up a MoE expert block.

    The sub-simulators are expected to be assigned on the instance; the
    class-level None defaults only declare the expected attributes.
    """

    mlp: MoeMlpSimulator = None
    linear_fc1: ExpertColumnParallelLinearSimulator = None
    linear_fc2: ExpertRowParallelLinearSimulator = None

    def statistic_single_layer_calculate_workload_with_ep(self, tp_size, ep_size, experts_num):
        """Element-wise sum of the 5-slot workload vectors of every attached
        (non-None) instance-level sub-simulator."""
        total = [0, 0, 0, 0, 0]
        for sim in self.__dict__.values():
            if sim is None:
                continue
            part = sim.get_calculate_workload_with_ep(tp_size, ep_size, experts_num)
            total = [acc + val for acc, val in zip(total, part)]

        return total

    def statistic_single_layer_tp_communication_workload_with_ep(self, tp_size, ep_size, experts_num):
        # Pure pass-through to the mixin's aggregation logic.
        return super().statistic_single_layer_tp_communication_workload_with_ep(tp_size, ep_size, experts_num)

    def statistic_single_layer_dp_communication_workload_with_ep(self, tp_size, dp_size, ep_size, experts_num):
        # Pure pass-through to the mixin's aggregation logic.
        return super().statistic_single_layer_dp_communication_workload_with_ep(tp_size, dp_size, ep_size, experts_num)