from dataclasses import dataclass, field
from typing import Dict, List
from .common import Simulator

 
class LNImplSimulator(Simulator):
    """Op-count simulator for a plain (non-fused) LayerNorm implementation.

    Workload vector layout appears to be [adds, subs, muls, divs, special]
    (inferred from how the sibling simulators fill it) — confirm against the
    Simulator base class.
    """

    def __init__(self, mp: Dict[str, object]):
        super().__init__()
        self.mp = mp
        # input_shape[0] is read here as (seq_len, batch_size, hidden_size).
        shape = mp['input_shape'][0]
        self.seq_len = shape[0]
        self.batch_size = shape[1]
        self.hidden_size = shape[2]

    # TODO: tp_size is not used yet (kept from the original note).
    def get_calculate_workload(self, tp_size: int) -> List[float]:
        """Return op counts for mean, variance, and normalization passes."""
        rows = self.batch_size * self.seq_len       # number of vectors normalized
        elems = rows * self.hidden_size             # total elements touched
        adds = subs = muls = divs = special = 0

        # mean: (h-1) adds per row, one division per element
        adds += rows * (self.hidden_size - 1)
        divs += elems

        # variance: sum of squared deviations, then divide by h per row
        adds += rows * (self.hidden_size - 1)
        subs += elems
        muls += elems
        divs += rows

        # normalize: subtract mean, divide by std, one sqrt per row
        subs += elems
        divs += elems
        special += rows
        return [adds, subs, muls, divs, special]

    def get_communication_workload(self, tp_size) -> int:
        # LayerNorm needs no tensor-parallel communication here.
        return 0

    def get_dp_communication_workload(self, tp_size, dp_size):
        """Gradient all-reduce volume in GiB for data parallelism."""
        if dp_size == 1:
            return 0
        # 2 tensors (weight & bias) * 2 (param + grad) * 4 bytes each;
        # not divided by tp_size because the normalized dim is replicated
        # across tensor-parallel ranks.
        return 2 * 2 * self.hidden_size * 4 / 1024 / 1024 / 1024


class ColumnParallelLinearSimulator(Simulator):
    """Op-count simulator for a column-parallel linear layer (output dim split
    across tensor-parallel ranks).

    Workload vector layout appears to be [adds, subs, muls, divs, special] —
    confirm against the Simulator base class.
    """

    def __init__(self, mp: Dict[str, object]):
        super().__init__()
        self.mp = mp
        in_shape = mp['input_shape'][0]
        # NOTE(review): input_shape[0] is read here as (batch, seq, hidden),
        # while LNImplSimulator reads it as (seq, batch, hidden) — confirm
        # which axis order the callers actually pass.
        self.batch_size = in_shape[0]
        self.seq_len = in_shape[1]
        self.hidden_size = in_shape[2]
        self.weight_hidden_size = mp['weight_shape'][0]

    def get_calculate_workload(self, tp_size: int) -> List[float]:
        """GEMM op counts for this rank's output partition."""
        out_per_rank = self.weight_hidden_size // tp_size
        tokens = self.batch_size * self.seq_len
        # Each output element is a dot product over hidden_size:
        # hidden_size muls and hidden_size-1 adds.
        adds = tokens * (self.hidden_size - 1) * out_per_rank
        muls = tokens * self.hidden_size * out_per_rank
        return [adds, 0, muls, 0, 0]

    def get_communication_workload(self, tp_size) -> int:
        # Column-parallel forward needs no extra communication here.
        return 0

    def get_dp_communication_workload(self, tp_size, dp_size):
        """Gradient all-reduce volume in GiB: weight + grad, fp32, tp-split."""
        if dp_size == 1:
            return 0
        return 2 * self.weight_hidden_size * self.hidden_size * 4 / tp_size / 1024 / 1024 / 1024

class DotProductAttentionSimulator(Simulator):
    """Op-count simulator for scaled dot-product attention (QK^T and PV matmuls).

    Softmax and dropout costs are accounted for by separate simulators.
    Workload vector layout appears to be [adds, subs, muls, divs, special] —
    inferred from the sibling simulators; confirm against the Simulator base.
    """

    def __init__(self, mp: Dict[str, object], num_heads=1):
        super().__init__()
        self.mp = mp
        # input_shape holds the (q, k, v, mask) shapes; q is read as
        # (seq_len, batch_size, num_heads, head_dim) — TODO confirm with caller.
        self.q = mp['input_shape'][0]
        self.k = mp['input_shape'][1]
        self.v = mp['input_shape'][2]
        self.mask = mp['input_shape'][3]
        self.num_heads = num_heads

    def get_calculate_workload(self, tp_size: int) -> List[float]:
        """FLOP breakdown for P = QK^T / sqrt(dk) and O = PV.

        Bug fix: the original multiplied (batch * seq * heads) by itself,
        squaring the batch and head factors. The score matrix P has
        batch * heads * seq * seq entries (consistent with the accounting in
        FusedScaleMaskSoftmaxSimulator), so each term now uses that count once.
        """
        calculator = [0, 0, 0, 0, 0]
        seq_len = self.q[0]
        batch_size = self.q[1]
        total_head_num = self.q[2]
        heads_per_rank = total_head_num // tp_size  # heads assigned to this tp rank
        # NOTE(review): dividing head_dim by tp_size on top of splitting the
        # head count may double-count the tp partition — confirm.
        head_dim = self.q[3] // tp_size

        # Number of entries in the attention score matrix per rank.
        scores = batch_size * heads_per_rank * seq_len * seq_len

        # P = QK^T / sqrt(dk): each score is a dot product over head_dim
        # (head_dim muls, head_dim-1 adds), plus one scaling division and one
        # special op per score (categories kept from the original accounting).
        calculator[0] += scores * (head_dim - 1)
        calculator[2] += scores * head_dim
        calculator[3] += scores
        calculator[4] += scores

        # O = PV: the output has batch*heads*seq*head_dim entries, each a dot
        # product over seq_len (seq_len muls, seq_len-1 adds).
        out_elems = batch_size * heads_per_rank * seq_len * head_dim
        calculator[0] += out_elems * (seq_len - 1)
        calculator[2] += scores * head_dim

        return calculator

    def get_communication_workload(self, tp_size) -> int:
        # The tp all-reduce is accounted for in RowParallelLinearSimulator.
        return 0

    def get_dp_communication_workload(self, tp_size, dp_size):
        # Attention itself has no parameters, so no dp gradient traffic.
        return 0
    

class RowParallelLinearSimulator(Simulator):
    """Op-count simulator for a row-parallel linear layer (input/reduction dim
    split across tensor-parallel ranks).

    Workload vector layout appears to be [adds, subs, muls, divs, special] —
    confirm against the Simulator base class.
    """

    def __init__(self, mp: Dict[str, object]):
        super().__init__()
        self.mp = mp
        in_shape = mp['input_shape'][0]
        # input_shape[0] is read here as (batch, seq, hidden).
        self.batch_size = in_shape[0]
        self.seq_len = in_shape[1]
        self.hidden_size = in_shape[2]
        self.query_projection_size = mp['weight_shape'][0]

    def get_calculate_workload(self, tp_size: int) -> List[float]:
        """GEMM op counts; the input was already column-split upstream, so the
        reduction dimension on each rank is hidden_size / tp_size."""
        out_size = self.query_projection_size // tp_size
        tokens = self.batch_size * self.seq_len
        adds = tokens * (self.hidden_size / tp_size - 1) * out_size
        muls = tokens * self.hidden_size / tp_size * out_size
        return [adds, 0, muls, 0, 0]

    def get_communication_workload(self, tp_size) -> int:
        """Tensor-parallel all-reduce volume in GiB.

        Both the self-attention and MLP tp all-reduces are accounted here:
        4 transfers total across forward + backward, 4 bytes per element.
        """
        if tp_size == 1:
            return 0
        return self.batch_size * self.seq_len * self.hidden_size * 4 * 4 / 1024 / 1024 / 1024

    def get_dp_communication_workload(self, tp_size, dp_size):
        """Gradient all-reduce volume in GiB: weight + grad, fp32, tp-split."""
        if dp_size == 1:
            return 0
        return 2 * self.query_projection_size * self.hidden_size * 4 / 1024 / 1024 / 1024 / tp_size
 
class IdentityOpSimulator(Simulator):
    """Pass-through op: contributes no compute and no communication."""

    def __init__(self, mp: Dict[str, object]):
        super().__init__()
        self.mp = mp

    def get_calculate_workload(self, tp_size: int) -> List[float]:
        return [0] * 5

    def get_communication_workload(self, tp_size) -> int:
        return 0

    def get_dp_communication_workload(self, tp_size, dp_size):
        return 0

class FusedScaleMaskSoftmaxSimulator(Simulator):
    """Op-count simulator for the fused scale + mask + softmax kernel.

    Workload vector layout appears to be [adds, subs, muls, divs, special] —
    confirm against the Simulator base class.
    """

    def __init__(self, mp: Dict[str, object], num_heads: int = 8):
        """
        Args:
            mp: op metadata; input_shape[0] is read as (seq, batch, hidden).
            num_heads: attention head count. Previously hard-coded to 8
                (marked TODO in the original); now a parameter whose default
                preserves the old behavior.
        """
        super().__init__()
        self.mp = mp
        self.hidden_size = mp['input_shape'][0][2]
        self.batch_size = mp['input_shape'][0][1]
        self.seq_len = mp['input_shape'][0][0]
        self.num_heads = num_heads

    def get_calculate_workload(self, tp_size: int) -> List[float]:
        """Op counts for scale, additive mask, and softmax over the
        (batch, heads/tp, seq, seq) score matrix."""
        per_partition_heads = self.num_heads // tp_size
        scores = self.batch_size * per_partition_heads * self.seq_len * self.seq_len
        calculator = [0, 0, 0, 0, 0]
        # scale: one division per score
        calculator[3] += scores
        # additive mask: one add per score
        calculator[0] += scores
        # softmax: exp per score, (seq-1) adds per row, one divide per score
        calculator[4] += scores
        calculator[0] += self.batch_size * per_partition_heads * (self.seq_len - 1) * self.seq_len
        calculator[3] += scores
        return calculator

    def get_communication_workload(self, tp_size) -> int:
        # No parameters and no tp communication for this op.
        return 0

    def get_dp_communication_workload(self, tp_size, dp_size):
        return 0

class IdentityFuncOpSimulator(Simulator):
    """Identity function op: zero compute, zero communication."""

    def __init__(self, mp: Dict[str, object]):
        super().__init__()
        self.mp = mp

    def get_calculate_workload(self, tp_size: int) -> List[float]:
        return [0] * 5

    def get_communication_workload(self, tp_size) -> int:
        return 0

    def get_dp_communication_workload(self, tp_size, dp_size):
        return 0

class DropoutSimulator(Simulator):
    """Dropout op: modeled as free here (no compute or communication counted —
    presumably deemed negligible; confirm if dropout cost should be tracked)."""

    def __init__(self, mp: Dict[str, object]):
        super().__init__()
        self.mp = mp

    def get_calculate_workload(self, tp_size: int) -> List[float]:
        return [0] * 5

    def get_communication_workload(self, tp_size) -> int:
        return 0

    def get_dp_communication_workload(self, tp_size, dp_size):
        return 0
    
class TELayerNormSimulator(Simulator):
    """Simulator for transformer_engine's LayerNorm.

    Compute accounting is not implemented yet (see TODO); only the dp gradient
    traffic for the norm's parameters is modeled.
    """

    def __init__(self, mp: Dict[str, object]):
        super().__init__()
        self.mp = mp
        self.weight_hidden_size = mp['weight_shape'][0]

    # TODO: FLOP accounting for transformer_engine's LayerNorm.
    def get_calculate_workload(self, tp_size) -> List[float]:
        return [0] * 5

    def get_communication_workload(self, tp_size) -> int:
        return 0

    def get_dp_communication_workload(self, tp_size, dp_size):
        """Gradient all-reduce volume in GiB: params + grads (*2), 4 bytes
        each; not divided by tp_size because the normalized dim is replicated
        across tensor-parallel ranks."""
        if dp_size == 1:
            return 0
        return self.weight_hidden_size * 2 * 4 / 1024 / 1024 / 1024
    
'''
RMSNorm(x) = sqrt((1/d) * sum_i(x_i^2))
res_i = x_i / RMSNorm(x)
'''
class RMSNormSimulator(Simulator):
    """Op-count simulator for RMSNorm — roughly half a full LayerNorm's work
    since there is no mean subtraction.

    Workload vector layout appears to be [adds, subs, muls, divs, special] —
    confirm against the Simulator base class.
    """

    def __init__(self, mp: Dict[str, object]):
        super().__init__()
        self.mp = mp
        # input_shape[0] is read here as (seq_len, batch_size, hidden_size).
        shape = mp['input_shape'][0]
        self.seq_len = shape[0]
        self.batch_size = shape[1]
        self.hidden_size = shape[2]

    def get_calculate_workload(self, tp_size) -> List[float]:
        """Op counts per tensor-parallel rank (hidden dim split by tp_size)."""
        local_hidden = self.hidden_size // tp_size
        rows = self.batch_size * self.seq_len
        adds = rows * (local_hidden - 1)  # sum of squares: h-1 adds per row
        muls = rows * local_hidden        # squaring each element
        # NOTE(review): counted as one division per row, but x_i / RMSNorm(x)
        # is per-element (rows * local_hidden) — confirm intent.
        divs = rows
        special = rows                    # one sqrt per row
        return [adds, 0, muls, divs, special]

    def get_communication_workload(self, tp_size) -> int:
        return 0

    def get_dp_communication_workload(self, tp_size, dp_size):
        return 0