from dataclasses import dataclass, field
from typing import Dict, List
from ..common.common import Simulator, CostCalculator
from ..common.base_simulator import DotProductAttentionSimulator
from ..common.mlp_layer_simulator import MLPSimulator
    
class LlamaCoreAttentionSimulator(DotProductAttentionSimulator):
    """Analytic cost model for Llama core attention (QK^T, scale, softmax,
    PV) plus rotary position embedding, under tensor parallelism.

    Expects ``mp['input_shape']`` to hold the Q/K/V/mask shape tuples; Q is
    assumed to be laid out ``[seq_len, batch, num_heads, head_dim]``
    (Megatron-style) -- TODO confirm against the caller.
    """

    def __init__(self, mp: Dict[str, object]):
        super().__init__(mp)
        self.mp = mp
        # Shape tuples of the Q/K/V tensors and the attention mask.
        self.q = mp['input_shape'][0]
        self.k = mp['input_shape'][1]
        self.v = mp['input_shape'][2]
        self.mask = mp['input_shape'][3]

    # Besides the QK^T/PV matmuls, this also accounts for the rotary
    # position embedding applied to Q and K.
    def get_calculate_workload(self, tp_size: int) -> List[int]:
        """Return the per-rank compute workload as a 5-slot list.

        Slot convention (inferred from usage in this file -- confirm):
        [0] additions, [2] multiplications, [3] scale-by-1/sqrt(dk)
        elements, [4] softmax elements.  Every term is scaled by 4
        (bytes, fp32) and divided by 1024**3.
        """
        calculator = [0, 0, 0, 0, 0]
        seq_len = self.q[0]
        batch_size = self.q[1]
        total_head_num = self.q[2]
        # Tensor parallelism shards the heads across ranks; each head
        # keeps its full dimension.  (Previously head_dim was *also*
        # divided by tp_size, double-counting the 1/tp_size scaling.)
        head_num_per_partition = total_head_num // tp_size
        head_dim = self.q[3]

        GIB = 1024 ** 3
        # The attention-score matrix has batch * heads * seq * seq
        # entries; each QK^T entry costs head_dim multiplies and
        # head_dim - 1 adds.  (The original formula multiplied
        # batch_size * seq_len * head_num by itself, squaring those
        # factors; the linear RoPE terms below show the intended unit.)
        score_elems = batch_size * head_num_per_partition * seq_len * seq_len

        # QK^T / sqrt(dk) = P
        calculator[0] += score_elems * (head_dim - 1) * 4 / GIB
        calculator[2] += score_elems * head_dim * 4 / GIB
        calculator[3] += score_elems * 4 / GIB  # scale each score once
        calculator[4] += score_elems * 4 / GIB  # softmax over the scores

        # P @ V -- same matmul shape as QK^T.
        calculator[0] += score_elems * (head_dim - 1) * 4 / GIB
        calculator[2] += score_elems * head_dim * 4 / GIB

        # Rotary position embedding.
        # Building the cos/sin tables is negligible.
        # q_embed = (q * cos) + (rotate_half(q) * sin), likewise for k:
        # 2 multiplies + 1 add per element, applied to both Q and K.
        rope_elems = batch_size * seq_len * head_num_per_partition * head_dim
        calculator[0] += 2 * rope_elems * 4 / GIB
        calculator[2] += 4 * rope_elems * 4 / GIB

        return calculator

    def get_memory_workload(self, tp_size: int) -> int:
        """Memory traffic is not modeled for this layer."""
        return 0

    def get_communication_workload(self, tp_size):
        """Core attention itself incurs no collective communication."""
        return 0

class LlamaMLPSimulator(MLPSimulator):
    """Analytic compute-cost model for the Llama SwiGLU MLP block
    (gate/up projections, element-wise product, down projection)
    under tensor parallelism.

    Relies on ``hidden_size``, ``batch_size`` and ``seq_len`` being set
    by the ``MLPSimulator`` base -- confirm against the base class.
    """

    def __init__(self, mp: Dict[str, object]):
        super().__init__(mp)

    def get_calculate_workload(self, tp_size):
        """Return the per-rank compute workload as a 5-slot list.

        Slot convention follows the rest of the file: [0] matmul
        additions, [2] multiplications; each term is scaled by 4
        (bytes, fp32) and divided by 1024**3.
        """
        calculator = [0, 0, 0, 0, 0]
        # SwiGLU intermediate size is 8/3 of the hidden size.  Tensor
        # parallelism shards the *intermediate* dimension; the hidden
        # (contracted) dimension stays full on every rank.  (Previously
        # hidden_size was divided by tp_size and the intermediate was
        # derived from that already-divided value, scaling the work by
        # 1/tp_size**2 instead of 1/tp_size.)
        intermediate_size = self.hidden_size * 8 // 3
        inter_per_partition = intermediate_size // tp_size
        tokens = self.batch_size * self.seq_len
        GIB = 1024 ** 3

        # 1. Gate and up projections: TWO column-parallel matmuls of
        #    [tokens, hidden] x [hidden, inter/tp] each.  (The original
        #    added only one term despite its "up and gate" comment.)
        matmul = tokens * self.hidden_size * inter_per_partition * 4 / GIB
        calculator[0] += 2 * matmul
        calculator[2] += 2 * matmul
        # 2. Element-wise product act(gate) * up over the intermediate.
        calculator[2] += tokens * inter_per_partition * 4 / GIB

        # 3. Down projection: row-parallel matmul
        #    [tokens, inter/tp] x [inter/tp, hidden].
        calculator[0] += matmul
        calculator[2] += matmul

        return calculator