from dataclasses import dataclass, field
from typing import Dict, List, Optional

from ..common.common import Simulator, CostCalculator
from ..common.output_layer_simulator import OutputLayer
from .bert_binary_head_simulator import BertBinaryHeadSimulator, PoolerSimulator

# TODO: bert_lm_head accounts for the Linear and GELU compute; layer_norm reuses the earlier LNImpl.
@dataclass
class BertLMHeadSimulator(Simulator):
    """Estimate the compute / communication workload of the BERT LM head
    (GELU activation followed by the output Linear projection).

    NOTE(review): the ``@dataclass`` decorator is redundant (no declared
    fields, hand-written ``__init__``) but is kept so the class keeps the
    dataclass-generated ``__eq__``/``__repr__`` callers may rely on.
    """

    def __init__(self, mp: Dict[str, object]):
        """Read tensor dimensions from the model-parallel config dict.

        Assumes ``mp['input_shape'][0]`` is (seq_len, batch, hidden) and
        ``mp['output_shape']`` is (seq_len, batch, h_out) — TODO confirm
        against the caller that builds ``mp``.
        """
        super().__init__()
        self.mp = mp
        self.hidden_size = mp['input_shape'][0][2]
        self.batch_size = mp['input_shape'][0][1]
        self.seq_len = mp['input_shape'][0][0]
        self.h_out = mp['output_shape'][2]

    # The BERT head Linear is not sharded under tensor parallelism, so its
    # dimensions match the input X regardless of tp_size.
    def get_calculate_workload(self, tp_size: int) -> List[int]:
        """Return per-op counts ``[adds, _, muls, _, other]`` for GELU + Linear.

        GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        Linear total = b*s*(h*h_out + h_out)  (matmul products + bias adds)
        """
        # Number of activations flowing through the GELU.
        n_elems = self.hidden_size * self.seq_len * self.batch_size
        calculator = [0, 0, 0, 0, 0]
        # GELU per element: 2 additions, 4 scalar multiplications
        # (incl. 0.044715 * x^3), and ~5 "other" ops (x^3 once, tanh ~= 4).
        calculator[0] = n_elems * 2
        calculator[2] = n_elems * 4
        calculator[4] = n_elems * 5

        # Linear: b*s*h*h_out multiplications; the bias contributes
        # b*s*h_out additions.
        # FIX: the original counted only b*s additions, dropping the h_out
        # factor required by the b*s*(h*h_out + h_out) formula above.
        calculator[0] += self.batch_size * self.seq_len * self.h_out
        calculator[2] += self.batch_size * self.seq_len * self.hidden_size * self.h_out

        return calculator

    def get_communication_workload(self, tp_size) -> int:
        """No collectives: the LM-head Linear is not tensor-parallel-sharded."""
        return 0

@dataclass
class BertOutputLayer(OutputLayer):
    """Bundle of the simulators that make up the BERT output layer.

    All components default to ``None``; annotations are therefore
    ``Optional`` (fix: the original relied on implicit-Optional
    annotations, which PEP 484 deprecates).
    """

    # LM head (GELU + output Linear) simulator.
    bert_lm_head: Optional[BertLMHeadSimulator] = None
    # Next-sentence-prediction binary head simulator.
    binary_head: Optional[BertBinaryHeadSimulator] = None
    # Pooler over the first token's hidden state.
    pooler: Optional[PoolerSimulator] = None