"""
    主体思想,对于入参Layer_info字典dfs便利树
    建造者模式构建tansformers层,每个类有自己的模拟开销方法

    伪代码如下
    if name == 'LanguageModelEmbedding':
        LanguageModelEmbeddingBuilder = LanguageModelEmbeddingBuild()
    if name == 'VocabParallelEmbedding'
        LanguageModelEmbeddingBuilder.build(VocabParallelEmbedding(Dict{}))
"""
from dataclasses import dataclass, field
from typing import Tuple
from .common import CostCalculator
import numpy as np
from .output_layer_simulator import OutputLayer
from .language_model_embedding import LanguageModelEmbedding
from .mlp_layer_simulator import MLPLayer
from .self_attention_simulator import SelfAttentionSimulator
from .base_simulator import (
    LNImplSimulator, 
    IdentityFuncOpSimulator,
    TELayerNormSimulator
)
from ..llama.simulator import  *

 
@dataclass
class TransformerLayer(CostCalculator):
    """One simulated transformer layer assembled by the builder.

    Each field holds the cost simulator of one sub-module (``None`` when the
    sub-module is absent). The ``statistic_single_layer_*`` methods aggregate
    compute / memory / communication costs over the sub-modules, always
    skipping the embedding and output layers, which belong to the model ends
    rather than to a repeated transformer layer.
    """

    language_model_embedding: LanguageModelEmbedding = None

    input_layernorm: LNImplSimulator = None

    self_attention: SelfAttentionSimulator = None

    mlp: MLPLayer = None  # MLP block or MoE block

    self_attn_bda: IdentityFuncOpSimulator = None
    pre_mlp_layernorm: LNImplSimulator = None

    # final layer norm
    layer_norm: TELayerNormSimulator = None

    # output (logits) layer
    output_layer: OutputLayer = None

    # Fields excluded from per-layer statistics: they belong to the model
    # ends, not to a repeated transformer layer. (Unannotated, so the
    # dataclass machinery does not treat them as instance fields.)
    _EXCLUDED_FIELDS = ('language_model_embedding', 'output_layer')
    # Fields whose simulators expose the statistic_single_layer_* API
    # instead of the plain get_* API.
    _COMPOSITE_FIELDS = ('mlp', 'self_attention')

    def _iter_layer_simulators(self):
        """Yield ``(is_composite, simulator)`` for every non-None per-layer field."""
        for name, value in self.__dict__.items():
            if name in self._EXCLUDED_FIELDS or value is None:
                continue
            yield name in self._COMPOSITE_FIELDS, value

    def statistic_single_layer_calculate_workload(self, tp_size: int):
        """Sum per-operation counts over all sub-modules of one layer.

        Returns a 5-element list counting [add, sub, mul, div, exp] operations.
        """
        workload_total = [0, 0, 0, 0, 0]
        for composite, sim in self._iter_layer_simulators():
            counts = (sim.statistic_single_layer_calculate_workload(tp_size)
                      if composite
                      else sim.get_calculate_workload(tp_size))
            workload_total = [x + y for x, y in zip(workload_total, counts)]

        return workload_total

    def statistic_single_layer_memory_workload(self, tp_size: int) -> Tuple[float, float]:
        """Return ``(layer_memory, active_memory)`` in GiB per TP rank.

        ``layer_memory`` is the measured static allocation minus the
        theoretical embedding-weight footprint (32-bit weights, 4 bytes each),
        which is included in the measurement but is not part of a repeated
        layer; ``active_memory`` is the measured peak-minus-static cost.
        """
        block_init_memory = self.mlp.mlp.mp['init_memory_allocated']
        block_memory = self.mlp.mlp.mp['max_memory_allocated']
        word_embeddings = self.language_model_embedding.word_embeddings
        # 32-bit weights: 4 bytes per element
        theoretical_embedding_memory = (
            word_embeddings.weight_hidden_size * word_embeddings.hidden_size * 4
        )

        layer_memory = (block_init_memory - theoretical_embedding_memory) / tp_size / 1024 / 1024 / 1024
        active_memory = (block_memory - block_init_memory) / tp_size / 1024 / 1024 / 1024

        return layer_memory, active_memory

    def statistic_single_layer_communication_workload(self, tp_size):
        """Sum TP communication volume over all per-layer sub-modules."""
        communication_allocated = 0
        for composite, sim in self._iter_layer_simulators():
            communication_allocated += (
                sim.statistic_single_layer_communication_workload(tp_size)
                if composite
                else sim.get_communication_workload(tp_size)
            )

        return communication_allocated

    def statistic_single_layer_dp_communication_workload(self, tp_size, dp_size):
        """Sum DP communication volume over all per-layer sub-modules."""
        communication_allocated = 0
        for composite, sim in self._iter_layer_simulators():
            communication_allocated += (
                sim.statistic_single_layer_dp_communication_workload(tp_size, dp_size)
                if composite
                else sim.get_dp_communication_workload(tp_size, dp_size)
            )

        return communication_allocated

    def statistic_single_layer_pp_communication_workload(self, tp_size):
        """GiB of one fp32 activation tensor passed between PP stages, per TP rank."""
        cfg = self.mlp.mlp
        return cfg.batch_size * cfg.seq_len * cfg.hidden_size * 4 / 1024 / 1024 / 1024 / tp_size