from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Dict, List
from ..common.transfomer_layer_simulator import LNImplSimulator
from ..common.base_simulator_builder import TransformerLayerSimulatorBuilder, GeesTransformerLayerSimulatorBuilder
from ..bert.bert_output_layer_simulator import BertOutputLayer, BertLMHeadSimulator
from ..bert.bert_binary_head_simulator  import BertBinaryHeadSimulator, PoolerSimulator

class BertSimulatorBuilder(TransformerLayerSimulatorBuilder):
    """Abstract builder that extends the transformer-layer builder with the
    BERT-specific components: output layer, LM head (plus its fused
    LayerNorm), binary head, and pooler.

    Concrete subclasses (e.g. ``GeesBertSimulatorBuilder``) implement each
    ``build_*`` hook to attach the corresponding simulator to the layer
    under construction and return ``self`` for fluent chaining.

    NOTE(review): the original class carried a ``@dataclass`` decorator, but
    with no declared fields and a hand-written ``__init__`` the decorator
    generated nothing — it has been dropped, together with the redundant
    ``__init__`` that only forwarded to ``super().__init__()``.
    NOTE(review): ``@abstractmethod`` only enforces instantiation checks if
    the (unseen) base class uses ``ABCMeta`` — confirm that
    ``TransformerLayerSimulatorBuilder`` does.
    """

    @abstractmethod
    def build_bert_lm_head(self, args: Dict[str, object]):
        """Attach the BERT LM-head simulator built from *args*."""
        ...

    @abstractmethod
    def build_binary_head(self, args: Dict[str, object]):
        """Attach the BERT binary-head simulator built from *args*."""
        ...

    @abstractmethod
    def build_pooler(self, args: Dict[str, object]):
        """Handle the pooler layer's statistics from *args*."""
        ...

    @abstractmethod
    def build_bert_output_layer(self, args: Dict[str, object]):
        """Create the BERT output-layer container on the layer being built."""
        ...

    @abstractmethod
    def build_bert_lm_head_fused_layer_norm(self, args: Dict[str, object]):
        """Attach the fused LayerNorm belonging to the BERT LM head."""
        ...

class GeesBertSimulatorBuilder(GeesTransformerLayerSimulatorBuilder):
    """Concrete BERT simulator builder for the Gees backend.

    Maps each observed layer class name to an ordered list of ``build_*``
    methods. When the same class name occurs multiple times during a pass,
    the relative order of occurrence decides which builder in the list is
    invoked (dispatch convention inherited from
    ``GeesTransformerLayerSimulatorBuilder`` — confirm there).
    """

    def __init__(self):
        super().__init__()
        # Dispatch table: layer class name -> candidate builder methods.
        # NOTE(review): "methond" is a typo, but the attribute name is kept
        # as-is because the base class / callers look it up by this name.
        self.TransformerLayer_methond_mapping = {
            # embedding
            "LanguageModelEmbedding": [self.build_language_embeddings],
            'VocabParallelEmbedding': [self.build_word_embeddings],
            'Embedding': [self.build_position_embeddings, self.build_tokentype_embeddings],

            # relative order of occurrence decides which builder is called
            'FusedLayerNorm': [self.build_input_layernorm, self.build_pre_mlp_layernorm, self.build_bert_lm_head_fused_layer_norm],
            'SelfAttention': [self.build_self_attention],
            'DotProductAttention': [self.build_core_attention],
            'FusedScaleMaskSoftmax': [self.build_scale_mask_softmax],
            'Dropout': [self.build_embedding_dropout, self.build_attention_dropout],
            'ColumnParallelLinear': [self.build_linear_qkv, self.build_linear_fc1, self.build_output_layer],
            'RowParallelLinear': [self.build_linear_proj, self.build_linear_fc2],
            'IdentityOp': [self.build_q_layernorm, self.build_k_layernorm, self.build_pre_cross_attn_layernorm, self.build_cross_attention],
            'IdentityFuncOp': [self.build_self_attn_bda],
            'MLP': [self.build_mlp],

            # the final LayerNorm that comes after layer_spec
            'LayerNorm':[self.build_layer_norm],

            'BertLMHead': [self.build_bert_output_layer],
            'Linear': [self.build_bert_lm_head, self.build_binary_head, self.build_pooler],
            #'Pooler': [],  # BERT's pooler acts on the first token and is really a Linear, so it is dispatched via 'Linear' above
        }

    def build_bert_output_layer(self, args: Dict[str, object]):
        """Create the output-layer container; returns self for chaining."""
        self.TransformerLayer.output_layer = BertOutputLayer()
        return self

    # TODO: remove the debug print below once the LM-head wiring is settled
    def build_bert_lm_head(self, args: Dict[str, object]):
        """Attach the LM-head simulator to the output layer; returns self."""
        print(f'bert_lm_head args is {args}')
        self.TransformerLayer.output_layer.bert_lm_head = BertLMHeadSimulator(args)
        return self
    
    # reuses LNImplSimulator (the shared LayerNorm implementation)
    def build_bert_lm_head_fused_layer_norm(self, args: Dict[str, object]):
        """Attach the fused LayerNorm inside the LM head; returns self."""
        # this is BERT's fused layer norm, simulated with the shared LN impl
        self.TransformerLayer.output_layer.bert_lm_head.fused_layer_norm = LNImplSimulator(args)
        return self
    
    def build_binary_head(self, args: Dict[str, object]):
        """Attach the binary-head simulator to the output layer; returns self."""
        self.TransformerLayer.output_layer.binary_head = BertBinaryHeadSimulator(args)
        return self

    def build_pooler(self, args: Dict[str, object]):
        """Adjust the pooler's memory stat by subtracting the output layer's
        share, storing the result back into ``args``; returns self.

        NOTE(review): no ``PoolerSimulator`` is attached here even though it
        is imported at the top of the file — confirm whether that is still
        TODO. Also confirm the nested ``output_layer.output_layer`` access:
        it reads an ``output_layer`` attribute on the ``BertOutputLayer``
        container, presumably set by ``build_output_layer``.
        """
        max_memory_allocated = args['max_memory_allocated'] - self.TransformerLayer.output_layer.output_layer.mp['max_memory_allocated']
        args['max_memory_allocated_processed'] = max_memory_allocated
        return self

    
