import copy
import json
import os
import re
import shutil
from collections import defaultdict
from enum import Enum
from typing import Dict, List, Tuple

import numpy as np
import pandas as pd

from .operators_template import Template
from ..utils import logger, get_system_config, get_model_config, TaskType
from ..utils.parallel_config import ParallelConfig
from ..utils.profiler import Launch
from ..utils.profiler_args import OperatorProfileArgs
from ..utils.utils import find_csv, get_cache_path


# Computation operator name prefixes kept when filtering the moe/mlp
# forward-pass kernel profiling records.
OPERATORS_POOL = [
    'MatMul', 'SwiGlu', 'TopKV2', 'Softmax', 'OneHot', 'Mul',
    'NonZero', 'GatherV2', 'Sort', 'LinearIndex', 'Slice', 'GroupedMatmul'
]

# moe/mlp backward (grad) pass computation operators.
OPERATORS_GRAD_POOL = [
    'MatMul', 'SwiGluGrad', 'TopKV2', 'SoftmaxGrad', 'OneHot',
    'Mul', 'Slice', 'NonZero', 'GatherV2', 'Sort', 'LinearIndex', 'GroupedMatmul'
]

# Attention forward-pass computation operators.
ATTN_OPERATORS_POOL = [
    'MatMul', 'Cos', 'Mul', 'Cast', 'Sin', 'Neg',
    'ConcatD', 'Transpose', 'FlashAttentionScore', 'AsStrided'
]

# Attention backward (grad) pass computation operators.
ATTN_OPERATORS_GRAD_POOL = [
    'Cast', 'MatMul', 'Transpose', 'NonZero', 'ConcatD',
    'FlashAttentionScoreGrad', 'Mul', 'Neg', 'AsStrided'
]

# Marker operators inserted into the trace purely as profiling delimiters.
# Index 0 = block start flag, index 1 = block end flag, index 2 = the
# expert / ring flag (see filter_operators).  Order is load-bearing.
FLAG_OPERATORS = [
    'SoftplusV2', 'SoftShrink', 'HardShrink'
]

# Backward-pass marker operators, same index layout as FLAG_OPERATORS.
# NOTE(review): the trailing 'HardShrink' entry is deliberately excluded
# from the moe backward selection via FLAG_OPERATORS_GRAD[:-1] in
# filter_operators — confirm whether it belongs here for attention only.
FLAG_OPERATORS_GRAD = [
    'SoftShrinkGrad', 'SoftplusV2Grad', 'HardShrinkGrad', 'HardShrink'
]

# Communication operator name prefixes for the moe/mlp forward pass.
COMMUNICATION_POOL = [
    'hcom_allGather_', 'hcom_reduceScatter_', 'hcom_alltoall_', 'hcom_alltoallv_', 'hcom_allReduce_'
]

# Communication operator name prefixes for the moe/mlp backward pass.
COMMUNICATION_GRAD_POOL = [
    'hcom_reduceScatter_', 'hcom_allGather_', 'hcom_alltoall_', 'hcom_alltoallv_', 'hcom_allReduce_'
]

# Attention forward-pass communication operators (ring attention adds
# point-to-point send/receive).
ATTN_COMMUNICATION_POOL = [
    'hcom_allGather_', 'hcom_alltoall_', 'hcom_send_', 'hcom_receive_', 'hcom_reduceScatter_'
]

# Attention backward-pass communication operators.
ATTN_COMMUNICATION_GRAD_POOL = [
    'hcom_allGather_', 'hcom_send_', 'hcom_receive_', 'hcom_alltoall_', 'hcom_reduceScatter_'
]


# Parallel-dimension combinations probed when deriving moe/mlp operator
# shape rules; 'cup' denotes the fused cp*up dimension (see
# config_comparison / revert_dynamic_shape).
PATTERNS_POOL = {
    'TP_EP': ['ep', 'tp'],
    'TP': ['tp'],
    'EP': ['ep'],
    'MBS': ['mbs'],
    'TP_MBS': ['tp', 'mbs'],
    'EP_MBS': ['ep', 'mbs'],
    'TP_EP_MBS': ['ep', 'tp', 'mbs'],
    'CUP': ['cup'],
    'TP_CUP': ['tp', 'cup'],
    'CUP_EP': ['cup', 'ep'],
    'CUP_MBS': ['cup', 'mbs'],
    # Bug fix: the 'cup' entry previously carried a stray leading space
    # (' cup'), which could never match the 'cup' pattern element.
    'TP_CUP_EP': ['tp', 'cup', 'ep'],
    'TP_CUP_MBS': ['tp', 'cup', 'mbs'],
    'CUP_EP_MBS': ['cup', 'ep', 'mbs'],
    'TP_CUP_EP_MBS': ['tp', 'cup', 'ep', 'mbs']
}

# Parallel-dimension combinations probed for the attention block.
ATTN_PATTERN_POOL = {
    'TP': ['tp'],
    'CP': ['cp'],
    'UP': ['up'],
    'MBS': ['mbs'],
    'TP_CP': ['tp', 'cp'],
    'TP_UP': ['tp', 'up'],
    'TP_MBS': ['tp', 'mbs'],
    'CP_UP': ['cp', 'up'],
    'CP_MBS': ['cp', 'mbs'],
    'UP_MBS': ['up', 'mbs'],
    'TP_CP_UP': ['tp', 'cp', 'up'],
    'TP_CP_MBS': ['tp', 'cp', 'mbs'],
    'TP_UP_MBS': ['tp', 'up', 'mbs'],
    'CP_UP_MBS': ['cp', 'up', 'mbs'],
    'TP_CP_UP_MBS': ['tp', 'cp', 'up', 'mbs']
}


class ProfKey(Enum):
    """Column names of the kernel profiling data, plus derived keys.

    TYPE/START_TIME/DURATION/ACCELERATOR are read directly from the CSV;
    END_TIME and ASYNC are computed by OverlappedAnalysis.
    """
    TYPE = 'Type'
    START_TIME = 'Start Time(us)'
    DURATION = 'Duration(us)'
    ACCELERATOR = 'Accelerator Core'
    END_TIME = 'End Time(us)'
    ASYNC = 'async'


class OverlappedAnalysis:
    """Detect computation/communication overlap in kernel profiling data.

    A "block" is the operator sequence recorded between a start marker
    operator and an end marker operator that were inserted into the model
    purely as profiling delimiters.
    """

    # Communication operator name prefixes recognised in profiling records.
    comm_operators = ['hcom_allGather_', 'hcom_reduceScatter_', 'hcom_send_', 'hcom_receive_', 'hcom_alltoall_',
                      'hcom_alltoallv_', 'hcom_allReduce_']

    @classmethod
    def is_comm_operator(cls, operator_name):
        """Return True when operator_name contains any known communication prefix."""
        return any(op in operator_name for op in cls.comm_operators)

    @classmethod
    def get_operators_info(
        cls,
        data_frame,
        start_block_operator='SoftplusV2',
        end_block_operator='SoftShrink',
        ignored_operaotrs=None
    ):
        """Parse every marker-delimited operator block out of ``data_frame``.

        Each block is a list of dicts carrying the operator type, start time,
        duration, derived end time and accelerator core.

        NOTE(review): rows are addressed via ``.loc[index]`` with integer
        positions, so ``data_frame`` is assumed to carry a default
        RangeIndex — confirm callers reset the index first.  A missing end
        marker would also run the inner loop off the end of the frame.
        """
        # Default kept as the historical ['HardShrink'] while avoiding a
        # mutable default argument.
        if ignored_operaotrs is None:
            ignored_operaotrs = ['HardShrink']

        index = 0
        operators_blocks = []
        while index < len(data_frame.index):
            if data_frame.loc[index][ProfKey.TYPE.value] == start_block_operator:
                # Skip the start marker itself.
                index += 1

                operators_block = []
                while data_frame.loc[index][ProfKey.TYPE.value] != end_block_operator:
                    if data_frame.loc[index][ProfKey.TYPE.value] not in ignored_operaotrs:
                        start_time = data_frame.loc[index][ProfKey.START_TIME.value]
                        duration = data_frame.loc[index][ProfKey.DURATION.value]
                        operators_block.append({
                            ProfKey.TYPE.value: data_frame.loc[index][ProfKey.TYPE.value],
                            ProfKey.START_TIME.value: start_time,
                            ProfKey.DURATION.value: duration,
                            ProfKey.END_TIME.value: start_time + duration,
                            ProfKey.ACCELERATOR.value: data_frame.loc[index][ProfKey.ACCELERATOR.value]
                        })
                    index += 1
                operators_blocks.append(operators_block)
            # Skip the end marker (or advance past an unrelated row).
            index += 1

        return operators_blocks

    @classmethod
    def get_async_operators(
        cls,
        data_frame,
        start_block_operator='SoftplusV2',
        end_block_operator='SoftShrink',
        ignored_operaotrs=None
    ):
        """Decide, per communication operator, whether it is overlapped.

        A communication operator counts as asynchronous when any later
        operator in the same block starts before it finishes.  Results from
        multiple blocks are OR-combined positionally.

        Returns a list of {'Type': ..., 'async': bool} dicts in block order.

        Raises:
            AssertionError: if blocks disagree on the number of
                communication operators.
        """
        operator_blocks = cls.get_operators_info(
            data_frame, start_block_operator, end_block_operator, ignored_operaotrs
        )

        async_operators = []
        # Walk every block; blocks are merged with a positional OR below.
        for operators in operator_blocks:
            block_async_operaotrs = []

            for i in range(len(operators)):
                # Overlap can only start when a communication op is launched.
                if not cls.is_comm_operator(operators[i][ProfKey.TYPE.value]):
                    continue
                is_async = any(
                    operators[j][ProfKey.START_TIME.value] < operators[i][ProfKey.END_TIME.value]
                    for j in range(i + 1, len(operators))
                )
                block_async_operaotrs.append({
                    ProfKey.TYPE.value: operators[i][ProfKey.TYPE.value],
                    ProfKey.ASYNC.value: is_async
                })

            # Every block must contain the same number of communication ops.
            # (The original message interpolated the whole list where the
            # length was intended.)
            if not (len(async_operators) == 0 or len(async_operators) == len(block_async_operaotrs)):
                raise AssertionError(
                    f'async_operators analysis error len(async_operators)={len(async_operators)}'
                )

            if len(async_operators) == 0:
                async_operators.extend(block_async_operaotrs)
            else:
                async_key = ProfKey.ASYNC.value
                for i in range(len(async_operators)):
                    async_operators[i][async_key] = async_operators[i][async_key] | block_async_operaotrs[i][async_key]

        return async_operators


def find_single_seq_mlp_expert(df, seq_mlp_indexes):
    """Keep a single expert's operators out of a SequentialMLP block.

    Args:
        df: operator DataFrame that already contains an 'Is Expert' column.
        seq_mlp_indexes: row indexes of the expert marker operator; the rows
            between the first two markers are kept (flagged as expert ops)
            and all remaining rows up to the last marker are dropped.

    Returns:
        The reduced DataFrame with a fresh RangeIndex.
    """
    seq_mlp_start_index = seq_mlp_indexes[0]
    seq_mlp_end_index = seq_mlp_indexes[1]
    expert_end_index = seq_mlp_indexes[-1]
    # .copy() so the flag assignment below operates on an independent frame
    # instead of a slice of df (avoids pandas chained-assignment issues).
    seq_mlp_df = df.iloc[seq_mlp_start_index + 1:seq_mlp_end_index].copy()
    seq_mlp_df.loc[:, 'Is Expert'] = True
    head_df = df.iloc[:seq_mlp_start_index]
    tail_df = df.iloc[expert_end_index + 1:]
    res_df = pd.concat([head_df, seq_mlp_df, tail_df], axis=0).reset_index(drop=True)

    return res_df


def find_duplicate_ring_fa(df, ring_fa_indexes):
    """Deduplicate the repeated ring-attention FlashAttention segments.

    ``ring_fa_indexes`` holds the row indexes of the ring marker operator.
    With more than two markers (forward pass) only the first and last marked
    segments are kept; with exactly two markers (backward pass) two
    FlashAttentionScoreGrad rows and two send/receive pairs are kept.  Rows
    that stand in for removed repeats are flagged via 'Is Duplicate FA'.

    NOTE(review): ``fa_block_df.loc[df['Type'] == ...]`` below indexes the
    sub-frame with a boolean mask built on the full frame — this relies on
    pandas aligning the longer mask to the sub-frame's index; confirm the
    installed pandas version accepts unaligned boolean indexers here.
    """
    filtered_ops = ['hcom_send_', 'hcom_receive_', 'FlashAttentionScore', 'FlashAttentionScoreGrad']
    first_fa_start_index = ring_fa_indexes[0]
    first_fa_end_index = ring_fa_indexes[1]
    last_fa_start_index = ring_fa_indexes[-2]
    last_fa_end_index = ring_fa_indexes[-1]
    head_df = df.iloc[:first_fa_start_index]
    tail_df = df.iloc[last_fa_end_index + 1:]

    if len(ring_fa_indexes) > 2:
        # forward: keep only the first and last marked FA segments
        first_fa_df = df.iloc[first_fa_start_index + 1:first_fa_end_index]
        first_fa_df = first_fa_df[first_fa_df['Type'].isin(filtered_ops)]
        last_fa_df = df.iloc[last_fa_start_index + 1:last_fa_end_index]
        last_fa_df = last_fa_df[last_fa_df['Type'].isin(filtered_ops)]

        # Reuse the first FA segment's send & receive operators
        first_fa_df.loc[first_fa_df['Type'].isin([filtered_ops[0], filtered_ops[1]]), 'Is Duplicate FA'] = True
        last_fa_df.loc[:, 'Is Duplicate FA'] = True
        res_df = pd.concat([head_df, first_fa_df, last_fa_df, tail_df], axis=0).reset_index(drop=True)
    else:
        # backward: keep one FA pair plus its surrounding send/receive pairs
        fa_block_df = df.iloc[first_fa_start_index + 1:last_fa_end_index]
        fa_block_df = fa_block_df[fa_block_df['Type'].isin(filtered_ops)]
        send_op_indexes = fa_block_df.loc[df['Type'] == filtered_ops[0], 'Type'].index
        recv_op_indexes = fa_block_df.loc[df['Type'] == filtered_ops[1], 'Type'].index
        fa_op_indexes = fa_block_df.loc[df['Type'] == filtered_ops[3], 'Type'].index
        first_send_df = df.iloc[send_op_indexes[0]:send_op_indexes[0] + 1]
        first_recv_df = df.iloc[recv_op_indexes[0]:recv_op_indexes[0] + 1]
        second_send_df = df.iloc[send_op_indexes[1]:send_op_indexes[1] + 1]
        second_recv_df = df.iloc[recv_op_indexes[1]:recv_op_indexes[1] + 1]
        first_fa_df = df.iloc[fa_op_indexes[0]:fa_op_indexes[0] + 1]
        last_fa_df = df.iloc[fa_op_indexes[-1]:fa_op_indexes[-1] + 1]
        # Preserve the original send/receive launch order within each pair.
        if send_op_indexes[0] < recv_op_indexes[0]:
            first_send_recv_df = pd.concat([first_send_df, first_recv_df], axis=0)
        else:
            first_send_recv_df = pd.concat([first_recv_df, first_send_df], axis=0)

        if send_op_indexes[1] < recv_op_indexes[1]:
            second_send_recv_df = pd.concat([second_send_df, second_recv_df], axis=0)
        else:
            second_send_recv_df = pd.concat([second_recv_df, second_send_df], axis=0)

        # Reuse the first FA segment
        first_fa_df.loc[:, 'Is Duplicate FA'] = True
        first_send_recv_df.loc[:, 'Is Duplicate FA'] = True
        second_send_recv_df.loc[:, 'Is Duplicate FA'] = True
        res_df = (pd.concat([head_df, first_send_recv_df, first_fa_df, second_send_recv_df, last_fa_df, tail_df], axis=0).
                  reset_index(drop=True))

    return res_df


def shape_format_preprocess(input_shapes: str, input_types: str, delimiters: str) -> Tuple[List[str], str]:
    """Split a profiled 'Input Shapes' string into individual shape strings.

    Empty shape entries are dropped together with their aligned dtype.

    Args:
        input_shapes: raw shapes string, e.g. '"2,3;;4,5"'; a non-str value
            (pandas NaN) yields ([], '').
        input_types: ';'-separated dtype list aligned with the shape entries.
        delimiters: characters to split on, each treated individually
            (e.g. '";' splits on both '"' and ';').

    Returns:
        (list of non-empty shape strings, ';'-joined surviving dtypes).
    """
    if not isinstance(input_shapes, str):
        return [], ''

    pattern = '|'.join(re.escape(delimiter) for delimiter in delimiters)
    shape_split = re.split(pattern, input_shapes)
    # Interior empty segments mark empty shape entries; record their
    # positions so the aligned dtype entries can be dropped as well.
    empty_indices = [i for i, _ in enumerate(shape_split[1:-1]) if shape_split[i + 1] == '']
    types_split = input_types.split(';')

    # Delete from the back so earlier indices stay valid.
    for i in sorted(empty_indices, reverse=True):
        del types_split[i]

    shape_list = [shape for shape in shape_split if shape]

    return shape_list, ';'.join(types_split)


def unify_op_type_name(df, selected_rows):
    """Collapse versioned operator names (e.g. 'MatMulV2') onto their base name.

    Masks are computed against the incoming frame, so earlier renames cannot
    influence later prefix matches; the input frame itself is left untouched.
    """
    renamed = df.copy()
    for base_name in selected_rows:
        prefix_mask = df['Type'].str.startswith(base_name)
        renamed.loc[prefix_mask, 'Type'] = base_name
    return renamed


def filter_operators_submodule(df_org, selected_columns, selected_rows, start_flag, end_flag, expert_flag, is_ffn):
    """Extract the moe/mlp operator block from a kernel profiling frame.

    The block is delimited by marker operators; forward passes use the
    second occurrence of each marker (the first delimits attention), while
    backward passes use the first.  Returns one dict per kept operator with
    preprocessed shapes and dtypes.
    """
    # Forward: flag occurrence 0 delimits attention, occurrence 1 the moe
    # block; backward traces are recorded the other way round.
    flag_occurrence = 1 if is_ffn else 0
    frame = df_org.copy()
    start_index = frame.loc[frame['Type'] == start_flag, 'Type'].index[flag_occurrence]
    end_index = frame.loc[frame['Type'] == end_flag, 'Type'].index[flag_occurrence]

    frame = frame.iloc[start_index + 1:end_index][selected_columns]
    frame['Is Expert'] = False
    keep_mask = frame['Type'].str.startswith(tuple(selected_rows))
    frame = frame[keep_mask].reset_index(drop=True)
    frame = unify_op_type_name(frame, selected_rows)

    # A SequentialMLP inserts the expert flag operator; when experts are
    # configured, keep only a single expert's operators.
    seq_mlp_indexes = frame.loc[frame['Type'] == expert_flag, 'Type'].index
    if get_model_config().args.num_experts and len(seq_mlp_indexes) > 0:
        frame = find_single_seq_mlp_expert(df=frame, seq_mlp_indexes=seq_mlp_indexes)

    # Expose the positional index as a stable 'Global Index' column.
    frame = frame.reset_index().rename(columns={'index': 'Global Index'})

    # Convert to records and normalise the shape/dtype strings.
    records = frame[['Type', 'Input Shapes', 'Input Data Types', 'Is Expert', 'Global Index']].to_dict(orient='records')
    for record in records:
        shapes, dtypes = shape_format_preprocess(input_shapes=record['Input Shapes'],
                                                 input_types=record['Input Data Types'],
                                                 delimiters='";')
        record['Input Shapes'] = shapes
        record['Input Data Types'] = dtypes

    return records


def filter_operators_attn_submodule(df_org, selected_columns, selected_rows, start_flag, end_flag, ring_flags, is_ffn,
                                    cp_size):
    """Extract the attention block's operators from a kernel profiling frame.

    Args:
        df_org: raw profiling DataFrame.
        selected_columns: columns kept in the result.
        selected_rows: operator-name prefixes to keep.
        start_flag / end_flag: marker operators delimiting the attention block.
        ring_flags: candidate ring-attention marker operators (1 or 2 names).
        is_ffn: True for the forward block, False for the backward block.
        cp_size: context-parallel size (currently unused; kept for interface
            compatibility).

    Returns:
        A list of per-operator dicts with preprocessed shapes and dtypes.
    """
    # Work on a copy of the selection list: previously the unused ring flag
    # was removed from the caller's list in place.
    selected_rows = list(selected_rows)

    df = df_org.copy()
    num_flags = len(ring_flags)
    # Forward: flag occurrence 0 delimits attention; backward: occurrence 1.
    flag_occurrence = 0 if is_ffn else 1
    start_index = df.loc[df['Type'] == start_flag, 'Type'].index[flag_occurrence]
    end_index = df.loc[df['Type'] == end_flag, 'Type'].index[flag_occurrence]

    df = df.iloc[start_index + 1:end_index]
    df = df[selected_columns]
    df['Is Duplicate FA'] = False

    # Pick whichever ring marker actually occurs in this block and drop the
    # other candidate from the selection.
    is_first_flag = ring_flags[0] in df['Type'].values
    ring_flag = ring_flags[0] if num_flags == 1 or (num_flags > 1 and is_first_flag) else ring_flags[1]
    if num_flags > 1:
        if is_first_flag:
            selected_rows.remove(ring_flags[1])
        else:
            selected_rows.remove(ring_flags[0])

    df = df[df['Type'].str.startswith(tuple(selected_rows))].reset_index(drop=True)
    df = unify_op_type_name(df, selected_rows)

    # With CP enabled the ring marker occurs at least twice and the repeated
    # FA segments are deduplicated; otherwise the normal template applies.
    ring_fa_indexes = df.loc[df['Type'] == ring_flag, 'Type'].index
    if len(ring_fa_indexes) >= 2:
        df = find_duplicate_ring_fa(df=df, ring_fa_indexes=ring_fa_indexes)

    df = df.reset_index().rename(columns={'index': 'Global Index'})

    # Convert to records and normalise the shape/dtype strings.
    df_to_dict = (df[['Type', 'Input Shapes', 'Input Data Types', 'Is Duplicate FA', 'Global Index']].
                  to_dict(orient='records'))
    for op_dict in df_to_dict:
        input_shapes_list, input_types_str = shape_format_preprocess(input_shapes=op_dict['Input Shapes'],
                                                                     input_types=op_dict['Input Data Types'],
                                                                     delimiters='";')
        op_dict['Input Shapes'] = input_shapes_list
        op_dict['Input Data Types'] = input_types_str

    return df_to_dict


def filter_operators(profile_path: str, config_str: str, is_ffn: bool, is_attn: bool) -> list:
    """Read a kernel profiling CSV and extract the requested operator block.

    Dispatches to the attention or moe/mlp submodule filter, forward or
    backward, and returns the resulting list of operator dicts.
    """
    df = pd.read_csv(profile_path)
    selected_columns = ['Name', 'Type', 'Input Shapes', 'Input Data Types']
    config_dict = parse_config(config_str)

    if is_attn:
        # Attention block: operator pools and marker flags differ fwd/bwd.
        if is_ffn:
            flags = FLAG_OPERATORS
            selected_rows = ATTN_OPERATORS_POOL + flags + ATTN_COMMUNICATION_POOL
        else:
            flags = FLAG_OPERATORS_GRAD
            selected_rows = ATTN_OPERATORS_GRAD_POOL + flags + ATTN_COMMUNICATION_GRAD_POOL
        return filter_operators_attn_submodule(df_org=df,
                                               selected_columns=selected_columns,
                                               selected_rows=selected_rows,
                                               start_flag=flags[0],
                                               end_flag=flags[1],
                                               ring_flags=flags[2:],
                                               is_ffn=is_ffn,
                                               cp_size=config_dict['cp'])

    # moe/mlp block.
    if is_ffn:
        flags = FLAG_OPERATORS
        selected_rows = OPERATORS_POOL + flags + COMMUNICATION_POOL
    else:
        flags = FLAG_OPERATORS_GRAD
        # The trailing grad-pool flag is intentionally excluded here.
        selected_rows = OPERATORS_GRAD_POOL + flags[:-1] + COMMUNICATION_GRAD_POOL
    return filter_operators_submodule(df_org=df,
                                      selected_columns=selected_columns,
                                      selected_rows=selected_rows,
                                      start_flag=flags[0],
                                      end_flag=flags[1],
                                      expert_flag=flags[2],
                                      is_ffn=is_ffn)


def parse_config(config_str: str) -> dict:
    """Parse a 'PP{n}_TP{n}_DP{n}_CP{n}_UP{n}_EP{n}_MBS{n}' config string.

    Returns:
        A dict mapping each lowercase parallel dimension name
        ('pp', 'tp', 'dp', 'cp', 'up', 'ep', 'mbs') to its integer size.

    Raises:
        ValueError: if config_str does not contain the expected pattern
            (previously this surfaced as an opaque AttributeError on None).
    """
    re_pattern = r'PP(\d+)_TP(\d+)_DP(\d+)_CP(\d+)_UP(\d+)_EP(\d+)_MBS(\d+)'
    parallel_dim = ('pp', 'tp', 'dp', 'cp', 'up', 'ep', 'mbs')
    match = re.search(re_pattern, config_str)
    if match is None:
        raise ValueError(f'invalid parallel config string: {config_str!r}')

    return {dim: int(value) for dim, value in zip(parallel_dim, match.groups())}


def concat_shape(shapes_org: list):
    """Flatten comma-separated shape strings into one int list.

    Returns:
        (flattened dims, per-shape lengths needed to undo the flattening).
    """
    flattened = []
    revert_index = []
    for shape_str in shapes_org:
        dims = [int(dim) for dim in shape_str.split(',')]
        revert_index.append(len(dims))
        flattened.extend(dims)

    return flattened, revert_index


def split_shape(combine_shapes_coefficient: list, shapes_revert_index: list) -> list:
    """Regroup flat per-operator coefficient lists back into per-shape sublists.

    ``shapes_revert_index`` carries, per operator, the length of each of its
    original shapes (as produced by concat_shape).
    """
    regrouped = []
    for flat_coefficients, lengths in zip(combine_shapes_coefficient, shapes_revert_index):
        cursor = 0
        groups = []
        for length in lengths:
            groups.append(flat_coefficients[cursor:cursor + length])
            cursor += length
        regrouped.append(groups)

    return regrouped


def shape_comparison(control_operator, experimental_operator, pattern_matrix):
    """Compare two operators' input shapes dimension by dimension.

    Returns:
        (shape_ratio, shape_revert_index) — per flattened dimension, 0 when
        the dim is unchanged, otherwise the rounded correlation sign against
        ``pattern_matrix`` (+1 scales with the pattern, -1 inversely).

    Raises:
        AssertionError: if the operators are not the same op (or differ in
            expert status when it is tracked).
    """
    types_differ = control_operator['Type'] != experimental_operator['Type']
    expert_differs = ('Is Expert' in control_operator and
                      control_operator['Is Expert'] != experimental_operator['Is Expert'])
    if types_differ or expert_differs:
        raise AssertionError('Operators are not aligned.')

    control_concat, shape_revert_index = concat_shape(shapes_org=control_operator['Input Shapes'])
    experimental_concat, _ = concat_shape(shapes_org=experimental_operator['Input Shapes'])

    shape_ratio = []
    for control_dim, experimental_dim in zip(control_concat, experimental_concat):
        if experimental_dim / control_dim == 1:
            # Dimension untouched by this parallel pattern.
            shape_ratio.append(0)
            continue
        dim_pair = np.array([control_dim, experimental_dim])
        rounded = np.round(np.corrcoef(pattern_matrix, dim_pair)).astype(int)
        shape_ratio.append(rounded[0, 1])

    return shape_ratio, shape_revert_index


def block_operators(operators_list, base_config_dict=None):
    """Group operator rule dicts by their 'Type'.

    When ``base_config_dict`` is given, its key order dictates the group
    order (aligning different configs to the same layout); otherwise the
    encounter order of types is kept.

    Returns:
        (flattened grouped list, mapping of type -> list of operators).
    """
    grouped = defaultdict(list)
    for op_rule in operators_list:
        grouped[op_rule['Type']].append(op_rule)

    if base_config_dict:
        aligned = {op_type: grouped[op_type] for op_type in base_config_dict}
    else:
        aligned = grouped

    flattened = [operator for group in aligned.values() for operator in group]
    return flattened, aligned


def config_comparison(config_to_op_dict: dict, control_config_str: str, experimental_config_str: str, pattern_ele: str):
    """Derive per-dimension scaling coefficients between two configurations.

    Compares every operator of the control config against its counterpart in
    the experimental config, which differs only in ``pattern_ele``.

    Returns:
        (shapes_coefficient, shapes_revert_index) — per operator, a flat
        list holding the pattern name, '1/<pattern>' or the literal 1 for
        each shape dimension, plus the per-shape lengths from concat_shape.
    """
    control_operators = config_to_op_dict[control_config_str]
    experimental_operators = config_to_op_dict[experimental_config_str]
    control_config_dict = parse_config(control_config_str)
    experimental_config_dict = parse_config(experimental_config_str)

    # 'cup' is the fused cp*up dimension; other patterns map 1:1 to a
    # parsed config key.
    if pattern_ele == 'cup':
        control_pattern = control_config_dict['cp'] * control_config_dict['up']
        experimental_pattern = experimental_config_dict['cp'] * experimental_config_dict['up']
    else:
        control_pattern = control_config_dict[pattern_ele]
        experimental_pattern = experimental_config_dict[pattern_ele]
    pattern_matrix = np.array([control_pattern, experimental_pattern])

    shapes_revert_index = []
    shapes_coefficient = []
    for control_operator, experimental_operator in zip(control_operators, experimental_operators):
        shape_ratio, shape_revert_index = shape_comparison(control_operator, experimental_operator, pattern_matrix)
        shapes_revert_index.append(shape_revert_index)

        shape_coefficient = []
        for ratio in shape_ratio:
            if ratio == 1:
                # Dimension scales with the parallel size.
                shape_coefficient.append(pattern_ele)
            elif ratio == -1:
                # Dimension scales inversely with the parallel size.
                shape_coefficient.append('/'.join(['1', pattern_ele]))
            else:
                # Dimension independent of this parallel dimension.
                shape_coefficient.append(1)
        shapes_coefficient.append(shape_coefficient)

    return shapes_coefficient, shapes_revert_index


def coefficient_combine(prev_shapes_coefficient: list, curr_shapes_coefficient: list) -> list:
    """Merge two per-shape coefficient lists elementwise.

    The literal 1 means "no dependency"; two real coefficient strings are
    joined with ';' so every contributing parallel dimension is kept.
    """
    merged = []
    for prev_shape, curr_shape in zip(prev_shapes_coefficient, curr_shapes_coefficient):
        merged_shape = []
        for prev_ele, curr_ele in zip(prev_shape, curr_shape):
            if prev_ele == 1:
                merged_shape.append(curr_ele)
            elif curr_ele == 1:
                merged_shape.append(prev_ele)
            else:
                merged_shape.append(';'.join([prev_ele, curr_ele]))
        merged.append(merged_shape)

    return merged


def revert_dynamic_shape(shape_dim: int, alpha: str, config_dict: dict):
    """Undo the parallel-dimension scaling encoded in ``alpha``.

    ``alpha`` is a ';'-joined list of factors such as 'tp', '1/ep' or 'cup'
    (the fused cp*up dimension).  A plain name divides the dimension by the
    parallel size, a '1/<name>' factor multiplies it back; unknown factors
    are ignored.  Division is true division, so the result may be a float
    (matching the original behavior).
    """
    # Dimension names handled directly from config_dict; 'cup' is computed
    # lazily below so unreferenced keys are never touched.
    simple_dims = ('tp', 'cp', 'up', 'ep', 'mbs')

    for ele in alpha.split(';'):
        invert = ele.startswith('1/')
        name = ele[2:] if invert else ele

        if name == 'cup':
            factor = config_dict['cp'] * config_dict['up']
        elif name in simple_dims:
            factor = config_dict[name]
        else:
            # Unknown factor — no-op, as in the original if/elif ladder.
            continue

        if invert:
            shape_dim *= factor
        else:
            shape_dim /= factor

    return shape_dim


def compute_base_shape(config_to_op_dict: dict, coefficient: list, control_config: str) -> list:
    """Recover parallelism-independent base input shapes for each operator.

    Each dimension of the control config's shapes is reverted through its
    coefficient (string coefficients encode a parallel-dimension scaling;
    the literal 1 means the dimension is left unchanged).
    """
    config_dict = parse_config(control_config)
    base_shape = []

    for operator, shapes_coefficient in zip(config_to_op_dict[control_config], coefficient):
        operator_base = []

        for shape_str, shape_coefficient in zip(operator['Input Shapes'], shapes_coefficient):
            dims = [int(dim) for dim in shape_str.split(',')]
            reverted = [
                revert_dynamic_shape(dim, coeff, config_dict) if isinstance(coeff, str) else dim
                for dim, coeff in zip(dims, shape_coefficient)
            ]
            operator_base.append(reverted)

        base_shape.append(operator_base)

    return base_shape


def analyze_operator_rule(config_to_op_dict: dict, pattern_list: list, is_attn: bool) -> list:
    """Derive per-operator shape rules from a set of profiled configurations.

    The first config in ``config_to_op_dict`` is the control group; each
    remaining (experimental) config differs from it in exactly one pattern
    element of ``pattern_list`` (e.g. 'tp', 'ep', 'mbs', fused 'cup').
    Pairwise comparison yields, per shape dimension, either a scaling
    coefficient string or the literal 1 (no dependency); coefficients from
    successive comparisons are folded together.

    Returns:
        One dict per operator with its type, parallelism-independent base
        input shape, per-dimension coefficients, dtypes, expert/duplicate-FA
        flag and global index, in execution order.
    """
    all_configs = list(config_to_op_dict.keys())
    control_config = all_configs[0]
    experimental_configs = all_configs[1:]
    # NOTE(review): comparison_res is populated but never used or returned.
    comparison_res = dict()
    prev_shapes_coefficient = []
    combine_shapes_coefficient = []
    shapes_revert_index = []

    for experimental_config, pattern_ele in zip(experimental_configs, pattern_list):
        shapes_coefficient, shapes_revert_index = config_comparison(config_to_op_dict, control_config,
                                                                    experimental_config, pattern_ele)
        # Fold this dimension's coefficients into the running combination.
        if prev_shapes_coefficient:
            combine_shapes_coefficient = coefficient_combine(prev_shapes_coefficient, shapes_coefficient)

        prev_shapes_coefficient = shapes_coefficient if not combine_shapes_coefficient else combine_shapes_coefficient
        comparison_res[pattern_ele] = shapes_coefficient

    # Single-comparison case: nothing was combined yet.
    if not combine_shapes_coefficient:
        combine_shapes_coefficient = prev_shapes_coefficient
    # NOTE(review): shapes_revert_index comes from the last loop iteration —
    # this assumes the shape layout is identical across all configs.
    coefficient = split_shape(combine_shapes_coefficient, shapes_revert_index)
    base_shape = compute_base_shape(config_to_op_dict, coefficient, control_config)

    # Rebuild the operator execution order with the derived rules attached.
    res_list = []

    for operator, shape, compute_coefficient in zip(config_to_op_dict[control_config], base_shape, coefficient):
        temp_dict = dict()
        temp_dict['Type'] = operator['Type']
        temp_dict['base_input_shape'] = shape
        temp_dict['compute_coefficient'] = compute_coefficient
        temp_dict['Input Data Types'] = operator['Input Data Types']
        if not is_attn:
            temp_dict['Is Expert'] = operator['Is Expert']
        else:
            temp_dict['Is Duplicate FA'] = operator['Is Duplicate FA']
        temp_dict['Global Index'] = operator['Global Index']
        res_list.append(temp_dict)

    return res_list


def get_attn_comm(filtered_op_list, attn_comm_ops, limit_send_num, limit_recv_num):
    """Attach profiled communication shapes/dtypes to attention operators.

    ``attn_comm_ops`` is consumed front-to-back; send/receive ops get
    ``[input_shapes, [1]]`` as their shape info (up to the given limits),
    other matching comm ops get ``[input_shapes, output_shapes,
    [world_size]]`` plus their dtype.

    Fixes two latent IndexErrors from the original version: the [0]/[1]
    swap with fewer than two remaining comm ops, and [0] accesses after a
    pop emptied the list mid-iteration.
    """
    send_count = 0
    recv_count = 0
    send_indexes = [i for i, comm_op in enumerate(attn_comm_ops) if comm_op['type'] == 'isend']
    recv_indexes = [i for i, comm_op in enumerate(attn_comm_ops) if comm_op['type'] == 'irecv']
    send_recv_pair = list(zip(send_indexes, recv_indexes))
    if send_recv_pair:
        # Keep everything up to the limit-th send/recv pair, then skip ahead
        # past the last pair (rebinds the local name only).
        attn_comm_ops = (attn_comm_ops[:min(send_recv_pair[0])] +
                         attn_comm_ops[min(send_recv_pair[0]):max(send_recv_pair[limit_send_num - 1]) + 1] +
                         attn_comm_ops[max(send_recv_pair[-1]) + 1:])

    for op_info in filtered_op_list:
        if not attn_comm_ops:
            break

        # Re-order a mismatched send/receive pair so the next comm record
        # lines up with the operator type (needs at least two records).
        if len(attn_comm_ops) > 1 and (
                (op_info['Type'] == 'hcom_send_' and attn_comm_ops[0]['type'] == 'irecv') or
                (op_info['Type'] == 'hcom_receive_' and attn_comm_ops[0]['type'] == 'isend')):
            attn_comm_ops[0], attn_comm_ops[1] = attn_comm_ops[1], attn_comm_ops[0]

        if send_count < limit_send_num and op_info['Type'] == 'hcom_send_' and attn_comm_ops[0]['type'] == 'isend':
            comm_input_shape = [attn_comm_ops[0]['input_shapes'], [1]]
            op_info['Input Shapes'] = [','.join(map(str, sublist)) for sublist in comm_input_shape]
            send_count += 1
            attn_comm_ops.pop(0)
        elif send_count >= limit_send_num and attn_comm_ops[0]['type'] == 'isend':
            # Past the limit: drop surplus send records without matching.
            attn_comm_ops.pop(0)

        # The send branch may have consumed the last record — re-check.
        if attn_comm_ops and recv_count < limit_recv_num and op_info['Type'] == 'hcom_receive_' \
                and attn_comm_ops[0]['type'] == 'irecv':
            comm_input_shape = [attn_comm_ops[0]['input_shapes'], [1]]
            op_info['Input Shapes'] = [','.join(map(str, sublist)) for sublist in comm_input_shape]
            recv_count += 1
            attn_comm_ops.pop(0)
        elif attn_comm_ops and recv_count >= limit_recv_num and attn_comm_ops[0]['type'] == 'irecv':
            attn_comm_ops.pop(0)

        # Collective (non point-to-point) comm ops carry full shape info.
        if attn_comm_ops and op_info['Type'] == attn_comm_ops[0]['type'] \
                and op_info['Type'] not in ['hcom_send_', 'hcom_receive_']:
            comm_input_shape = [attn_comm_ops[0]['input_shapes'],
                                attn_comm_ops[0]['output_shapes'],
                                [attn_comm_ops[0]['world_size']]]
            op_info['Input Shapes'] = [','.join(map(str, sublist)) for sublist in comm_input_shape]
            op_info['Input Data Types'] = attn_comm_ops[0]['dtype']
            attn_comm_ops.pop(0)

    return filtered_op_list


def get_mlp_comm(filtered_op_list, mlp_comm_ops):
    """Fill profiled communication shape/dtype info into matching mlp operators.

    Comm records are consumed strictly in order: each record waits for the
    next operator of the same type, which then receives
    ``[input_shapes, output_shapes, [world_size]]`` and the dtype.
    """
    next_comm = 0
    total_comm_ops = len(mlp_comm_ops)

    for op_info in filtered_op_list:
        if next_comm == total_comm_ops:
            break

        comm_op = mlp_comm_ops[next_comm]
        if op_info['Type'] != comm_op['type']:
            continue

        shape_parts = [comm_op['input_shapes'], comm_op['output_shapes'], [comm_op['world_size']]]
        op_info['Input Shapes'] = [','.join(map(str, sublist)) for sublist in shape_parts]
        op_info['Input Data Types'] = comm_op['dtype']
        next_comm += 1

    return filtered_op_list


def load_comm_info(comm_info_path, filtered_op_list, is_ffn, is_attn):
    """Merge communication-operator info from a JSON dump into the op list.

    Args:
        comm_info_path: path to the JSON file produced by profiling; its
            'comm_operators' section maps module names to comm-op records.
        filtered_op_list: operator dicts to enrich (mutated in place).
        is_ffn: True for the forward pass, False for backward.
        is_attn: True for the attention block, False for moe/mlp.

    Returns:
        The (possibly enriched) filtered_op_list.
    """
    with open(comm_info_path, 'r') as file:
        data = json.load(file)

    # No communication section recorded — nothing to merge.
    if 'comm_operators' not in data:
        return filtered_op_list

    communication_operators = data['comm_operators']
    if is_attn:
        # Forward attention reuses one send/recv pair; backward needs two.
        if is_ffn and 'attention' in communication_operators:
            filtered_op_list = get_attn_comm(filtered_op_list, communication_operators['attention'], 1, 1)
        elif not is_ffn and 'attention_grad' in communication_operators:
            filtered_op_list = get_attn_comm(filtered_op_list, communication_operators['attention_grad'], 2, 2)
    else:
        if is_ffn and 'moe/mlp' in communication_operators:
            filtered_op_list = get_mlp_comm(filtered_op_list, communication_operators['moe/mlp'])
        elif not is_ffn and 'moe/mlp_grad' in communication_operators:
            filtered_op_list = get_mlp_comm(filtered_op_list, communication_operators['moe/mlp_grad'])

    return filtered_op_list


def analyze_profile_csv(config_to_op_dict: dict, profile_path: str, comm_path: str, is_ffn: bool, is_attn: bool) -> dict:
    """Parse one kernel-profile CSV and record its filtered operator list.

    The config string is derived from the CSV filename (the part before
    '_KERNEL.csv'). Communication-operator info from `comm_path` is merged in,
    and the result is stored in `config_to_op_dict[config_str]`.

    Returns the (mutated) `config_to_op_dict`.
    """
    # Use basename so the parse works regardless of the path separator,
    # instead of the previous split('/') which assumed POSIX paths.
    config_str = os.path.basename(profile_path).split('_KERNEL.csv')[0]

    filtered_op_list = filter_operators(profile_path, config_str, is_ffn, is_attn)

    # Merge communication-operator data from the JSON profile.
    filtered_op_list = load_comm_info(comm_path, filtered_op_list, is_ffn, is_attn)
    config_to_op_dict[config_str] = filtered_op_list

    return config_to_op_dict


def initialize_comm_status(operators_rule_list: list, async_operators: list):
    """Copy each profiled async flag onto the matching operator rule.

    Rules are matched against `async_operators` in profiled order: the rules
    are sorted by 'Global Index' and each async operator is aligned with the
    next rule of the same 'Type'. The matched rule gets an 'Async' key.

    Returns `operators_rule_list` (mutated in place). Raises IndexError if an
    async operator has no remaining matching rule (fails loudly, as before).
    """
    if not async_operators:
        return operators_rule_list

    # (Type, Global Index) pairs in original (profiled) order.
    ordered_ops = sorted(((op['Type'], op['Global Index']) for op in operators_rule_list),
                         key=lambda pair: pair[1])

    async_flags = []  # (Global Index, async flag)
    rule_pos = 0
    for async_operator in async_operators:
        # Advance until the next rule whose type matches this async op.
        while ordered_ops[rule_pos][0] != async_operator['Type']:
            rule_pos += 1
        async_flags.append((ordered_ops[rule_pos][1], async_operator['async']))
        rule_pos += 1

    # Group rules by Global Index once (O(n + m)) instead of re-scanning the
    # whole rule list for every flag (previously O(n * m)).
    rules_by_index = {}
    for rule in operators_rule_list:
        rules_by_index.setdefault(rule['Global Index'], []).append(rule)

    for global_index, async_flag in async_flags:
        for rule in rules_by_index.get(global_index, []):
            rule['Async'] = async_flag

    return operators_rule_list


def get_ffn_async_operators(dataframe, flag_index):
    """Extract async operators of a forward FFN block, pruning send/receive.

    Slices `dataframe` between the `flag_index`-th occurrence of the FFN start
    and end flag operators, runs the overlap analysis, then keeps only the
    first 'hcom_send_' and the first 'hcom_receive_' (all other operator types
    pass through untouched).
    """
    start_marker = FLAG_OPERATORS[0]
    end_marker = FLAG_OPERATORS[1]
    ignored_markers = [FLAG_OPERATORS[2]]

    begin = dataframe.loc[dataframe['Type'] == start_marker, 'Type'].index[flag_index]
    finish = dataframe.loc[dataframe['Type'] == end_marker, 'Type'].index[flag_index]
    dataframe = dataframe.iloc[begin:finish + 1].reset_index(drop=True)

    async_operators = OverlappedAnalysis.get_async_operators(data_frame=dataframe,
                                                             start_block_operator=start_marker,
                                                             end_block_operator=end_marker,
                                                             ignored_operaotrs=ignored_markers)

    # Prune send/receive: keep only the first occurrence of each.
    pruned = []
    first_seen = {'hcom_send_': False, 'hcom_receive_': False}
    for op in async_operators:
        op_type = op['Type']
        if op_type not in first_seen:
            pruned.append(op)
        elif not first_seen[op_type]:
            pruned.append(op)
            first_seen[op_type] = True

    return pruned


def get_bwd_async_operators(dataframe, flag_index, is_ffn, is_ring_fa):
    """Extract async operators of a backward block, pruning send/receive.

    Slices `dataframe` between the `flag_index`-th occurrence of the backward
    start and end flag operators, runs the overlap analysis, then keeps at
    most two 'hcom_send_' and two 'hcom_receive_' entries (all other operator
    types pass through untouched). For attention backward with ring FA the
    ignored marker differs.
    """
    start_marker = FLAG_OPERATORS_GRAD[0]
    end_marker = FLAG_OPERATORS_GRAD[1]
    if not is_ffn and is_ring_fa:
        ignored_markers = [FLAG_OPERATORS_GRAD[3]]
    else:
        ignored_markers = [FLAG_OPERATORS_GRAD[2]]

    begin = dataframe.loc[dataframe['Type'] == start_marker, 'Type'].index[flag_index]
    finish = dataframe.loc[dataframe['Type'] == end_marker, 'Type'].index[flag_index]
    dataframe = dataframe.iloc[begin:finish + 1].reset_index(drop=True)

    async_operators = OverlappedAnalysis.get_async_operators(data_frame=dataframe,
                                                             start_block_operator=start_marker,
                                                             end_block_operator=end_marker,
                                                             ignored_operaotrs=ignored_markers)

    # Prune send/receive: keep at most two occurrences of each.
    pruned = []
    hcom_counts = {'hcom_send_': 0, 'hcom_receive_': 0}
    for op in async_operators:
        op_type = op['Type']
        if op_type not in hcom_counts:
            pruned.append(op)
        elif hcom_counts[op_type] < 2:
            pruned.append(op)
            hcom_counts[op_type] += 1

    return pruned


def analyze_operator_list(pattern: str, operator_profiles_path: list, comm_profile_paths: list, coeff_orders: list, is_ffn: bool,
                          is_attn: bool) -> list:
    """Build the operator rule template for one pattern from profiled CSVs.

    Parses every (profile CSV, comm JSON) pair, blocks identical operator
    sequences together (the first config defines the base blocking), fits the
    operator rules, then annotates them with async/communication status taken
    from the first profiled CSV. The result is sorted by 'Global Index'.

    Raises AssertionError when `pattern` is not supported by the module
    (PATTERNS_POOL for MoE/MLP, ATTN_PATTERN_POOL for attention).
    """
    config_to_op_dict = defaultdict(list)
    config_to_op_dict_ffn = defaultdict(list)

    for profile_path, comm_path in zip(operator_profiles_path, comm_profile_paths):
        config_to_op_dict = analyze_profile_csv(config_to_op_dict, profile_path, comm_path, is_ffn, is_attn)

    # Group identical operator sequences together; the first config's blocking
    # becomes the baseline for all others.
    base_config_ffn = None
    is_first_config = True
    for config_str, ffn_operators in config_to_op_dict.items():
        if is_first_config:
            config_to_op_dict_ffn[config_str], base_config_ffn = block_operators(ffn_operators)
            is_first_config = False
        else:
            config_to_op_dict_ffn[config_str], _ = block_operators(ffn_operators, base_config_ffn)

    if not is_attn:
        # MoE/MLP section
        if pattern not in PATTERNS_POOL:
            raise AssertionError('This pattern is not supported in MoE module.')

        # Build the initial template (communication info still unknown).
        operators_rule_list_ffn = analyze_operator_rule(config_to_op_dict_ffn, coeff_orders, is_attn)

        # Collect communication-operator info (forward & backward) from the
        # first profiled config.
        dataframe = pd.read_csv(operator_profiles_path[0])
        if is_ffn:
            async_operators = get_ffn_async_operators(dataframe=dataframe, flag_index=1)
        else:
            async_operators = get_bwd_async_operators(dataframe=dataframe, flag_index=0, is_ffn=is_ffn, is_ring_fa=False)

        operators_rule_list_ffn = initialize_comm_status(operators_rule_list_ffn, async_operators)
    else:
        # Attention section
        if pattern not in ATTN_PATTERN_POOL:
            raise AssertionError('This pattern is not supported in Attention module.')

        # Build the initial attention template (communication info still unknown).
        operators_rule_list_ffn = analyze_operator_rule(config_to_op_dict_ffn, coeff_orders, is_attn)
        is_ring_fa = 'cp' in coeff_orders

        # Collect communication-operator info from the first profiled config.
        dataframe = pd.read_csv(operator_profiles_path[0])
        if is_ffn:
            async_operators = get_ffn_async_operators(dataframe=dataframe, flag_index=0)
        else:
            async_operators = get_bwd_async_operators(dataframe=dataframe, flag_index=1, is_ffn=is_ffn, is_ring_fa=is_ring_fa)

        operators_rule_list_ffn = initialize_comm_status(operators_rule_list_ffn, async_operators)

    return sorted(operators_rule_list_ffn, key=lambda x: x['Global Index'])


def compute_output_shape(base_input_shape: list, compute_coefficient: list, input_config: 'ParallelConfig') -> list:
    """Scale a template's base input shapes by the parallel-config coefficients.

    `compute_coefficient` mirrors `base_input_shape`: one coefficient per
    shape element. A coefficient is either a non-string placeholder (element is
    kept as-is) or a ';'-separated string of tokens such as 'tp', '1/ep',
    'mbs', 'cup' — '<key>' multiplies by the corresponding parallel size and
    '1/<key>' divides by it. Unknown tokens are silently ignored, matching the
    previous behavior. Every resulting element is truncated to int.

    Returns the list of scaled shapes.
    """
    # Token -> scaling factor. Replaces the former 12-branch if/elif chain.
    factors = {
        'tp': input_config.tensor_model_parallel_size,
        'cp': input_config.ring_attention_size,
        'up': input_config.ulysses_size,
        'ep': input_config.expert_model_parallel_size,
        'mbs': input_config.micro_batch_size,
        # Combined context-parallel x ulysses factor.
        'cup': input_config.ring_attention_size * input_config.ulysses_size,
    }

    output_shape = []
    for shape, coefficient_group in zip(base_input_shape, compute_coefficient):
        temp_shape = []
        for shape_ele, coefficients in zip(shape, coefficient_group):
            if isinstance(coefficients, str):
                for coefficient in coefficients.split(';'):
                    if coefficient.startswith('1/'):
                        factor = factors.get(coefficient[2:])
                        if factor is not None:
                            shape_ele /= factor
                    else:
                        factor = factors.get(coefficient)
                        if factor is not None:
                            shape_ele *= factor
            temp_shape.append(int(shape_ele))
        output_shape.append(temp_shape)

    return output_shape


def estimate_operators_submodule(operator_rules_list: list, input_config: ParallelConfig, num_local_experts: float) -> list:
    """Instantiate the MoE/MLP operator template for a concrete parallel config.

    Each rule's base shape is scaled via compute_output_shape, comm domains are
    split out, and the contiguous expert segment (rules flagged 'Is Expert') is
    repeated int(num_local_experts) times.
    """
    estimated_ops = []
    expert_positions = []

    for idx, rule in enumerate(operator_rules_list):
        if rule['Is Expert']:
            expert_positions.append(idx)

        entry = {
            'Type': rule['Type'],
            'Input Shape': compute_output_shape(rule['base_input_shape'],
                                                rule['compute_coefficient'],
                                                input_config),
            'Input Data Types': rule['Input Data Types'],
        }
        if 'Async' in rule.keys():
            entry['Async'] = rule['Async']
        estimated_ops.append(entry)

    estimated_ops = revert_comm_domains(estimated_ops)

    # Repeat the expert segment once per local expert.
    if expert_positions:
        first, last = expert_positions[0], expert_positions[-1]
        repeated_segment = estimated_ops[first:last + 1] * int(num_local_experts)
        estimated_ops = estimated_ops[:first] + repeated_segment + estimated_ops[last + 1:]

    return estimated_ops


def revert_comm_domains(output_shape):
    """Move each comm op's trailing 'Input Shape' entry into a 'Domains' key.

    For the hcom_* operator types below, the last element of 'Input Shape' is
    a single-element list holding the communication domain (world size); it is
    stripped from the shape list and its value stored under 'Domains'.
    Mutates and returns `output_shape`.
    """
    domain_aware_ops = frozenset({
        'hcom_reduceScatter_', 'hcom_allGather_', 'hcom_alltoall_', 'hcom_alltoallv_',
        'hcom_send_', 'hcom_receive_', 'hcom_allReduce_',
    })

    for operator in output_shape:
        if operator['Type'] not in domain_aware_ops:
            continue
        shape_list = operator['Input Shape']
        operator['Input Shape'] = shape_list[:-1]
        operator['Domains'] = shape_list[-1][0]

    return output_shape


def estimate_operators_attn_submodule(operator_rules_list, input_config, num_fa, is_ffn):
    """Instantiate the attention operator template for a concrete parallel config.

    Scales every rule's base shape, splits out comm domains, then replicates
    the duplicated-FA segment when num_fa > 1. `num_fa` is the ring-attention
    (CP) size passed by the caller.
    """
    estimated_ops = []
    duplicate_positions = []
    fa_positions = []
    fa_type = 'FlashAttentionScore' if is_ffn else 'FlashAttentionScoreGrad'

    for idx, rule in enumerate(operator_rules_list):
        if rule['Type'] == fa_type:
            fa_positions.append(idx)
        if rule['Is Duplicate FA']:
            duplicate_positions.append(idx)

        entry = {
            'Type': rule['Type'],
            'Input Shape': compute_output_shape(rule['base_input_shape'],
                                                rule['compute_coefficient'],
                                                input_config),
            'Input Data Types': rule['Input Data Types'],
        }
        if 'Async' in rule.keys():
            entry['Async'] = rule['Async']
        estimated_ops.append(entry)

    estimated_ops = revert_comm_domains(estimated_ops)

    # Replicate the 'Is Duplicate FA' segment (num_fa - 2) extra times between
    # the first and second FA occurrences.
    # NOTE(review): assumes at least two FA ops (and, for backward, at least
    # one duplicate-FA op) exist in the template when num_fa > 1 — confirm.
    if num_fa > 1:
        seg_start = fa_positions[0] if is_ffn else duplicate_positions[-1]
        seg_end = fa_positions[1]
        duplicated_segment = [estimated_ops[i] for i in duplicate_positions]
        estimated_ops = estimated_ops[:seg_start + 1] + duplicated_segment * int(num_fa - 2) + estimated_ops[seg_end:]

    return estimated_ops


def estimate_operators(input_config: ParallelConfig, operator_rules_list: list, is_ffn: bool, is_attn: bool) -> list:
    """Estimate the concrete operator list for a parallel config from a template.

    Dispatches to the attention or MoE/MLP instantiator. For MoE models the
    number of local experts is derived from the global expert count and the
    expert (optionally TP-extended) parallel size.
    """
    if is_attn:
        # Attention module: num_fa is the ring-attention (CP) size.
        cp_size = input_config.ring_attention_size
        return estimate_operators_attn_submodule(operator_rules_list, input_config, cp_size, is_ffn)

    # MoE/MLP module
    num_experts = get_model_config().args.num_experts
    if not num_experts:
        # Dense MLP: no expert segment to replicate.
        return estimate_operators_submodule(operator_rules_list, input_config, 0)

    # tp_extend_ep folds TP into the effective expert parallel size.
    # Use the module logger instead of a bare print for consistent logging.
    logger.info(f'tp-extend-ep: {get_model_config().args.moe_tp_extend_ep}')
    if get_model_config().args.moe_tp_extend_ep:
        num_local_experts = num_experts / input_config.tensor_model_parallel_size / input_config.expert_model_parallel_size
    else:
        num_local_experts = num_experts / input_config.expert_model_parallel_size

    return estimate_operators_submodule(operator_rules_list, input_config, num_local_experts)


class DeriveOperators:
    """Derives operator templates from profiled base configs and infers
    concrete operator shapes for arbitrary parallel configurations.

    State is held in class attributes and all behavior is exposed through
    classmethods; the class acts as a namespace and is never instantiated.
    """

    # Attention: configs for which no template could be matched.
    unmatched_configs_attn: List[ParallelConfig] = []
    # Attention: template mode -> base parallel configs used to build it.
    basic_configs_attn: Dict[str, List[ParallelConfig]] = {}
    # Attention: template mode -> coefficient order used when fitting.
    compute_orders_attn: Dict[str, list] = {}

    # MLP/MoE counterparts of the three attention attributes above.
    unmatched_configs_mlp: List[ParallelConfig] = []
    basic_configs_mlp: Dict[str, List[ParallelConfig]] = {}
    compute_orders_mlp: Dict[str, list] = {}

    # Forward templates
    attn_operator_rules = defaultdict(list)
    mlp_operator_rules = defaultdict(list)

    # Backward templates
    attn_bwd_operator_rules = defaultdict(list)
    mlp_bwd_operator_rules = defaultdict(list)

    @classmethod
    def print_templates(cls, tag):
        """Log every template's base configs (and unmatched configs) under `tag`."""
        logger.info(f">>> {tag}")
        logger.info(f"Attention:")
        for key, value in cls.basic_configs_attn.items():
            logger.info(f"template: {key} order: {cls.compute_orders_attn[key]}")
            for config in value:
                logger.info(config)

        if len(cls.unmatched_configs_attn):
            logger.warning(f"len(unmatched_configs_attn): {len(cls.unmatched_configs_attn)}")
            for config in cls.unmatched_configs_attn:
                logger.warning(config)
        
        logger.info(f"MLP/MoE:")
        for key, value in cls.basic_configs_mlp.items():
            logger.info(f"template: {key} order: {cls.compute_orders_mlp[key]}")
            for config in value:
                logger.info(config)

        if len(cls.unmatched_configs_mlp):
            logger.warning(f"len(unmatched_configs_mlp): {len(cls.unmatched_configs_mlp)}")
            for config in cls.unmatched_configs_mlp:
                logger.warning(config)

    @classmethod
    def get_basic_configs(cls):
        """Return all base configs (attention + MLP/MoE) as a flat list.

        Note: duplicates are not removed if a config serves several templates.
        """
        configs: List[ParallelConfig] = []
        for _, value in cls.basic_configs_attn.items():
            configs.extend([cfg for cfg in value])
        for _, value in cls.basic_configs_mlp.items():
            configs.extend([cfg for cfg in value])
        return configs

    @classmethod
    def find_mode(cls, config: ParallelConfig) -> bool:
        """Return True if both the attention and MLP templates for `config` exist."""
        # Local import to avoid a circular import with operators_template.
        from .operators_template import Template

        mode_attn = Template.select_mode(config, is_attn=True)
        mode_mlp = Template.select_mode(config, is_attn=False)

        return mode_attn in cls.attn_operator_rules.keys() and mode_mlp in cls.mlp_operator_rules.keys()

    @classmethod
    def set_basic_configs(cls, search_space: List[ParallelConfig]):
        """Adaptively select which templates to generate from the search space,
        and the base parallel configs each template group needs."""
        # attn
        cls.basic_configs_attn, cls.compute_orders_attn, cls.unmatched_configs_attn = Template.set_all_template(search_space, is_attn=True)
        # mlp or moe
        cls.basic_configs_mlp, cls.compute_orders_mlp, cls.unmatched_configs_mlp = Template.set_all_template(search_space, is_attn=False)

    @classmethod
    def filter_unused_template(cls, search_space: List[ParallelConfig]):

        """Drop templates that no config in the search space selects."""
        used_templates_attn = set()
        used_templates_mlp = set()
        for config in search_space:
            mode_attn = Template.select_mode(config, is_attn=True)
            used_templates_attn.add(mode_attn)
            mode_mlp = Template.select_mode(config, is_attn=False)
            used_templates_mlp.add(mode_mlp)
        
        for mode in list(cls.basic_configs_attn.keys()):
            if not mode in used_templates_attn:
                cls.basic_configs_attn.pop(mode)
                cls.compute_orders_attn.pop(mode)
        
        for mode in list(cls.basic_configs_mlp.keys()):
            if not mode in used_templates_mlp:
                cls.basic_configs_mlp.pop(mode)
                cls.compute_orders_mlp.pop(mode)

    @classmethod
    def generate_operators_templates(cls, is_attn):
        """Profile every base config, derive the fwd/bwd operator templates for
        each template mode, store them in the class rule dicts, and persist
        each template as JSON in the cache directory."""

        def _save_template(mode, fwd_rule, bwd_rule, is_attn):
            # Persist both forward and backward rules for `mode` as JSON.
            def _save(rule, file_path):
                with open(file_path, 'w') as file:
                    json.dump(rule, file, indent=4)

            module_name = 'attn' if is_attn else ('moe' if get_model_config().is_moe_model() else 'mlp')
            file_path = get_cache_path() + os.sep + '{}_fwd_{}.json'.format(mode, module_name)
            _save(fwd_rule, file_path)

            file_path = get_cache_path() + os.sep + '{}_bwd_{}.json'.format(mode, module_name)
            _save(bwd_rule, file_path)

        # Deduplicate cropped configs purely for logging how many profiler
        # runs this will take.
        profiled_configs = []
        if is_attn:
            cur_basic_configs = copy.deepcopy(cls.basic_configs_attn)
        else:
            cur_basic_configs = copy.deepcopy(cls.basic_configs_mlp)

        for _, basic_configs in cur_basic_configs.items():
            for config in basic_configs:
                if not config.crop_config() in profiled_configs:
                    profiled_configs.append(config.crop_config())
        logger.info(f"profiler {len(profiled_configs)} times for generate_operators_templates")
        for config in profiled_configs:
            print(config)

        for mode, basic_configs in cur_basic_configs.items():
            profile_paths = []
            comm_paths = []

            # Prepare the kernel-details CSV of the base parallel configs
            # required by template `mode`.
            for config in basic_configs:
                cropped_config = config.crop_config()
                logger.debug(f"start run {cropped_config}")
                Launch.launch(OperatorProfileArgs(cropped_config), TaskType.OPERATOR_PROFILLING)

                config_arr = [cropped_config.pipeline_model_parallel_size,
                              cropped_config.tensor_model_parallel_size,
                              cropped_config.data_parallel_size,
                              cropped_config.ring_attention_size,
                              cropped_config.ulysses_size,
                              cropped_config.expert_model_parallel_size,
                              cropped_config.micro_batch_size]

                print(f'cropped_config.operator_profile_path: {cropped_config.operator_profile_path}')
                src_profile_path = find_csv(cropped_config.operator_profile_path)
                print(f'src_profile_path: {src_profile_path}')
                # Copy the CSV to a name encoding the parallel config so the
                # analysis can recover the config from the filename.
                dst_profile_path = os.path.dirname(src_profile_path) + \
                    '/PP{}_TP{}_DP{}_CP{}_UP{}_EP{}_MBS{}_KERNEL.csv'.format(*config_arr)
                shutil.copy(src_profile_path, dst_profile_path)
                profile_paths.append(dst_profile_path)
                comm_paths.append(cropped_config.module_profile_path())

            if is_attn:
                attn_ffn_rule = analyze_operator_list(mode, profile_paths, comm_paths, cls.compute_orders_attn[mode], is_ffn=True, is_attn=True)
                cls.attn_operator_rules[mode] = attn_ffn_rule

                attn_bwd_rule = analyze_operator_list(mode, profile_paths, comm_paths, cls.compute_orders_attn[mode], is_ffn=False, is_attn=True)
                cls.attn_bwd_operator_rules[mode] = attn_bwd_rule

                _save_template(mode, attn_ffn_rule, attn_bwd_rule, True)
            else:
                mlp_ffn_rule = analyze_operator_list(mode, profile_paths, comm_paths, cls.compute_orders_mlp[mode], is_ffn=True, is_attn=False)
                cls.mlp_operator_rules[mode] = mlp_ffn_rule

                mlp_bwd_rule = analyze_operator_list(mode, profile_paths, comm_paths, cls.compute_orders_mlp[mode], is_ffn=False, is_attn=False)
                cls.mlp_bwd_operator_rules[mode] = mlp_bwd_rule

                _save_template(mode, mlp_ffn_rule, mlp_bwd_rule, False)

            logger.info(f"generate {mode} template success")

    @classmethod
    def infer_operators_shape(cls, config: ParallelConfig, is_attn=False, save=True) -> tuple:
        """Infer the (forward, backward) operator lists for `config` from the
        stored templates; returns (None, None) if no template matches.
        Optionally saves both lists as CSVs in the cache directory."""

        def _save_opertors(operators, file_path):
            # Dump the operator list to a CSV; no-op for an empty list.
            if not operators:
                return
            sheet = pd.DataFrame(columns=['Type', 'Input Shape', 'Input Data Types', 'Domains', 'Async'])
            for op in operators:
                sheet.loc[len(sheet)] = [
                    op['Type'],
                    op['Input Shape'],
                    op.get('Input Data Types', 'BF16'),
                    op.get('Domains', ''),
                    op.get('Async', '')
                ]
            sheet.to_csv(file_path, index=False)

        if is_attn:
            attn_mode = Template.select_mode(config, is_attn=True)
            if not (attn_mode in cls.attn_operator_rules.keys() and attn_mode in cls.attn_bwd_operator_rules):
                return None, None
            
            operators = estimate_operators(config, cls.attn_operator_rules[attn_mode], is_ffn=True, is_attn=True)
            bwd_operators = estimate_operators(config, cls.attn_bwd_operator_rules[attn_mode], is_ffn=False, is_attn=True)
        else:
            mlp_mode = Template.select_mode(config, is_attn=False)
            if not (mlp_mode in cls.mlp_operator_rules.keys() and mlp_mode in cls.mlp_bwd_operator_rules.keys()):
                return None, None
            
            operators = estimate_operators(config, cls.mlp_operator_rules[mlp_mode], is_ffn=True, is_attn=False)
            bwd_operators = estimate_operators(config, cls.mlp_bwd_operator_rules[mlp_mode], is_ffn=False, is_attn=False)

        if save:
            module_name = 'attn' if is_attn else ('moe' if get_model_config().is_moe_model() else 'mlp')
            file_path = get_cache_path() + os.sep + '{}_{}_fwd_operators.csv'.format(config, module_name)
            _save_opertors(operators, file_path)

            file_path = get_cache_path() + os.sep + '{}_{}_bwd_operators.csv'.format(config, module_name)
            _save_opertors(bwd_operators, file_path)
        
        return operators, bwd_operators
    







