import numpy as np
from .common.base_simulator_builder import GeesTransformerLayerSimulatorBuilder
from .simulator_builder.gpt_simulator_builder import GeesGptSimulatorBuilder
from .simulator_builder.bert_simulator_builder import GeesBertSimulatorBuilder
from .simulator_builder.llama_simulator_builder import GeesllamaSimulatorBuilder
from .simulator_builder.mixtral_simulator_builder import GeesMixtralSimulatorBuilder
from typing import Dict, List, Tuple
from itertools import islice

# Hardware constants for the DCU accelerator used in the cost model below.
dcu_flops = 4.3e13  # peak compute throughput — presumably FLOP/s; TODO confirm
dcu_bandwidth = 24.3  # interconnect bandwidth — units unclear (GB/s?); TODO confirm against hardware spec

def get_simulator_builder(model_name: str) -> GeesTransformerLayerSimulatorBuilder:
    """Instantiate the transformer-layer simulator builder for *model_name*.

    Raises:
        ValueError: if *model_name* is not a supported model.
    """
    builder_classes = {
        'bert': GeesBertSimulatorBuilder,
        'gpt': GeesGptSimulatorBuilder,
        'llama': GeesllamaSimulatorBuilder,
        'mixtral': GeesMixtralSimulatorBuilder,
    }
    builder_cls = builder_classes.get(model_name)
    if builder_cls is None:
        raise ValueError(f"Unsupported model name: {model_name}")
    return builder_cls()

class AutoParallelService:
    """Estimate the time cost and load-balance quality of a candidate parallel
    configuration (pipeline / tensor / data / expert parallelism) for a
    transformer model, driven by the per-layer simulator builders.
    """

    # Per-card memory budget in GB; configurations exceeding it are rejected.
    _MEMORY_LIMIT_GB = 64

    @staticmethod
    def _weighted_op_cost(op_counts: List[float]) -> float:
        """Collapse per-op-kind counts into one scalar cost.

        ``op_counts`` is assumed to be [add, sub, mul, div, exp] — TODO confirm
        against the simulator builders. Relative hardware weights: add/sub 1x,
        mul/div 1.5x, exp 20x.
        """
        return (op_counts[0] + op_counts[1]
                + op_counts[2] * 1.5 + op_counts[3] * 1.5
                + op_counts[4] * 20)

    @staticmethod
    def mock(model_name: str, pp_layers_per_partition: List[int], tp_splits: List[int], dp_size: int, layer_info: Dict[str, Dict[str, object]], ep_size: int = 1, experts_num: int = 0):
        """Simulate one parallel configuration.

        Walks every (pp_rank, tp_rank, dp_rank) card and accumulates its
        compute, memory and communication workload, then derives an estimated
        iteration time and a load-balance score (a MoE-style variance "loss"
        over per-card workloads — lower means better balanced).

        Args:
            model_name: one of 'bert' / 'gpt' / 'llama' / 'mixtral'.
            pp_layers_per_partition: transformer layers assigned to each pipeline stage.
            tp_splits: tensor-parallel degree of each pipeline stage.
            dp_size: data-parallel degree.
            layer_info: per-layer profiling info keyed by layer name.
            ep_size: expert-parallel degree (MoE models only).
            experts_num: total number of experts (MoE models only).

        Returns:
            (time_cost_total, balance_score); ``(inf, inf)`` when any card
            exceeds the memory budget.
        """
        transformer_builder = get_simulator_builder(model_name)
        for layer_name, megatron_node in layer_info.items():
            # Layer names are '<Class>_<suffix>'; the prefix selects the builder hook.
            class_name = layer_name.partition('_')[0]
            transformer_builder = transformer_builder.invork_method_mapping(model_name, class_name, megatron_node)
        transformer_layer_info = transformer_builder.build()

        calculate_workload_list = []
        memory_workload_list = []
        communication_workload_list = []

        # Balance lists exclude embedding / output layers so only the repeated
        # transformer layers enter the variance computation.
        calculate_workload_list_balance = []
        memory_workload_list_balance = []
        communication_workload_list_balance = []
        # Visit every card in the (pp, tp, dp) grid.
        for pp_rank in range(len(pp_layers_per_partition)):
            for tp_rank in range(tp_splits[pp_rank]):
                for dp_rank in range(dp_size):
                    if model_name in ['mixtral']:
                        card_calculate_workload = transformer_layer_info.statistic_single_layer_calculate_workload_with_ep(tp_splits[pp_rank], ep_size, experts_num)

                        layer_memory, active_memory = transformer_layer_info.statistic_single_layer_memory_workload_with_ep(tp_splits[pp_rank], ep_size, experts_num)
                        communication_workload = transformer_layer_info.statistic_single_layer_tp_communication_workload_with_ep(tp_splits[pp_rank], ep_size, experts_num)
                        communication_workload += transformer_layer_info.statistic_single_layer_dp_communication_workload_with_ep(tp_splits[pp_rank], dp_size, ep_size, experts_num)
                    else:
                        # Per-layer compute workload on this card.
                        card_calculate_workload = transformer_layer_info.statistic_single_layer_calculate_workload(tp_splits[pp_rank])
                        layer_memory, active_memory = transformer_layer_info.statistic_single_layer_memory_workload(tp_splits[pp_rank])

                        communication_workload = transformer_layer_info.statistic_single_layer_communication_workload(tp_splits[pp_rank])
                        communication_workload += transformer_layer_info.statistic_single_layer_dp_communication_workload(tp_splits[pp_rank], dp_size)

                    # Scale per-layer figures by the number of layers on this stage.
                    card_calculate_workload = [x * pp_layers_per_partition[pp_rank] for x in card_calculate_workload]
                    # Weights, gradients, optimizer state (x6) plus activations (x8)
                    # — presumably mixed-precision Adam bookkeeping; TODO confirm factors.
                    card_memory_workload = pp_layers_per_partition[pp_rank] * layer_memory * 6 + active_memory * 8
                    communication_workload *= pp_layers_per_partition[pp_rank]

                    calculate_workload_list_balance.append(card_calculate_workload)
                    memory_workload_list_balance.append(card_memory_workload)
                    communication_workload_list_balance.append(communication_workload)

                    communication_workload += transformer_layer_info.statistic_single_layer_pp_communication_workload(tp_splits[pp_rank])

                    # First pipeline stage also hosts the embedding.
                    if pp_rank == 0:
                        card_calculate_workload = [x + y for x, y in zip(card_calculate_workload, transformer_layer_info.language_model_embedding.statistic_single_layer_calculate_workload(tp_splits[pp_rank]))]

                        theoretical_embedding_memory, embedding_active_memory = transformer_layer_info.language_model_embedding.statistic_single_layer_memory_workload(tp_splits[pp_rank])
                        card_memory_workload += theoretical_embedding_memory * 2
                        card_memory_workload += embedding_active_memory

                        communication_workload += transformer_layer_info.language_model_embedding.statistic_single_layer_communication_workload(tp_splits[pp_rank])

                    # Last pipeline stage also hosts the output layer.
                    if pp_rank == len(pp_layers_per_partition) - 1:
                        card_calculate_workload = [x + y for x, y in zip(card_calculate_workload, transformer_layer_info.output_layer.statistic_single_layer_calculate_workload(tp_splits[pp_rank]))]

                        _, output_active_memory = transformer_layer_info.output_layer.statistic_single_layer_memory_workload(tp_splits[pp_rank])
                        card_memory_workload += output_active_memory

                        communication_workload += transformer_layer_info.output_layer.statistic_single_layer_communication_workload(tp_splits[pp_rank])

                        # The last stage has no downstream pp send — undo the pp term.
                        communication_workload -= transformer_layer_info.statistic_single_layer_pp_communication_workload(tp_splits[pp_rank])

                    calculate_workload_list.append(card_calculate_workload)
                    memory_workload_list.append(card_memory_workload)
                    communication_workload_list.append(communication_workload)

        # Reject the configuration outright when any card exceeds the memory budget.
        if not AutoParallelService.check_memory_limit(memory_workload_list):
            return float('inf'), float('inf')

        # Total weighted op cost (add/sub/mul/div/exp) per card.
        total_calculate_workload_list = [
            AutoParallelService._weighted_op_cost(item) for item in calculate_workload_list
        ]

        total_calculate_workload_list_balance = [
            AutoParallelService._weighted_op_cost(item) for item in calculate_workload_list_balance
        ]

        total_compute_load = np.var(total_calculate_workload_list_balance)

        # Memory and communication imbalance across cards.
        total_memory_variance = np.var(memory_workload_list_balance)

        total_communication_variance = np.var(communication_workload_list_balance)

        balance_score = np.average([total_compute_load, total_memory_variance, total_communication_variance], weights=[0.3, 0.3, 0.4])

        memory_mean = np.mean(memory_workload_list)

        calculate_mean = np.mean(total_calculate_workload_list)

        communication_mean = np.mean(communication_workload_list)

        # Estimated per-microbatch time in ms (compute + communication terms).
        iterater_time_cost = calculate_mean / dcu_flops * 1000 + communication_mean / dcu_bandwidth * 1000

        # Pipeline fill/drain bubbles plus the steady-state microbatches
        # — the 1000 looks like an assumed microbatch count; TODO confirm.
        time_cost_total = 2 * (len(pp_layers_per_partition) - 1) * iterater_time_cost + iterater_time_cost * 1000

        # Compute-to-communication ratio; larger is better.
        cal_comm_rate = calculate_mean / communication_mean

        # Debug traces for a few hand-picked configurations.
        if pp_layers_per_partition == [16] and tp_splits == [1] and dp_size == 8 and ep_size == 4:
            print(f'pp_layers_per_partition: {pp_layers_per_partition}, tp_splits: {tp_splits}, dp_size: {dp_size}, ep_size: {ep_size}, layer_memory: {layer_memory}, active_memory: {active_memory}, time_cost_total: {time_cost_total}, calculate_mean: {calculate_mean}, communication_mean: {communication_mean}, cal_comm_rate: {cal_comm_rate}, memory_mean: {memory_mean}, balance_score: {balance_score}, memory_workload_list is {memory_workload_list}, calculate_workload_list is {calculate_workload_list}, communication_workload_list is {communication_workload_list}')
        elif pp_layers_per_partition == [16] and tp_splits == [1] and dp_size == 8 and ep_size == 8:
            print(f'pp_layers_per_partition: {pp_layers_per_partition}, tp_splits: {tp_splits}, dp_size: {dp_size}, ep_size: {ep_size}, layer_memory: {layer_memory}, active_memory: {active_memory}, time_cost_total: {time_cost_total}, calculate_mean: {calculate_mean}, communication_mean: {communication_mean}, cal_comm_rate: {cal_comm_rate}, memory_mean: {memory_mean}, balance_score: {balance_score}, memory_workload_list is {memory_workload_list}, calculate_workload_list is {calculate_workload_list}, communication_workload_list is {communication_workload_list}')
        elif pp_layers_per_partition == [2, 2, 2, 2, 2, 2, 2, 2] and tp_splits == [1, 1, 1, 1, 1, 1, 1, 1] and dp_size == 1 and ep_size == 1:
            print(f'pp_layers_per_partition: {pp_layers_per_partition}, tp_splits: {tp_splits}, dp_size: {dp_size}, ep_size: {ep_size}, experts_num: {experts_num}, time_cost_total: {time_cost_total}, calculate_mean: {calculate_mean}, communication_mean: {communication_mean}, cal_comm_rate: {cal_comm_rate}, memory_mean: {memory_mean}, balance_score: {balance_score}, memory_workload_list is {memory_workload_list}, calculate_workload_list is {calculate_workload_list}, communication_workload_list is {communication_workload_list}')

        return time_cost_total, balance_score

    @staticmethod
    def get_layer_momory(layer_info: Dict[str, Dict[str, object]]) -> Tuple[float, float]:
        """Return (layer weight memory, activation memory) in GB for one layer.

        Assumes the first ``layer_info`` entry carries the profiled model-level
        allocation counters and the third entry is the word embedding whose
        weight memory (fp32, 4 bytes/elem) is subtracted — TODO confirm the
        ordering contract with the caller.
        """
        model_info = next(iter(layer_info.values()))
        world_embedding_info = next(islice(layer_info.values(), 2, None))
        theoretical_embedding_memory = world_embedding_info['weight_shape'][0] * world_embedding_info['weight_shape'][1] * 4

        layer_memory = (model_info['init_memory_allocated'] - theoretical_embedding_memory) / 1024 / 1024 / 1024
        active_memory = (model_info['max_memory_allocated'] - model_info['init_memory_allocated']) / 1024 / 1024 / 1024

        return layer_memory, active_memory

    @staticmethod
    def get_layer_memory_with_ep(layer_info: Dict[str, Dict[str, object]], ep_size: int, experts_num: int) -> Tuple[float, float]:
        """Return (layer weight memory, activation memory) in GB for one MoE layer.

        Like :meth:`get_layer_momory`, but the profile only covers one expert,
        so the weight memory of the remaining ``experts_num // ep_size - 1``
        local experts (fc1 + fc2 weights + fc1 bias, fp32) is added back.
        Assumes the fc1/fc2 expert entries are the 4th- and 3rd-from-last
        entries of ``layer_info`` — TODO confirm with the profiler output.
        """
        model_info = next(iter(layer_info.values()))
        world_embedding_info = next(islice(layer_info.values(), 2, None))
        theoretical_embedding_memory = world_embedding_info['weight_shape'][0] * world_embedding_info['weight_shape'][1] * 4

        layer_info_list = list(layer_info.values())
        experts_linear_fc1_info = layer_info_list[-4]
        experts_linear_fc2_info = layer_info_list[-3]

        expert_theorical_memory = (experts_linear_fc1_info['weight_shape'][0] * experts_linear_fc1_info['weight_shape'][1] \
                                + experts_linear_fc2_info['weight_shape'][0] * experts_linear_fc2_info['weight_shape'][1] \
                                + experts_linear_fc1_info['weight_shape'][0]) * 4

        layer_memory = (model_info['init_memory_allocated'] - theoretical_embedding_memory + (experts_num // ep_size - 1) * expert_theorical_memory) / 1024 / 1024 / 1024
        active_memory = (model_info['max_memory_allocated'] - model_info['init_memory_allocated']) / 1024 / 1024 / 1024

        return layer_memory, active_memory

    @staticmethod
    def check_memory_limit(card_more_memory: List[float]) -> bool:
        """Return True iff no card's memory demand (GB) exceeds the budget."""
        return not any(card_memory > AutoParallelService._MEMORY_LIMIT_GB
                       for card_memory in card_more_memory)
