import os
import copy
import numpy as np
from galvatron.utils import (
    read_allreduce_bandwidth_config, 
    read_json_config, 
    read_p2p_bandwidth_config, 
    form_strategy, 
    print_strategies,
    strategy2config,
    array2str,
    write_json_config
)

# from geesibling.adapters.pytorch.galvatron.core import MemoryCostModel, TimeCostModel, DpOnModel        # TODO
from galvatron.core import TimeCostModel
from scipy.optimize import curve_fit

class SearchEngine():
    """Galvatron parallelism search engine.

    Enumerates candidate [pp, tp, dp] strategies, loads profiled computation
    times and hardware bandwidth configs, builds per-layertype time cost
    models, and searches for the minimum-cost strategy under a settled global
    batch size and chunk count.
    """
    def __init__(self, args):
        self.args = args
        # Total number of GPUs in the cluster.
        args.gpu_num = args.num_nodes * args.num_gpus_per_node
        self.layernum_arg_names = None
        self.mem_path = None        # cached path to the memory-profiling config
        self.time_path = None       # cached path to the computation-profiling config
        self.model_name = None
        self.time_config = None
        self.memory_config = None
        self.param_sizes = None     # per-layertype parameter size (MB)
        self.act_sizes = None       # per-layertype activation size per bsz (by tp degree)
        self.other_memory_pp_off = None
        self.other_memory_pp_on = None
        self.time_profiled_list = None  # profiled forward times, or fitted curve params
        self.use_pipeline_costmodel = args.use_pipeline_costmodel
        self.model_type = 'gpt'
        self.optimal_chunk_func = optimal_chunk_func_default
        # Convert the memory constraint from GB (args) to MB.
        self.memory_constraint = args.memory_constraint * 1024
        
    # =============== Setting Galvatron Search Engine Basic Information ===============
    def set_search_engine_info(self, path, model_layer_configs, model_name):
        """Set model layout info, base path and model name, and resolve profiling paths."""
        self.set_model_layer_configs(model_layer_configs)
        self.set_path(path)
        self.set_model_name(model_name)
        self.memory_profiling_path()
        self.time_profiling_path()
    
    def set_path(self, path):
        self.path = path

    def set_model_type(self, model_type):
        self.model_type = model_type

    def set_model_name(self, name):
        self.model_name = name
        
    def memory_profiling_path(self):
        """Return (and cache) the memory-profiling JSON path for this model/precision."""
        if self.mem_path is not None:
            return self.mem_path
        assert self.model_name is not None, 'Should specify the model name!'
        args = self.args
        memory_config_path = 'configs/memory_profiling_%s_%s.json'%(args.mixed_precision, self.model_name)
        self.mem_path = os.path.join(self.path, memory_config_path)
        return self.mem_path
    
    def time_profiling_path(self):
        """Return (and cache) the computation-profiling JSON path for this model/precision."""
        if self.time_path is not None:
            return self.time_path
        assert self.model_name is not None, 'Should specify the model name!'
        args = self.args
        time_config_path = "configs/computation_profiling_%s_%s.json"%(args.mixed_precision, self.model_name)
        self.time_path = os.path.join(self.path, time_config_path)
        return self.time_path
    
    def set_microbatch_func(self, microbatch_size, max_chunk):
        # NOTE(review): max_chunk is accepted but never used here.
        # NOTE(review): the lambda forwards only 3 positional args to
        # optimal_chunk_func_default, which also declares a min_tp parameter --
        # confirm min_tp has a default value, otherwise this raises TypeError.
        self.optimal_chunk_func = lambda local_bsz, strategy: optimal_chunk_func_default(local_bsz, strategy, microbatch_size)
    
    def set_model_layer_configs(self, model_layer_configs):
        """Extract per-layertype hidden size / layer count / seq len / head count lists."""
        if model_layer_configs is None:
            return
        self.hiddensize_list = [config['hidden_size'] for config in model_layer_configs]
        self.layernum_list = [config['layer_num'] for config in model_layer_configs]        # lyh gees: simply stores layer_num, e.g. [48]; multiple layer types give e.g. [12, 24]
        self.seqlen_list = [config['seq_len'] for config in model_layer_configs]
        self.headnum_list = [config['num_attention_heads'] for config in model_layer_configs]
        self.num_layertype = len(self.layernum_list)
    
    # =============== Initializing Galvatron Search Engine ===============
    # Generating Strategies, Loading Profiled Memory & Time Config, Setting Memory & Time Cost Models
    def initialize_search_engine(self):
        self.generate_strategies()
        self.get_profiled_model_configs()       # TODO: remove all memory-related content; the memory parts are kept around for now
        self.get_profiled_hardware_configs()
        self.set_cost_models()
        self.show_search_info()
        
    def convert_keys_to_int(self, d):
        """Recursively convert dict keys that are digit strings into ints."""
        if isinstance(d, dict):
            new_dict = {}
            for k, v in d.items():
                if isinstance(k, str) and k.isdigit():
                    new_dict[int(k)] = self.convert_keys_to_int(v)
                else:
                    new_dict[k] = self.convert_keys_to_int(v)
            return new_dict
        return d
    def get_profiled_model_configs(self):
        """Load profiled computation times, fitting an exp-decay curve when needed.

        Returns (time_config, memory_config); memory_config stays None because
        memory profiling is currently disabled (gees).
        """
        self.time_config = read_json_config(self.time_profiling_path())

        # gees
        # self.memory_config = read_json_config(self.memory_profiling_path())
        # self.memory_config = self.convert_keys_to_int(self.memory_config)

        if self.args.computation_mode=='linear':
            self.time_profiled_list = [self.time_config['layertype_%d'%i] for i in range(self.num_layertype)]
        else:
            # Fit per-layertype computation time as an exp-decay function of bsz.
            self.time_profiled_list = []
            for i in range(self.num_layertype):
                x_data = []
                y_data = []
                for s,t in self.time_config.items():
                    if s.startswith('layertype_%d_'%i):
                        x_data.append(int(s.split('bsz')[-1]))
                        y_data.append(t)
                assert len(x_data) >= 8, "Different bsz in computation profile of layertype_%d should not be lower than 8."%i
                
                def exp_decay(x, a, b, c):
                    return a * np.exp(-b * x) + c
                popt, pcov = curve_fit(exp_decay, x_data, y_data)
                
                print("Fitted parameters:", popt)
                self.time_profiled_list.append(popt)
                
        self.param_sizes = [0] * self.num_layertype
        self.act_sizes = [{} for _ in range(self.num_layertype)]

        # # gees
        # self.args.sequence_parallel = False
        #
        # if self.args.sequence_parallel:
        #     for i in range(self.num_layertype):
        #         layer_mem_config = self.memory_config['layertype_%d_sp'%i]
        #         parameter_size = layer_mem_config['parameter_size']
        #         tp_activation_per_bsz_dict = layer_mem_config['tp_activation_per_bsz_dict'].copy()
        #         self.param_sizes[i] = parameter_size
        #         self.act_sizes[i] = tp_activation_per_bsz_dict
        #     self.other_memory_pp_off = self.memory_config['other_memory_pp_off_sp']
        #     self.other_memory_pp_on = {'first_stage':self.memory_config['other_memory_pp_on_first_sp'], 'last_stage':self.memory_config['other_memory_pp_on_last_sp']}
        # else:
        #     for i in range(self.num_layertype):
        #         layer_mem_config = self.memory_config['layertype_%d'%i]
        #         parameter_size = layer_mem_config['parameter_size']
        #         tp_activation_per_bsz_dict = layer_mem_config['tp_activation_per_bsz_dict'].copy()
        #         self.param_sizes[i] = parameter_size                # TODO: this is the key value!!! can it be simulated???
        #         self.act_sizes[i] = tp_activation_per_bsz_dict
        #     self.other_memory_pp_off = self.memory_config['other_memory_pp_off']
        #     self.other_memory_pp_on = {'first_stage':self.memory_config['other_memory_pp_on_first'], 'last_stage':self.memory_config['other_memory_pp_on_last']}
        
        return self.time_config, self.memory_config
        
    def get_profiled_hardware_configs(self):
        """Load allreduce / p2p bandwidth configs profiled for this cluster shape."""
        args = self.args
        hardware_configs_dir = '../../profile_hardware/hardware_configs/'
        gpu_num_config = '_%dnodes_%dgpus_per_node.json'%(args.num_nodes, args.num_gpus_per_node)
        allreduce_bandwidth_config_path = hardware_configs_dir + 'allreduce_bandwidth' + gpu_num_config
        self.allreduce_bandwidth, self.allreduce_comm_coe = read_allreduce_bandwidth_config(os.path.join(self.path, allreduce_bandwidth_config_path), gpu_num=args.gpu_num)
        p2p_bandwidth_config_path = hardware_configs_dir + 'p2p_bandwidth' + gpu_num_config
        self.p2p_bandwidth, self.p2p_comm_coe = read_p2p_bandwidth_config(os.path.join(self.path, p2p_bandwidth_config_path))

        # gees: let the DP overlap coefficient use the default value
        # overlap_coe_path = hardware_configs_dir + 'overlap_coefficient.json'
        # self.overlap_coe = read_json_config(overlap_coe_path)['overlap_coe']

    def set_cost_models(self):
        self.set_time_cost_models()
        # self.set_memory_cost_models()     # gees TODO: memory-related


    def set_time_cost_models(self):
        """Build one TimeCostModel kwargs dict per layer type."""
        self.timecost_model_args_list = []
        for i in range(self.num_layertype):
            self.timecost_model_args_list.append({
                    'parameter_size': estimate_layer_parameter_size(self.model_type,self.hiddensize_list[i],self.headnum_list[i]),      # TODO: -------- can this be obtained by estimation? -------- hard-coded for now!!!!
                    'microbatch': True,
                    # 'optimal_chunk_func': self.optimal_chunk_func,
                    'sequence_length': self.seqlen_list[i],
                    'hidden_size': self.hiddensize_list[i],
                    'vocab_size': self.args.padded_vocab_size,
                    'forward_computation_time': self.time_profiled_list[i],
                    'bct_fct_coe': 2,
                    'extra_overhead': 0,
                    'comm_coe_dict': self.allreduce_comm_coe,
                    # 'dp_overlap_coe': self.overlap_coe,
                    # 'bct_overlap_coe': self.overlap_coe,
                    'p2p_comm_coe_dict': self.p2p_comm_coe,
                    'layer_num': self.layernum_list[i],
                    'use_zero2_for_dp':  0,
                    'mixed_precision': False,
                    'costmodel_coe': self.args.costmodel_coe,
                    })
    
    # def set_time_cost_models(self):
    #     self.timecost_model_args_list = []
    #     for i in range(self.num_layertype):
    #         self.timecost_model_args_list.append({
    #                 'parameter_size': self.param_sizes[i],      # TODO: -------- can this be obtained by estimation? -------- hard-coded for now!!!!
    #                 'microbatch': False if self.use_pipeline_costmodel else True,       # lyh: when use_pipeline_costmodel is enabled
    #                 'optimal_chunk_func': self.optimal_chunk_func,
    #                 'sequence_length': self.seqlen_list[i],
    #                 'hidden_size': self.hiddensize_list[i],
    #                 'vocab_size': self.args.padded_vocab_size,
    #                 'forward_computation_time': self.time_profiled_list[i],
    #                 'bct_fct_coe': 2,
    #                 'extra_overhead': 0,
    #                 'comm_coe_dict': self.allreduce_comm_coe,       # lyh: how is this read?
    #                 'dp_overlap_coe': self.overlap_coe,
    #                 'bct_overlap_coe': self.overlap_coe,
    #                 'p2p_comm_coe_dict': self.p2p_comm_coe,
    #                 'layer_num': self.layernum_list[i],
    #                 'use_zero2_for_dp': 1 if self.args.default_dp_type == 'zero2' else 0,
    #                 'mixed_precision': False if self.args.mixed_precision == 'fp32' else True,
    #                 'costmodel_coe': self.args.costmodel_coe,       # lyh: and what is this?
    #                 })
    
    def set_memory_cost_models(self):
        """Build one MemoryCostModel kwargs dict per layer type (currently unused, gees)."""
        self.memcost_model_args_list = []
        for i in range(self.num_layertype):
            self.memcost_model_args_list.append({  
                    # 'parameter_size': self.param_sizes[i],
                    'tp_activation_per_bsz_dict': self.act_sizes[i],
                    'other_memory_pp_off': self.other_memory_pp_off,
                    'other_memory_pp_on': self.other_memory_pp_on,
                    'microbatch': True,
                    'optimal_chunk_func': self.optimal_chunk_func,
                    'model_type': self.model_type,
                    'checkpoint': 0 if self.args.disable_ckpt else 1,
                    'use_zero2_for_dp':1 if self.args.default_dp_type == 'zero2' else 0,
                    'use_zero3_for_embed':self.args.embed_sdp,
                    'mixed_precision': False if self.args.mixed_precision == 'fp32' else True,
                    'pipeline_type': self.args.pipeline_type,
                    'disable_vtp': self.args.disable_vtp,
                    'max_tp_deg': self.args.max_tp_deg,
                    'gpu_num': self.args.gpu_num,
                    })
    
    # =============== For Galvatron Search Engine Parallelism Optimization ===============
    def parallelism_optimization(self):
        """Search all strategies for the settled bsz/chunk and return the best one."""
        print('='*25, 'Galvatron Search Engine Start Searching','='*25)
        self.set_searching_bsz()
        
        # print('-----', '[Searching Memory Info]', 'Memory constraint:', self.memory_constraint, 'MB', '-----')
        results = dict()
        self.search_history = dict()
        # pp_stage_dict_for_bsz = get_pp_stages_for_all_bsz(self.strategies, self.memcost_model_args_list, self.layernum_list, self.BSZs)
        # pp_stage_dict_for_bsz = {
        #     # layer division plan for bsz=8
        #     8: {
        #         1: [24],  # pp=1
        #         2: [12, 12],  # pp=2
        #         4: [6, 6, 6, 6],  # pp=4
        #         8: [3, 3, 3, 3, 3, 3, 3, 3]
        #     },
        #     # division plans for other bsz (similar)
        #     ...
        # }
        temp_strategies = copy.deepcopy(self.strategies)
        # gees: len(self.BSZs) == 1
        for bsz in self.BSZs:       #
            # pp_stage_dict = pp_stage_dict_for_bsz[bsz]
            # {1: [24], 2: [12, 12], 4: [6, 6, 6, 6], 8: [3, 3, 3, 3, 3, 3, 3, 3]}

            results[bsz] = dict()
            chunk_list = range(1,bsz+1)
            # assert(bsz % self.args.gpu_num == 0), "bdz should be divisible by world size"
            if self.args.settle_chunk != -1:
                chunk_list = [self.args.settle_chunk]       # gees: use settle_chunk only
            for chunk in chunk_list:
                results[bsz][chunk] = dict()
                results[bsz][chunk] = self.dynamic_programming(self.strategies,bsz,chunk)
                # TODO: self.dynamic_programming(bsz, pp_stage_dict) should return {time_cost: [2,2,2]}
        return results[self.args.settle_bsz][self.args.settle_chunk]

        # NOTE(review): everything below is unreachable because of the return above.
        # TODO: bsz and chunk are currently fixed inputs; we search for a good
        # parallel strategy under the given global_batch_size and num_of_mb, so
        # there is only one result and no further iteration is needed.
        # print(results[self.BSZs[0]][self.args.settle_chunk])
        # TODO: save the results into a json file
        print("-----------------------------------------")
        print('='*25, 'Galvatron Search Engine End Searching','='*25)

    def set_searching_bsz(self):
        """Set self.BSZs (and min/max bsz) from the settled batch size argument."""
        args = self.args
        # Set Searching BSZs
        # NOTE(review): when settle_bsz is None or <= 0 this method returns
        # without setting self.BSZs, so parallelism_optimization would fail --
        # confirm settle_bsz is always provided in this code path.
        if args.settle_bsz is not None and args.settle_bsz > 0:
            args.settle_bsz = int(np.ceil(args.settle_bsz / min(args.gpu_num, 8)) * min(args.gpu_num, 8))   # gees: round bsz up to a multiple of min(gpu_num, 8)
            if args.search_space in ['dp', 'tp', 'sdp', 'dp+tp'] and args.settle_bsz < args.gpu_num:
                args.settle_bsz = int(np.ceil(args.settle_bsz // args.gpu_num) * args.gpu_num)
            self.min_bsz = self.max_bsz = args.settle_bsz
            self.bsz_scale = 0
            self.BSZs = [args.settle_bsz]
            print('-----', '[Searching Batch Sizes Info]', 'Settle bsz:', args.settle_bsz, '-----')
            return



    def dynamic_programming(self, strategies, bsz, chunk):
        """Evaluate every strategy's time cost for (bsz, chunk) and return the cheapest.

        Despite the name this is an exhaustive evaluation, not DP.
        """
        args = self.args
        # print('bsz=%d'%bsz, pp_stage_dict)
        # lyh: run the search for the given bsz and chunk (to be fixed), and
        # mbsz_dict when there are several micro-batch sizes.
        print("****Searching with bsz=", bsz, " chunk=", chunk)
        cost_with_strategy = dict()
        # NOTE(review): the dict is keyed by cost, so two strategies with an
        # identical cost overwrite each other; only the minimum is kept, so the
        # reported best strategy may be ambiguous in a tie.
        # print(f"gees gees len(self.layernum_list) is {len(self.layernum_list)}")
        # print(f"gees gees len(self.layernum_list) is {len(self.layernum_list)}")
        # print(f"gees gees len(self.layernum_list) is {len(self.layernum_list)}")
        print(self.layernum_list)

        print(bsz,chunk)
        # strategies=[[2,2,2]]
        for strategy in strategies:
            cost_time = 0
            for i in range(len(self.layernum_list)):
                stage_layer_num=self.layernum_list[i]//strategy[0]
                # stage_parameter_size = self.param_sizes[i]
                # self.timecost_model_args_list[0]["parameter_size"] = 100
                cost_time += TimeCostModel(strategy, bsz, chunk,stage_layer_num=stage_layer_num,**self.timecost_model_args_list[i]).gen_result() * stage_layer_num       # TODO: pass chunk in after strategy and bsz
            cost_with_strategy[cost_time]=strategy

        print("---------------------cost_with_strategy is :-------------------")
        print(cost_with_strategy)

        min_cost = min(cost_with_strategy.keys())
        best_strategy = cost_with_strategy[min_cost]

        print(f"====Best Strategy is {best_strategy}====")

        # NOTE(review): built but never used or returned.
        best_strategy_config_dict = {min_cost : best_strategy}

        return best_strategy



    # TODO: final cleanup -- keep only the best [pp, tp, dp].   config_%s_%dnodes_%dgpus_per_node
    def save_results(self, results, bsz, chunk, pp_stage_dict):
        """Write the optimized parallelism config for (results, bsz, chunk) to a JSON file."""
        re, optimal_bsz = results, bsz
        args = self.args
        if re['min_pp_deg'] > 0 and re['min_res_list'] is not None:
            result_strategy = []
            # Flatten a nested per-stage strategy list into a flat per-layer list.
            if isinstance(re['min_res_list'],list) and isinstance(re['min_res_list'][0],list) and isinstance(re['min_res_list'][0][0],list):
                for l in re['min_res_list']:
                    result_strategy += l
            else:
                result_strategy = re['min_res_list']
            # print(re['min_res_list'], result_strategy)
            config = strategy2config(result_strategy)
            config['checkpoint'] = array2str([1 if 'cpt' in s[-1] and s[-1]['cpt'] else 0 for s in result_strategy])
            config['global_bsz'] = optimal_bsz
            config['chunks'] = chunk # max([int(self.optimal_chunk_func(optimal_bsz//s[2],s,mbsz)) for s in result_strategy]) # if config['pp_deg'] > 1 else 1
            config['pp_division'] = array2str(pp_stage_dict[config['pp_deg']])
            config['pipeline_type'] = args.pipeline_type
            config['default_dp_type'] = args.default_dp_type
            config['vtp'] = re['vtp']
            if args.embed_sdp:
                config['embed_sdp'] = 1
            
            # Build a config filename that encodes precision, bsz and disabled options.
            mixed_precision = '_%s'%args.mixed_precision
            settle_bsz = '_bsz%d'%args.settle_bsz if args.settle_bsz > 0 else ''
            off_options = []
            if args.disable_dp:
                off_options.append('dp')
            if args.disable_tp:
                off_options.append('tp')
            if args.disable_pp:
                off_options.append('pp')
            if args.disable_sdp:
                off_options.append('sdp')
            if args.disable_ckpt:
                off_options.append('ckpt')
            if args.disable_tp_consec:
                off_options.append('tpconsec')
            off_options_str = '_[%s_off]'%('_'.join(off_options))if len(off_options) else ''
            
            config_path = 'configs/galvatron_config_%s_%dnodes_%dgpus_per_node_%dGB'%(self.model_name, args.num_nodes, args.num_gpus_per_node, self.memory_constraint//1024)
            config_path += mixed_precision + settle_bsz + off_options_str
            config_path = os.path.join(self.path, config_path+'.json')
            write_json_config(config, config_path)
            print('Already written optimized parallelism config into galvatron config file %s!'%(config_path))


        
    # =============== Strategies & Search Space Utils ===============
    def generate_strategies(self):
        """Generate, filter and store the candidate [pp, tp, dp] strategy list."""
        args = self.args
        gpu_num = args.gpu_num
        strategies = self.generate_dp_tp_pp_sdp()

        # gees temp: temporarily drops the largest configs per dimension
        strategies = self.filter_strategies(strategies)

        print(f"args.search_space: {args.search_space}")
        if args.search_space == 'dp+tp':
            args.disable_sdp = 1
            args.disable_pp = 1
        elif args.search_space == 'dp+pp':
            args.disable_sdp = 1
            args.disable_tp = 1
        elif args.search_space == '3d':
            args.disable_sdp = 1
        if args.search_space in ['3d', 'dp', 'tp', 'pp', 'sdp']:
            self.strategies = strategies
            args.disable_ckpt = 1
            return strategies
        strategies_new = []
        # assert(not(args.disable_sdp and args.disable_dp))
        for s in strategies:
            if args.disable_dp and s[2] > 1:
                continue
            if args.disable_sdp and s[2] > 1:
                continue
            if args.disable_tp and s[1] > 1:
                continue
            if args.disable_pp and s[0] > 1:
                continue
            strategies_new.append(s)
        strategies = strategies_new

        # if not args.disable_ckpt:
        #     strategies_cpt = []
        #     for s in strategies:
        #         s_cpt = copy.deepcopy(s)
        #         s_cpt[-1]['cpt']=1
        #         strategies_cpt.append(s_cpt)
        #     strategies += strategies_cpt
        self.strategies = strategies
        return strategies

    # def

    # gees
    def generate_dp_tp_pp_sdp(self, gpu_num=None, search_space="full"):
        """Enumerate power-of-two [pp, tp, dp] strategies for the given search space.

        NOTE(review): the default search_space is "full" (not None), so the
        "if search_space is None" fallback to args.search_space never triggers
        for no-argument calls -- confirm this is intended.
        """
        args = self.args
        gpu_num = args.gpu_num if gpu_num is None else gpu_num
        #gees
        print('------------------------gpu_num is ', gpu_num)
        # strategies = []
        search_space = args.search_space if search_space is None else search_space
        # Collect all powers of two up to gpu_num.
        i, total = 1, []
        while i <= gpu_num:
            total.append(i)
            i *= 2

        strategies = []
        if search_space == 'full':
            for pp in total:
                for tp in total:
                    if pp * tp <= gpu_num:
                        dp = gpu_num // (pp * tp)
                        # keep only [pp, tp, dp]; ignore all sdp-related configs
                        strategies.append([pp, tp, dp])

        elif search_space == 'dp+tp':
            pp = 1
            for tp in total:
                if pp * tp <= gpu_num:
                    dp = gpu_num // (pp * tp)
                    strategies.append([pp, tp, dp])

        elif search_space == 'dp+pp':
            tp = 1
            for pp in total:
                if pp * tp <= gpu_num:
                    dp = gpu_num // (pp * tp)
                    strategies.append([pp, tp, dp])

        elif search_space == '3d':
            strategies.append([2, 2, gpu_num // 4])

        elif search_space == 'dp':
            strategies.append([1, 1, gpu_num])

        elif search_space == 'tp':
            strategies.append([1, args.max_tp_deg, gpu_num // args.max_tp_deg])

        elif search_space == 'pp':
            strategies.append([args.max_pp_deg, 1, gpu_num // args.max_pp_deg])

        return strategies

    # gees

    def filter_strategies(self, strategies, gpu_num=None):
        """Drop strategies that use the most extreme degree(s) in any dimension.

        For < 32 GPUs the single largest pp/tp/dp value is excluded; for >= 32
        GPUs the two largest values per dimension are excluded.
        NOTE(review): with small strategy sets this can filter out every
        candidate (each strategy carries some maximal value) -- confirm callers
        tolerate an empty result.
        """
        args = self.args
        gpu_num = args.gpu_num if gpu_num is None else gpu_num

        if gpu_num < 32:
            # For GPU counts < 32, remove one largest configuration for each dimension
            max_pp = max(s[0] for s in strategies)
            max_tp = max(s[1] for s in strategies)
            max_dp = max(s[2] for s in strategies)

            # Remove strategies with max pp or max tp or max dp
            filtered = [s for s in strategies if not (s[0] == max_pp or
                                                      s[1] == max_tp or
                                                      s[2] == max_dp)]
        else:
            # For GPU counts >= 32, remove two largest configurations for each dimension
            pp_values = sorted({s[0] for s in strategies}, reverse=True)
            tp_values = sorted({s[1] for s in strategies}, reverse=True)
            dp_values = sorted({s[2] for s in strategies}, reverse=True)

            # Get top 2 largest values for each dimension
            max_pp1, max_pp2 = pp_values[0], pp_values[1] if len(pp_values) > 1 else pp_values[0]
            max_tp1, max_tp2 = tp_values[0], tp_values[1] if len(tp_values) > 1 else tp_values[0]
            max_dp1, max_dp2 = dp_values[0], dp_values[1] if len(dp_values) > 1 else dp_values[0]

            # Remove strategies with any of the two largest values in any dimension
            filtered = [
                s for s in strategies
                if not (s[0] in (max_pp1, max_pp2) or
                        s[1] in (max_tp1, max_tp2) or
                        s[2] in (max_dp1, max_dp2))
            ]

        return filtered
    def generate_strategies_for_memory_test(self):
        """Generate the full strategy set for a fixed 8-GPU memory test."""
        strategies = self.generate_dp_tp_pp_sdp(gpu_num=8, search_space='full')
        return strategies

    
    def show_search_info(self):
        """Print a summary of search, environment, model and cost-model configs."""
        print('================================================================================')
        print('--- Optimization Configs ----')
        # print('Memory constraint: %d GB'%self.args.memory_constraint)
        print('Pipeline Type:', self.args.pipeline_type)
        # print('Default DP Type:', self.args.default_dp_type)
        # print('Mixed Precision:', self.args.mixed_precision)
        if self.args.embed_sdp:
            print('Embedding SDP: ON')
        print('Search Space:')
        # print_strategies(self.strategies)
        print('================================================================================')
        print('---- Environment Configs ----')
        print('Allreduce Bandwidth (GB/s):', self.allreduce_bandwidth)
        print('Allreduce Communication Coefficient (ms/MB):', self.allreduce_comm_coe)
        print('P2P Bandwidth (GB/s):', self.p2p_bandwidth)
        print('P2P Communication Coefficient (ms/MB):', self.p2p_comm_coe)
        # print('Overlap coefficient:', self.overlap_coe)
        print('================================================================================')
        print('------- Model Configs -------')
        print('Model Name:', self.model_name)
        print('Num layertype:', self.num_layertype)
        print('Layer_num:', self.layernum_list)
        print('Hidden_size:', self.hiddensize_list)
        print('Seq_len:', self.seqlen_list)
        print('================================================================================')
        print('--- Model Computation Configs ---')
        print('Forward computation time:', self.time_profiled_list)
        print('================================================================================')
        # print('--- Model Memory Configs ---')
        # print('Parameter Memory Cost:', self.param_sizes)
        # print('Activation Memory Cost of Different TP degree (per bsz):')
        # print(self.act_sizes)
        # print('Other Memory Cost (pp = 1):')
        # print(self.other_memory_pp_off)
        # print('Other Memory Cost (pp > 1):')
        # print(self.other_memory_pp_on)
        # print('================================================================================')
        print('Time Cost Model Args:')
        print(self.timecost_model_args_list)
        print('================================================================================')
        # print('Memory Cost Model Args:')
        # print(self.memcost_model_args_list)
        # print('================================================================================')

def estimate_layer_parameter_size(model_type, hidden_size, num_heads):
    """Estimate the parameter memory of one Transformer layer in MB.

    Assumes fp32 storage (4 bytes per parameter).

    Args:
        model_type: 'gpt' or 'llama' (case-insensitive).
        hidden_size: model hidden dimension.
        num_heads: number of attention heads. Currently unused by both
            formulas; kept for interface compatibility.

    Returns:
        Estimated per-layer parameter size in MB (float).

    Raises:
        ValueError: if model_type is neither 'gpt' nor 'llama'.
    """
    bytes_per_parameter = 4  # fp32

    model = model_type.lower()
    if model == 'gpt':
        # Multi-head attention: Q/K/V projections plus the output projection.
        mha_params = 4 * (hidden_size * hidden_size)
        # FFN: two linear layers with a 4x expansion factor.
        ffn_params = 2 * (hidden_size * 4 * hidden_size)
        # Two LayerNorms, each with gamma and beta vectors.
        ln_params = 2 * (2 * hidden_size)
        total_params = mha_params + ffn_params + ln_params
    elif model == 'llama':
        # Attention (with RoPE): Q/K/V projections plus the output projection.
        mha_params = 4 * (hidden_size * hidden_size)
        # SwiGLU FFN with an 8/3 expansion factor.
        # NOTE(review): this counts only two projection matrices; a full SwiGLU
        # block has three (gate/up/down). Kept as-is to preserve the original
        # estimate -- confirm before relying on it.
        ffn_expansion = 8 / 3
        ffn_params = hidden_size * ffn_expansion * hidden_size + ffn_expansion * hidden_size * hidden_size
        # Two RMSNorms (gamma only; RMSNorm has no beta).
        ln_params = 2 * hidden_size
        total_params = mha_params + ffn_params + ln_params
    else:
        raise ValueError(f"不支持的模型类型: {model_type}，目前只支持 'gpt' 和 'llama'")
    # Convert the parameter count to MB.
    size_in_bytes = total_params * bytes_per_parameter
    size_in_mb = size_in_bytes / (1024 * 1024)
    return size_in_mb



# ========================== Pipeline Division & Pipeline Cost Utils ==========================
def pp_division_memory_balanced(memcost_model_args, layer_num, pp_deg, bsz, strategies):
    """Divide layers into pp_deg pipeline stages balancing estimated memory.

    Returns (pp_divide, mem_cost_per_stage): pp_divide[i] is the layer count of
    stage i; returns (None, None) when no strategy matches pp_deg, and
    ([total_layers], None) when pp_deg == 1.

    NOTE(review): MemoryCostModel is not defined in this module (its import is
    commented out at the top of the file), so the pp_deg > 1 path raises
    NameError -- confirm this helper is currently dead code.
    """
    assert(len(memcost_model_args)==len(layer_num))
    if pp_deg == 1:
        # Single stage: all layers go to it, nothing to balance.
        return [np.sum(layer_num)], None
    layer_type_num = len(layer_num)
    layer_min_memcost = []
    # Only strategies whose pp degree matches are relevant.
    strategies = list(filter(lambda s: s[0] == pp_deg, strategies))
    if len(strategies)==0:
        return None, None
    gpu_num = strategies[0][0] * strategies[0][1] * strategies[0][2]
    for i in range(layer_type_num):
        # memcosts = [MemoryCostModel(strategy, global_batch_size=bsz, **memcost_model_args[i]).get_memory_cost()['enc_total'] for strategy in strategies]
        # layer_min_memcost.append(np.min(memcosts))
        # Estimate per-layer cost under pure pp+dp (tp = 1).
        memcost = MemoryCostModel([pp_deg, 1, gpu_num//pp_deg, {}], global_batch_size=bsz, **memcost_model_args[i]).get_memory_cost()['enc_total']
        layer_min_memcost.append(np.min(memcost))
    other_cost = MemoryCostModel(strategies[0], global_batch_size=bsz, **memcost_model_args[0]).get_memory_cost()['other']
    # print(layer_min_memcost, other_cost)
    # Expand the per-layertype costs into one entry per layer.
    min_memcost_all_layers = []
    for i in range(layer_type_num):
        min_memcost_all_layers += [layer_min_memcost[i]]*layer_num[i]
    #print(min_memcost_all_layers)
    avg_mem_cost = (np.sum(min_memcost_all_layers)+np.sum(other_cost))/pp_deg
    #print('Avg memcost:', avg_mem_cost)

    # Greedily assign layers from the last stage backwards, filling each stage
    # up to roughly the average memory cost.
    pp_divide = [0]*pp_deg
    mem_cost_per_stage = other_cost.copy()
    idx = len(min_memcost_all_layers)-1
    for i in range(pp_deg-1,-1,-1):
        while True:
            if idx < 0:
                break
            if i > 0 and avg_mem_cost - mem_cost_per_stage[i] < 0.5 * min_memcost_all_layers[idx]:
                break
            else:
                mem_cost_per_stage[i]+=min_memcost_all_layers[idx]
                idx-=1
                pp_divide[i]+=1
    # print(pp_divide)

    # Avoid too much memory cost on previous stages
    for i in range(pp_deg-1):
        left, right = int(np.sum(pp_divide[:i])), int(np.sum(pp_divide[:i+1]))
        mem_cost_cur_stage = np.sum(min_memcost_all_layers[left:right]) + other_cost[i]
        while mem_cost_cur_stage > avg_mem_cost * 1.3:
            # Shift layers forward to the next stage until under 1.3x average.
            pp_divide[i] -= 1
            pp_divide[i+1] += 1
            right -= 1
            mem_cost_cur_stage -= min_memcost_all_layers[right]

    # Avoid no layers on previous stages
    for i in range(pp_deg-1):
        while pp_divide[i] <= 0:
            pp_divide[i] += 1
            pp_divide[i+1] -= 1

    # Avoid no layers on last stage
    for i in range(pp_deg-1, 0, -1):
        while pp_divide[i] <= 0:
            pp_divide[i] += 1
            pp_divide[i-1] -= 1
    
    # Recompute the final per-stage memory after the adjustments above.
    mem_cost_per_stage_adjusted = other_cost.copy()
    # print(pp_divide)
    # print(other_cost, avg_mem_cost)
    for i in range(pp_deg):
        left, right = int(np.sum(pp_divide[:i])), int(np.sum(pp_divide[:i+1]))
        mem_cost_per_stage_adjusted[i] +=  np.sum(min_memcost_all_layers[left:right])
    # print(mem_cost_per_stage,mem_cost_per_stage_adjusted)
    return pp_divide, mem_cost_per_stage_adjusted

def get_pp_stage_for_bsz(strategies, memcost_model_args_list, layer_num_list, bsz, single_layer_even=True):
    """Compute a pipeline layer division for every pp degree present in strategies.

    When there is a single layer type and single_layer_even is True, layers
    are split evenly; otherwise a memory-balanced division is used.
    """
    pp_stage_dict = dict()
    for pp_deg in sorted({s[0] for s in strategies}):
        if single_layer_even and len(layer_num_list) == 1:
            division = pp_division_even(layer_num_list, pp_deg)
        else:
            division, _stage_mem = pp_division_memory_balanced(memcost_model_args_list, layer_num_list, pp_deg, bsz, strategies)
            # print(bsz, pp_deg, division, _stage_mem)
        pp_stage_dict[pp_deg] = division
    return pp_stage_dict


def get_pp_stages_for_all_bsz(strategies, memcost_model_args_list, layer_num_list, bszs):
    """Map each batch size in bszs to its per-pp-degree pipeline division."""
    return {
        bsz: get_pp_stage_for_bsz(strategies, memcost_model_args_list, layer_num_list, bsz)
        for bsz in bszs
    }
    
def get_cost_all_stages(layer_memcosts, pp_stage_division):
    """Sum per-layer costs into per-pipeline-stage totals.

    layer_memcosts may carry one or two extra entries beyond the layers counted
    in pp_stage_division (presumably embedding/head costs -- TODO confirm with
    callers); the division is widened so the first (and last) stage absorb them.

    Args:
        layer_memcosts: flat per-layer cost list.
        pp_stage_division: number of layers assigned to each stage.

    Returns:
        List with one summed cost per stage.
    """
    pp_stage_division = copy.deepcopy(pp_stage_division)
    divided_layers = int(np.sum(pp_stage_division))
    if divided_layers + 1 == len(layer_memcosts):
        # One extra cost entry: attribute it to the first stage.
        pp_stage_division[0] += 1
    elif divided_layers + 2 == len(layer_memcosts):
        # Two extra entries: first and last stages absorb one each.
        pp_stage_division[0] += 1
        pp_stage_division[-1] += 1
    assert(np.sum(pp_stage_division)==len(layer_memcosts))
    stage_memcosts = []
    for stage_id in range(len(pp_stage_division)):
        layer_start_id = int(np.sum(pp_stage_division[:stage_id]))
        layer_end_id = int(np.sum(pp_stage_division[:stage_id+1]))
        stage_memcosts.append(np.sum(layer_memcosts[layer_start_id:layer_end_id]))
    return stage_memcosts

def get_layer_costs(layernum_list, layer_costs):
    """Expand per-layertype costs into one entry per individual layer."""
    expanded = []
    for type_idx, layer_count in enumerate(layernum_list):
        expanded.extend([layer_costs[type_idx]] * layer_count)
    return expanded
    
def pp_division_even(layernum_list, pp_deg):
    """Split layers evenly across pp_deg stages; the last stage takes the remainder."""
    total = np.sum(layernum_list)
    per_stage = int(total // pp_deg)
    division = [per_stage] * (pp_deg - 1)
    division.append(total - per_stage * (pp_deg - 1))
    return division
    
def optimal_chunk_func_default(local_bsz, strategy, microbatch_size, min_tp=1):
    """Return the number of pipeline chunks for a local batch.

    Args:
        local_bsz: batch size local to one data-parallel group.
        strategy: [pp, tp, dp, ...] strategy; strategy[1] is the TP degree.
        microbatch_size: target micro-batch size per chunk.
        min_tp: baseline TP degree used to scale the local batch. Defaults to
            1, which makes the 3-argument call sites in SearchEngine
            (set_microbatch_func and the __init__ default) valid instead of
            raising TypeError.

    Returns:
        ceil(scaled local bsz / microbatch_size), clamped to at least 1.
    """
    # if strategy[0] == 1:
    #     return 1
    assert(strategy[1] % min_tp == 0)
    # Shrink the local batch by how much this strategy's TP exceeds the baseline.
    local_bsz = local_bsz // (strategy[1] // min_tp)
    chunk = np.ceil(local_bsz / microbatch_size)
    chunk = 1 if chunk == 0 else chunk
    # chunk = int(min(max_chunk,chunk))
    return chunk

def check_optimal_chunks(world_size, strategies, optimal_chunk_func, bsz, mbsz_dict, min_tp):
    """Compute the optimal chunk count for every pp degree appearing in strategies."""
    chunk_dict = {}
    for pp_deg in sorted({s[0] for s in strategies}):
        dp_world = world_size // pp_deg // min_tp
        probe_strategy = [pp_deg, min_tp, world_size // pp_deg, {'fsdp':0, 'cpt':0}]
        chunk_dict[pp_deg] = optimal_chunk_func(bsz / dp_world, probe_strategy, mbsz_dict[pp_deg], min_tp)
    return chunk_dict