import numpy as np
import torch


'''
    TimeCostModel estimates the time cost of a model during distributed training,
    jointly accounting for computation, communication, and the effects of
    different parallelism strategies.
'''
class TimeCostModel:
    """Estimate the per-layer time cost of one training iteration under a
    given (pp, tp, dp) parallel strategy.

    The model combines forward/backward computation time, data-parallel and
    tensor-parallel allreduce communication, their overlap, and pipeline
    scheduling with microbatches.  NOTE(review): the coefficients (overlap
    factors, the final 0.001 scale in ``gen_result``) appear empirically
    calibrated — confirm against profiling before trusting absolute values.
    """
    def __init__(self,
            strategy,                   # [pp_size, tp_size, dp_size]; only the first 3 entries are used
            global_batch_size,          # lyh: what exactly does global_batch_size mean here?  TODO gees: ---------add chunk---------------
            chunk=1,        # gees add
            stage_layer_num=None,       # number of layers in this pipeline stage (defaults to 24 below)
            parameter_size=48,          # parameter size in MB; sharded across tp ranks below
            microbatch=True,            # whether to model pipelined microbatch execution in gen_result
            optimal_chunk_func = None,  # NOTE(review): accepted but never used in this class
            sequence_length=512,
            hidden_size=1024,
            vocab_size=32000,           # NOTE(review): accepted but never used in this class
            forward_computation_time=35 / 24,   # scalar coefficient, or np.ndarray of exp-decay fit params (a, b, c)
            bct_fct_coe=2,              # backward time = forward time * this coefficient
            extra_overhead=0,
            comm_coe_dict={},       # self.allreduce_comm_coe  NOTE(review): mutable default arg — never mutated here, but fragile
            dp_overlap_coe=1.3,         # slowdown factor of dp comm when overlapped with backward compute
            bct_overlap_coe=1.3,        # slowdown factor of backward compute when overlapped with dp comm
            p2p_comm_coe_dict=None,     # {pp_size: coefficient} for pipeline point-to-point communication
            layer_num=None,     # TODO: layer_num should be the number of layers within one stage under `strategy`; the pipeline completion time is then gen_result * layer_num — compare against that time.
            layer_type='enc',
            use_zero2_for_dp=0,
            mixed_precision=False,      # if True, halves dp/tp/p2p message sizes (2 bytes instead of 4 per element)
            no_comm=False,              # if True, zero out the dp communication volume (compute-only estimate)
            costmodel_coe=1.0):
        self.s = strategy[:3]
        self.sl = sequence_length
        self.hs = hidden_size
        self.microbatch = microbatch
        self.pp_size = self.s[0]
        self.tp_size = self.s[1]
        self.dp_size = self.s[2]
        self.comm_coe_dict = comm_coe_dict
        self.costmodel_coe = costmodel_coe
        # Select allreduce coefficients for dp (self.dc) and tp (self.tc).
        # Dict keys are either '<group_size>' or '<group_size>_<consecutive_flag>'.
        if self.tp_size == 1 or self.dp_size == 1:
            self.dc = self.comm_coe_dict['%d'%self.dp_size] if '%d'%self.dp_size in self.comm_coe_dict.keys() else self.comm_coe_dict['%d_1'%self.dp_size]
            self.tc = self.comm_coe_dict['%d'%self.tp_size] if '%d'%self.tp_size in self.comm_coe_dict.keys() else self.comm_coe_dict['%d_1'%self.tp_size]
        else:
            # In this case, strategy[-1]['tp'] represents tp_consecutive_flag
            # gees: the flag is hard-coded to True; the strategy-dict lookup below is disabled
            tp_consecutive_flag = True
            # info = strategy[-1]
            # assert 'tp' in info.keys() and info['tp'] in [0, 1]
            # tp_consecutive_flag = info['tp']
            if tp_consecutive_flag:
                self.dc = self.comm_coe_dict['%d_0'%self.dp_size]
                self.tc = self.comm_coe_dict['%d_1'%self.tp_size]
            else:
                self.dc = self.comm_coe_dict['%d_1'%self.dp_size]
                self.tc = self.comm_coe_dict['%d_0'%self.tp_size]
        self.fsdp = False
        # gees delete: fsdp detection from the strategy dict is disabled, so self.fsdp stays False
        # if 'fsdp' in strategy[-1].keys() and strategy[-1]['fsdp']:
        #     self.fsdp = True
        self.dp_overlap_coe = dp_overlap_coe
        self.dc_overlap = self.dc*dp_overlap_coe    # effective dp coefficient while overlapped with backward
        self.ps = parameter_size/self.tp_size       # per-rank parameter size after tp sharding
        self.bs = global_batch_size/self.dp_size        # gees: local batch size per dp rank
        self.layer_type = layer_type
        assert(layer_type in ['enc', 'dec'])
        #

        # print(f"strategy is {strategy}")
        # print(f"parameter_size is {parameter_size}")
        # print(f"strategy is {strategy}")
        # print(f"global_batch_size is {global_batch_size}")
        # print(f"parameter_size is {parameter_size}")
        # print(f"sequence_length is {sequence_length}")
        # print(f"hidden_size is {hidden_size}")


        # Number of microbatches assumed by the pipeline schedule.
        self.optimal_microbatch = chunk if chunk>1 else 1


        # Dummy layer_num, can be any multiple of 8.
        # We estimate the time cost of single layer by averaging the time of whole model to deal with pipeline parallel     # TODO: this yields the per-layer time cost (the horizontal direction in the diagram)
        # self.layer_num = 24 if layer_num is None else layer_num
        # gees
        self.layer_num = 24 if stage_layer_num is None else stage_layer_num
        self.checkpoint = False
        # gees: activation-checkpoint detection is disabled, so self.checkpoint stays False
        # if 'cpt' in strategy[-1].keys() and strategy[-1]['cpt']:
        #     self.checkpoint = True

        # forward & backward computation time of whole model (depending on dummy layer_num)
        if isinstance(forward_computation_time,np.ndarray):
            # Fitted model: per-sample forward time follows an exponential decay in batch size.
            def exp_decay(x, a, b, c):
                return (a * np.exp(-b * x) + c) * x
            self.fct = exp_decay(self.bs / self.tp_size, *forward_computation_time) * self.layer_num
        else:
            self.fct = forward_computation_time * self.bs / self.tp_size * self.layer_num
        self.bct = self.fct * bct_fct_coe
        self.bct_overlap_coe = bct_overlap_coe
        self.bct_overlap = self.bct*bct_overlap_coe
        self.eo = extra_overhead

        # dp & tp message size of whole model (depending on dummy layer_num)
        # 2(n-1)/n is the standard allreduce traffic factor for an n-rank group.
        self.dp_message_size = (2*(self.dp_size-1)/self.dp_size*self.ps) * self.layer_num
        tp_comm_times = 4 if layer_type=='enc' else 6   # allreduce ops per layer: 4 for encoder, 6 for decoder
        self.tp_message_size = 2*(self.tp_size-1)/self.tp_size*(self.bs*self.sl*self.hs*tp_comm_times*4/1024/1024) * self.layer_num     # activation traffic in MB (4 bytes/element)

        # # print(f"parameter size is {parameter_size}")
        # print(f"self.dp_message_size is {self.dp_message_size}")
        # print(f"self.tp_message_size is {self.tp_message_size}")

        # if self.fsdp:
        #     self.dp_message_size = self.dp_message_size * 0.5

        # if self.fsdp:
        #     self.dp_message_size_ori = self.dp_message_size
        #     self.dp_message_size = self.dp_message_size * 1.5

        self.p2p_comm_coe = None
        if self.pp_size > 1 and p2p_comm_coe_dict is not None:
            self.p2p_comm_coe = p2p_comm_coe_dict[self.pp_size]
            # Activation tensor exchanged between stages, in MB (4 bytes/element).
            self.p2p_meg_size = self.pp_size*2*self.bs*self.sl*self.hs*4/1024/1024
            if mixed_precision:
                self.p2p_meg_size = self.p2p_meg_size/2
        # print(f"self.p2p_meg_size is {self.p2p_meg_size}")
        self.use_zero2_for_dp = use_zero2_for_dp
        if self.checkpoint:     # currently unreachable: self.checkpoint is always False above
            # self.fct *= 2
            self.bct += self.fct #  * 0.5
            self.tp_message_size *= 2 # 1.5

        if mixed_precision:
            # fp16 halves both allreduce message volumes.
            self.dp_message_size = self.dp_message_size/2
            self.tp_message_size = self.tp_message_size/2

        self.fsdp_allgather_message_size = self.dp_message_size * 0.5

        if no_comm:
            # Compute-only estimate: drop the dp gradient allreduce entirely.
            self.dp_message_size = 0


    def bct_dp_overlap(self, dp_message_size, bct):
        """Model overlapping the dp gradient allreduce with backward compute.

        Returns ``(overlap_part, rest_part, rest_dp_flag)``: the duration
        during which both proceed together (each slowed by its overlap
        coefficient) plus the remaining serial time of whichever finishes
        last.  NOTE(review): ``rest_dp_flag`` is unconditionally forced to
        False below, so the per-branch values are dead; callers discard it.
        """
        dp_overlap_time = dp_message_size * self.dc_overlap     # dp comm duration while overlapped
        bct_overlap_time = bct * self.bct_overlap_coe           # backward compute duration while overlapped
        if dp_overlap_time > bct_overlap_time:
            # dp comm dominates: leftover dp traffic runs alone at full speed.
            overlap_part = bct_overlap_time
            rest_part = (dp_message_size - bct_overlap_time / self.dc_overlap) * self.dc
            rest_dp_flag = True
        elif dp_overlap_time < bct_overlap_time:
            # Backward compute dominates: leftover compute runs alone.
            overlap_part = dp_overlap_time
            rest_part = (bct - dp_overlap_time / self.bct_overlap_coe)
            rest_dp_flag = False
        else:
            overlap_part = bct_overlap_time
            rest_part = 0
            rest_dp_flag = False
        rest_dp_flag = False
        return overlap_part, rest_part, rest_dp_flag

    def pipe_with_microbatch(self, computation_overhead, communication_overhead):
        """Pipeline fill/drain estimate: compute time is scaled by
        (p + m - 1) / (p * m) for p stages and m microbatches; the
        communication term is added unscaled.
        """
        result = computation_overhead*(self.pp_size+self.optimal_microbatch-1)/(self.pp_size*self.optimal_microbatch)+communication_overhead
        return result

    def gen_result(self):
        """Return the estimated time cost per layer.

        Branches on which parallel dimensions are active (pp+dp, pp+tp,
        pure pp, pp+dp+tp); within each branch, ``self.microbatch`` selects
        between a flat estimate and the pipeline fill/drain formula.  The
        whole-stage estimate is finally scaled by ``0.001 * costmodel_coe``
        and divided by ``layer_num`` to yield a single-layer cost.
        NOTE(review): if ``pp_size < 1`` (never expected), ``result`` is
        unbound and this raises UnboundLocalError.
        """
        # if self.pp_size == 1:
        #     if self.tp_size == 1 and self.dp_size > 1: # pure dp
        #         overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct)
        #         # print(self.bct, self.dp_message_size * self.dc_overlap)
        #         result = self.fct + overlap_part + rest_part + self.eo
        #     elif self.dp_size == 1 and self.tp_size > 1: # pure tp
        #         result = self.fct + self.bct + self.tp_message_size*self.tc
        #     else: # dp+tp
        #         if self.tp_size < self.tp_size * self.dp_size // 2:
        #             if self.layer_type == 'enc':
        #                 overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct)
        #                 result = self.fct + overlap_part + rest_part + self.tp_message_size*self.tc + self.eo
        #                 # print(self.fct, self.bct, self.dp_message_size, self.dp_message_size*self.dc, self.tp_message_size, self.tp_message_size*self.tc)
        #             elif self.layer_type == 'dec':
        #                 overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct*2/3)
        #                 result = self.fct + 1/3*self.bct + overlap_part + rest_part +self.tp_message_size*self.tc+self.eo
        #         else:
        #             if self.layer_type == 'enc':
        #                 overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct*1/2)
        #                 result = self.fct + 1/2*self.bct + overlap_part + rest_part + self.tp_message_size*self.tc + self.eo
        #             elif self.layer_type == 'dec':
        #                 overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct*2/3)
        #                 result = self.fct + 1/3*self.bct + overlap_part + rest_part + self.tp_message_size*self.tc + self.eo
        if self.pp_size >= 1:
            if self.tp_size == 1 and self.dp_size > 1: # pp+dp
                overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct)
                overall_overhead = self.fct + overlap_part + rest_part + self.eo
                if self.microbatch == False:
                    result = overall_overhead
                else:
                    computation_overhead = self.fct + self.bct
                    communication_overhead = overall_overhead-computation_overhead
                    result = self.pipe_with_microbatch(computation_overhead, communication_overhead)
            elif self.dp_size == 1 and self.tp_size > 1: # pp+tp
                if self.microbatch == False:
                    result = self.fct + self.bct + self.tp_message_size*self.tc
                else:
                    overall_overhead = self.fct + self.bct + self.tp_message_size*self.tc
                    result = self.pipe_with_microbatch(overall_overhead, 0)
            elif self.dp_size == 1 and self.tp_size == 1: # pure pp
                if self.microbatch == False:
                    result = self.fct + self.bct
                else:
                    overall_overhead = self.fct + self.bct
                    result = self.pipe_with_microbatch(overall_overhead, 0)
            else: # pp+dp+tp
                # Heuristic split on the dp degree relative to tp.
                if self.tp_size < self.tp_size * self.dp_size // 2:
                    if self.layer_type == 'enc':
                        overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct)
                        overall_overhead = self.fct + overlap_part + rest_part + self.tp_message_size*self.tc + self.eo
                    elif self.layer_type == 'dec':
                        overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct*2/3)
                        overall_overhead = self.fct + 1/3*self.bct + overlap_part + rest_part +self.tp_message_size*self.tc+self.eo
                    if self.microbatch == False:
                        result = overall_overhead
                    else:
                        computation_overhead = self.fct + self.bct + self.tp_message_size*self.tc
                        communication_overhead = overall_overhead-computation_overhead
                        result = self.pipe_with_microbatch(computation_overhead, communication_overhead)
                else:
                    if self.layer_type == 'enc':
                        overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct*1/2)
                        overall_overhead = self.fct + 1/2*self.bct + overlap_part + rest_part + self.tp_message_size*self.tc + self.eo
                    elif self.layer_type == 'dec':
                        overlap_part, rest_part, _ = self.bct_dp_overlap(self.dp_message_size, self.bct*2/3)
                        overall_overhead = self.fct + 1/3*self.bct + overlap_part + rest_part + self.tp_message_size*self.tc + self.eo
                    if self.microbatch == False:
                        result = overall_overhead
                    else:
                        computation_overhead = self.fct + self.bct + self.tp_message_size*self.tc
                        communication_overhead = overall_overhead-computation_overhead
                        result = self.pipe_with_microbatch(computation_overhead, communication_overhead)




        # For fsdp, add allgather time of forward and backward
        if self.fsdp:   # currently unreachable: self.fsdp is always False in __init__
            # forward_allgather_time = self.dp_message_size * self.dc
            # # if self.checkpoint:
            # #     forward_allgather_time *= 2
            # backward_allgather_time = self.dp_message_size * self.dc
            # result = result + (forward_allgather_time + backward_allgather_time)*self.optimal_microbatch

            # forward_allgather_time = self.dp_message_size * 0.5 * self.dc
            forward_allgather_time = self.fsdp_allgather_message_size * self.dc
            result = result + forward_allgather_time*self.optimal_microbatch

            # forward_allgather_time = self.dp_message_size_ori * 0.5 * self.dc
            # result = result + forward_allgather_time*(self.optimal_microbatch-1)

        if self.pp_size > 1 and self.p2p_comm_coe is not None:
            result = result + self.p2p_meg_size * self.p2p_comm_coe


        # print(self.p2p_meg_size)
        # print(self.tp_message_size)
        # print(self.dp_message_size)

        # gees
        coe = 0.001 * self.costmodel_coe        # lyh
        result = result*coe
        # Normalize the whole-stage estimate back to a single layer.
        result = result / self.layer_num
        # print(result)
        return result
    
def check_optimal_chunks(world_size, strategies, optimal_chunk_func, bsz):
    """Map each distinct pipeline degree in *strategies* to the chunk count
    chosen by *optimal_chunk_func* for the corresponding local batch size.
    """
    chunk_per_degree = {}
    for degree in sorted({s[0] for s in strategies}):
        dp_size = world_size // degree
        pure_dp_strategy = [degree, 1, dp_size, {'fsdp': 0, 'cpt': 0}]
        chunk_per_degree[degree] = optimal_chunk_func(bsz / dp_size, pure_dp_strategy)
    return chunk_per_degree

def get_real_chunk(local_bsz, chunk):
    """Return the number of microbatches actually produced when a local
    batch of ``local_bsz`` samples is split into ``chunk`` pieces.

    ``torch.Tensor.chunk`` may return fewer than the requested number of
    chunks when the batch does not divide evenly (each chunk holds
    ceil(n / chunk) samples), so the effective microbatch count can be
    smaller than ``chunk``.
    """
    if chunk == 1:
        return 1
    chunk = int(chunk)
    # Count the pieces torch.chunk actually yields; the original built a
    # throwaway list of per-chunk shapes (in a variable shadowing the stdlib
    # `re` module) just to take its length.
    return len(torch.arange(int(local_bsz)).chunk(chunk))

def get_time_cost_all_stages(layer_timecosts, pp_stage_division):
    """Aggregate per-layer time costs into one total per pipeline stage.

    ``pp_stage_division[i]`` is the number of consecutive layers assigned to
    stage ``i``; the division must cover every layer exactly once.
    """
    assert np.sum(pp_stage_division) == len(layer_timecosts)
    # Cumulative layer counts give the [start, end) boundary of each stage.
    boundaries = np.cumsum([0] + list(pp_stage_division))
    return [np.sum(layer_timecosts[int(start):int(end)])
            for start, end in zip(boundaries[:-1], boundaries[1:])]

def pipeline_costmodel(timecostmodel, layer_num_list, timecostmodel_args_list, strategies, partition, chunks, bsz, min_tp, return_stage_cost=False):
    """Estimate end-to-end pipeline iteration time for a per-layer strategy.

    Args:
        timecostmodel: class (e.g. TimeCostModel) instantiated per
            (layer type, strategy); its ``gen_result()`` gives a per-layer cost.
        layer_num_list: number of layers of each layer type.
        timecostmodel_args_list: per-layer-type kwargs for ``timecostmodel``.
        strategies: per-layer strategy lists; ``None`` marks an infeasible config.
        partition: layers per pipeline stage (``len(partition)`` == pp degree).
        chunks: requested microbatch count — an int, or one per layer type.
        bsz: global batch size.
        min_tp: minimum tensor-parallel degree; used to derive the local batch
            size that actually gets split into microbatches.
        return_stage_cost: if True, also return the per-stage cost list.

    Returns:
        Total estimated time, or ``(stage_costs, total)`` when
        ``return_stage_cost`` is True; infinite when ``strategies`` is None.
    """
    # Fixed defect: leftover debug print statements polluted stdout on every call.
    if strategies is None:
        # Infeasible configuration: report infinite cost.
        if return_stage_cost:
            return [np.inf] * len(partition), np.inf
        return np.inf
    # Expand layer_num_list into a per-layer list of layer-type indices.
    layer_type_ids = []
    for layer_type_id in range(len(layer_num_list)):
        layer_type_ids += [layer_type_id] * layer_num_list[layer_type_id]
    # Local batch size after splitting the global batch across dp groups.
    local_bsz = int(bsz / (strategies[0][1] * strategies[0][2] // min_tp))
    if isinstance(chunks, list):
        # One requested chunk count per layer type; clamp each to what
        # torch.chunk can actually produce for this local batch size.
        chunks = [get_real_chunk(local_bsz, chunks_) for chunks_ in chunks]
        bsz_chunked = [bsz / chunks_ for chunks_ in chunks]
        max_chunk = np.max(chunks)
    else:
        chunks = get_real_chunk(local_bsz, chunks)
        bsz_chunked = [bsz / chunks] * len(layer_num_list)
        max_chunk = chunks
    pp_deg = len(partition)
    layer_num = len(strategies)
    from galvatron.utils import form_strategy, strategy_str2list
    # Deduplicate strategies so each (layer type, strategy) pair is costed once.
    strategies_set = list(set([form_strategy(s) for s in strategies]))
    timecosts_dict_bsz_chunked, timecosts_dict_compute = {}, {}
    for layer_type_id in range(len(layer_num_list)):
        timecosts_dict_bsz_chunked[layer_type_id], timecosts_dict_compute[layer_type_id] = {}, {}
        for s in strategies_set:
            # Full cost (compute + communication) and compute-only cost
            # (no_comm=True) per layer, at the chunked batch size.
            timecosts_dict_bsz_chunked[layer_type_id][s] = timecostmodel(strategy_str2list(s), bsz_chunked[layer_type_id], **timecostmodel_args_list[layer_type_id]).gen_result()
            timecosts_dict_compute[layer_type_id][s] = timecostmodel(strategy_str2list(s), bsz_chunked[layer_type_id], no_comm=True, **timecostmodel_args_list[layer_type_id]).gen_result()
    timecosts_bsz_chunked = [timecosts_dict_bsz_chunked[layer_type_ids[i]][form_strategy(strategies[i])] for i in range(layer_num)]
    timecosts_bsz_compute = [timecosts_dict_compute[layer_type_ids[i]][form_strategy(strategies[i])] for i in range(layer_num)]
    stage_costs_bsz_chunked = get_time_cost_all_stages(timecosts_bsz_chunked, partition)
    stage_costs_compute = get_time_cost_all_stages(timecosts_bsz_compute, partition)

    # p2p & gradient-reduce modeled as asynchronous: the pipeline runs at the
    # pace of its slowest compute stage for (max_chunk - 1 + pp_deg) slots.
    stage_costs_reduce = list(stage_costs_bsz_chunked)
    result = np.max(stage_costs_compute) * (max_chunk - 1 + pp_deg)
    # Communication that cannot hide behind the compute of the stages seen so
    # far extends the critical path.
    for i in range(pp_deg):
        stage_costs_reduce[i] -= np.sum(stage_costs_compute[:i+1])
    reduce_time = np.max(stage_costs_reduce)
    reduce_time = reduce_time if reduce_time > 0 else 0
    result += reduce_time

    if return_stage_cost:
        return stage_costs_bsz_chunked, result
    return result



if __name__ == "__main__":
    # Communication coefficients for data/tensor-parallel allreduce, keyed by
    # '<group_size>' or '<group_size>_<consecutive_flag>'.
    comm_coe_dict: dict = {'8': 0.0062326653993580354, '4_0': 0.006042551648710218, '4_1': 0.006087464692704782, '2_0': 0.006496332820123041, '2_1': 0.006424794567193714, '1': 0}

    p2p_comm_coe_dict: dict = {2: 0.006787944610371979, 4: 0.0074923765069042254, 8: 0.00920674670398468}



    import numpy as np

    # 1. Build the model (only the required parameters are passed).
    model = TimeCostModel(
        strategy=[4, 1, 2],  # PP=4, TP=1, DP=2
        global_batch_size=32,  # global batch size = 32
        chunk=4,        # changing this changes the result
        parameter_size=773,
        forward_computation_time=4.5,
        sequence_length=2048,
        hidden_size=4096,
        comm_coe_dict=comm_coe_dict,
        p2p_comm_coe_dict = p2p_comm_coe_dict,
    )

    # 2. Generate the time-cost estimate and convert to milliseconds.
    result = model.gen_result()
    result = result*1000

    print(f"Time cost per layer: {result:.4f} 毫秒")

    cost = pipeline_costmodel(
        timecostmodel=TimeCostModel,
        layer_num_list=[24],  # all layers share one type
        timecostmodel_args_list=[{'comm_coe_dict':comm_coe_dict,'p2p_comm_coe_dict':p2p_comm_coe_dict}],  # no extra per-type arguments
        strategies=[[2, 2, 2]] * 24,  # every layer uses PP=2, TP=2, DP=2
        partition=[12, 12],  # 2 pipeline stages
        chunks=8,
        bsz=512,
        min_tp=2
    )

    # # 3. Try different strategies (optional)
    # strategies = [
    #     [1, 1, 8],  # pure DP
    #     [1, 8, 1],  # pure TP
    #     [4, 2, 1]  # PP+TP
    # ]
    # for s in strategies:
    #     model = TimeCostModel(strategy=s, global_batch_size=32)
    #     print(f"Strategy {s}: {model.gen_result():.4f} ms")