from typing import Dict, List
#from geesibling.core.types import Graph, Node
from dataclasses import dataclass, field
import pprint
import sys
from copy import deepcopy
from geesibling.adapters.pytorch.auto_parallel_context import AbstractStrategy, AutoParallelContext, MCMCStrategy

# Module-level cache of the most recently computed ExcutePlan.
# Written by getMegatronModelExcutePlan(); read (as a deep copy) via
# get_global_plan_param(). None until a plan has been computed.
GLOBAL_PLAN = None

@dataclass
class ExcutePlan:
    """A heterogeneous parallel execution plan.

    Describes a pipeline/tensor/data/expert parallel layout in which each
    pipeline stage may have a *different* tensor-parallel degree
    (``tp_splits``), and enumerates the rank groups a Megatron-style runtime
    needs to build its communication groups.

    Global rank layout (see also ``get_dp_groups``)::

        global_rank = pre_global_sum[pp_rank] + dp_rank * tp_splits[pp_rank] + tp_rank

    where ``pre_global_sum[pp_rank]`` is the number of ranks consumed by all
    earlier pipeline stages.
    """
    # Search policy vector: index 0 = pipeline-parallel size, index 2 =
    # data-parallel size. Index 1 is not read here, and index 3 is used by the
    # caller as the expert-parallel size for MoE models — TODO confirm against
    # the strategy-search code.
    policy: List[int]
    # Number of model layers placed on each pipeline stage.
    pp_layers_per_partition: List[int]
    # Tensor-parallel degree of each pipeline stage (stages may differ).
    tp_splits: List[int]
    # Expert-parallel degree (1 for non-MoE models).
    ep_size: int
    # Lazily computed per-stage rank prefix sums; None until first use
    # (populated by _get_pre_global_sum()).
    pre_global_sum: List[int] = field(default=None)

    @property
    def pipeline_model_parallel_size(self) -> int:
        """Number of pipeline stages (``policy[0]``)."""
        return self.policy[0]
    
    
    @property
    def data_parallel_size(self) -> int:
        """Data-parallel degree (``policy[2]``)."""
        return self.policy[2]

    @property
    def expert_parallel_size(self) -> int:
        """Expert-parallel degree."""
        return self.ep_size

    
    def arg_check(self):
        """Validate that the per-stage lists are consistent with ``policy``."""
        assert len(self.pp_layers_per_partition) == self.policy[0], 'pp_size error'
        assert len(self.pp_layers_per_partition) == len(self.tp_splits)
        # TODO: also check that sum(self.tp_splits) * self.policy[2] matches
        # the world size once it is available here.
        pass

    def _get_pre_global_sum(self):
        """Return (and cache) the first global rank of every pipeline stage.

        ``pre_global_sum[i]`` equals the total number of ranks
        (``tp_splits[j] * data_parallel_size`` summed over ``j < i``) used by
        stages before ``i``.
        """
        if self.pre_global_sum is not None:
            return self.pre_global_sum
        else:
            pipeline_model_parallel_size = self.pipeline_model_parallel_size
            # Prefix sums over ranks-per-stage.
            pre_global_sum = []
            for i in range(pipeline_model_parallel_size):
                if i == 0:
                    pre_global_sum.append(0)
                else:
                    pre_global_sum.append(pre_global_sum[i-1] + self.tp_splits[i-1] * self.data_parallel_size)
            self.pre_global_sum = pre_global_sum
            return pre_global_sum

    # global_rank_id = pre_global_sum[pp_rank] + dp_rank * tp_splits[pp_rank] + tp_rank
    def get_dp_groups(self):
        """Enumerate data-parallel groups: one group per (stage, tp_rank) pair,
        each containing the same tp slot across all dp replicas."""
        pre_global_sum = self._get_pre_global_sum()

        ranks = []
        for pp_rank in range(self.pipeline_model_parallel_size):
            for tp_rank in range(self.tp_splits[pp_rank]):
                rank = []
                for dp_rank in range(self.data_parallel_size):
                    rank.append(pre_global_sum[pp_rank] + dp_rank * self.tp_splits[pp_rank] + tp_rank) 
                ranks.append(rank)
        return ranks
    
    def get_tp_groups(self):
        """Enumerate tensor-parallel groups: one group per (stage, dp replica),
        each containing the stage's tp_splits[pp_rank] consecutive ranks."""
        pre_global_sum = self._get_pre_global_sum()

        ranks = []
        for pp_rank in range(self.pipeline_model_parallel_size):
            for dp_rank in range(self.data_parallel_size):
                rank = []
                for tp_rank in range(self.tp_splits[pp_rank]):
                    rank.append(pre_global_sum[pp_rank] + dp_rank * self.tp_splits[pp_rank] + tp_rank)
                ranks.append(rank)
        return ranks

    def get_pp_groups(self):
        """Enumerate pipeline-parallel groups, one rank per stage.

        Because stages may have unequal tp degrees, the tp index is folded
        with ``%`` into each stage's (smaller) tp range so every group still
        has exactly one member per stage.
        """
        pre_global_sum = self._get_pre_global_sum()
        ranks = []
        max_tp_rank = max(self.tp_splits)
        for dp_rank in range(self.data_parallel_size):
            for tp_rank in range(max_tp_rank):
                rank = []
                for pp_rank in range(self.pipeline_model_parallel_size):
                    # Uneven tp split: wrap with % to balance the load.
                    tp_rank_real = tp_rank % self.tp_splits[pp_rank]
                    rank.append(pre_global_sum[pp_rank] + dp_rank * self.tp_splits[pp_rank] + tp_rank_real)
                ranks.append(rank)
        
        return ranks
    
    def get_forward_pp_rank(self):
        """Build forward pipeline send chains via DFS over stage adjacency.

        For each dp replica, every rank of stage ``i`` is connected to the
        ranks of stage ``i+1`` whose tp index matches it (striding by this
        stage's tp degree, to fan out when the next stage is wider). Each
        root-to-leaf path through those edges becomes one chain.

        Returns:
            (groups, ranks): ``groups`` holds the raw chains; ``ranks`` holds
            the same chains padded with -1 up to pipeline_model_parallel_size.
            NOTE: both names alias the same inner lists before padding, so
            padding mutates the lists ``groups`` also references — the
            shallow ``rank.copy()`` only copies the outer list entries.
        """
        def dfs(now: int, next_ranks_map: Dict[int, List[int]], rank: List[int]):
            # Leaf (last stage): record a completed chain.
            if next_ranks_map[now] == []:
                ranks.append(rank.copy())
                return
            
            for next_id in next_ranks_map[now]:
                rank.append(next_id)
                dfs(now=next_id, next_ranks_map=next_ranks_map, rank=rank)
                rank.pop()

        pre_global_sum = self._get_pre_global_sum()
        ranks = []

        for dp_rank in range(self.data_parallel_size):
            next_ranks_map: Dict[int, List[int]] = {}
            for pp_rank in range(self.pipeline_model_parallel_size):
                if pp_rank == self.pipeline_model_parallel_size-1:
                    # Last stage has no successors.
                    for tp_rank in range(self.tp_splits[pp_rank]):
                        rank_id = pre_global_sum[pp_rank] + dp_rank * self.tp_splits[pp_rank] + tp_rank
                        next_ranks_map[rank_id] = [] 
                else:
                    for tp_rank in range(self.tp_splits[pp_rank]):
                        # tp indices in the next stage reachable from this tp_rank;
                        # empty when tp_rank >= tp_splits[pp_rank + 1] (narrowing stage).
                        next_rank_ids = [id for id in range(tp_rank, self.tp_splits[pp_rank + 1], self.tp_splits[pp_rank])]
                        rank_id = pre_global_sum[pp_rank] + dp_rank * self.tp_splits[pp_rank] + tp_rank
                        next_ranks_map[rank_id] = []
                        for next_rank_id in next_rank_ids:
                            next_ranks_map[rank_id].append(pre_global_sum[pp_rank + 1] + dp_rank * self.tp_splits[pp_rank + 1] + next_rank_id)
                        
            # DFS from every rank of the first stage.
            for tp_rank in range(self.tp_splits[0]):
                root = pre_global_sum[0] + dp_rank * self.tp_splits[0] + tp_rank
                dfs(now=root,next_ranks_map=next_ranks_map, rank = [root])
        
        groups = [rank.copy() for rank in ranks]
        print(groups)
        # Pad chains that ended early (narrowing stages) with -1 placeholders.
        for rank in ranks:
            for i in range(self.pipeline_model_parallel_size - len(rank)):
                rank.append(-1)    
        return groups, ranks

    def get_backword_pp_rank(self):
        """Build backward pipeline chains: mirror of get_forward_pp_rank.

        DFS starts from the *last* stage and walks predecessor edges; the
        returned chains are reversed so they read front-to-back.

        Returns:
            (groups, ranks): ``groups`` holds the raw reversed chains;
            ``ranks`` holds chains padded with -1 (padding appended at the
            front after the reversal) up to pipeline_model_parallel_size.
        """
        def dfs(now: int, pre_ranks_map: Dict[int, List[int]], rank: List[int]):
            if pre_ranks_map[now] == []:
                ranks.append(rank.copy())
                return

            for pre_id in pre_ranks_map[now]:
                rank.append(pre_id)
                dfs(now=pre_id, pre_ranks_map=pre_ranks_map, rank=rank)
                rank.pop()

        pre_global_sum = self._get_pre_global_sum()
        ranks = []

        for dp_rank in range(self.data_parallel_size):
            pre_ranks_map: Dict[int, List[int]] = {}
            for pp_rank in range(self.pipeline_model_parallel_size):
                if pp_rank == 0:
                    # First stage has no predecessors.
                    for tp_rank in range(self.tp_splits[pp_rank]):
                        rank_id = pre_global_sum[pp_rank] + dp_rank * self.tp_splits[pp_rank] + tp_rank
                        pre_ranks_map[rank_id] = []
                else:
                    for tp_rank in range(self.tp_splits[pp_rank]):
                        # tp indices in the previous stage that feed this tp_rank.
                        pre_rank_ids = [id for id in range(tp_rank, self.tp_splits[pp_rank - 1], self.tp_splits[pp_rank])]
                        rank_id = pre_global_sum[pp_rank] + dp_rank * self.tp_splits[pp_rank] + tp_rank
                        pre_ranks_map[rank_id] = []
                        for pre_rank_id in pre_rank_ids:
                            pre_ranks_map[rank_id].append(pre_global_sum[pp_rank - 1] + dp_rank * self.tp_splits[pp_rank - 1] + pre_rank_id)
                
            # DFS from every rank of the last stage.
            for tp_rank in range(self.tp_splits[self.pipeline_model_parallel_size-1]):
                root = pre_global_sum[self.pipeline_model_parallel_size-1] + dp_rank * self.tp_splits[self.pipeline_model_parallel_size-1] + tp_rank
                dfs(now=root, pre_ranks_map=pre_ranks_map, rank=[root])

        groups = [rank.copy()[::-1] for rank in ranks]   
        for rank in ranks:
            for i in range(self.pipeline_model_parallel_size - len(rank)):
                rank.append(-1)
            rank.reverse()   
        return groups, ranks


                    
    
    def get_tp_pp_groups(self):
        """Enumerate combined tp+pp groups: one group per dp replica,
        containing every rank of that replica across all stages."""
        pre_global_sum = self._get_pre_global_sum()

        ranks = []
        for dp_rank in range(self.data_parallel_size):
            rank = []
            for pp_rank in range(self.pipeline_model_parallel_size):
                for tp_rank in range(self.tp_splits[pp_rank]):
                    rank.append(pre_global_sum[pp_rank] + dp_rank * self.tp_splits[pp_rank] + tp_rank)
            ranks.append(rank)
        return ranks
    
    def get_tp_dp_groups(self):
        """Enumerate combined tp+dp groups: one group per pipeline stage,
        containing every rank of that stage across all dp replicas."""
        pre_global_sum = self._get_pre_global_sum()

        ranks = []
        for pp_rank in range(self.pipeline_model_parallel_size):
            rank = []
            for dp_rank in range(self.data_parallel_size):
                for tp_rank in range(self.tp_splits[pp_rank]):
                    rank.append(pre_global_sum[pp_rank] + dp_rank * self.tp_splits[pp_rank] + tp_rank)
            ranks.append(rank)
        return ranks
    
    def get_cp_groups(self):
        """Enumerate context-parallel groups: cp is unused here, so every
        rank forms its own singleton group."""
        # World size = total tp slots across stages, replicated dp times.
        world_size = sum(self.tp_splits) * self.policy[2]
        ranks = []
        for i in range(world_size):
            rank = []
            rank.append(i)
            ranks.append(rank)
        return ranks

    def get_ep_groups(self):
        """Enumerate expert-parallel groups.

        The dp dimension is factored as (dp / ep) independent slices of ep
        consecutive replicas; each group holds the same tp slot across the ep
        replicas of one slice (striding by the stage's tp degree).
        """
        pre_global_sum = self._get_pre_global_sum()
        ranks = []

        for pp_rank in range(self.pipeline_model_parallel_size):
            for dp_indepdent_rank in range(self.data_parallel_size // self.expert_parallel_size):
                for tp_rank in range(self.tp_splits[pp_rank]):
                    begin_rank = pre_global_sum[pp_rank] + dp_indepdent_rank * self.expert_parallel_size * self.tp_splits[pp_rank] + tp_rank
                    end_rank = pre_global_sum[pp_rank] + (dp_indepdent_rank + 1) * self.expert_parallel_size * self.tp_splits[pp_rank] + tp_rank
                    rank = [rank_id for rank_id in range(begin_rank, end_rank, self.tp_splits[pp_rank])]
                    ranks.append(rank)
        return ranks

    def get_tp_ep_groups(self):
        """Enumerate combined tp+ep groups: per stage and per (dp / ep)
        slice, the full contiguous block of ep * tp ranks."""
        pre_global_sum = self._get_pre_global_sum()

        ranks = []
        for pp_rank in range(self.pipeline_model_parallel_size):
            for dp_indepent_rank in range(self.data_parallel_size // self.expert_parallel_size):
                begin_rank = pre_global_sum[pp_rank] + dp_indepent_rank * self.expert_parallel_size * self.tp_splits[pp_rank]
                end_rank = pre_global_sum[pp_rank] + (dp_indepent_rank + 1) * self.expert_parallel_size * self.tp_splits[pp_rank]
                rank = [rank_id for rank_id in range(begin_rank, end_rank)]
                ranks.append(rank)

        return ranks
    
    def get_pp_tp_ep_groups(self):
        """Enumerate combined pp+tp+ep groups: one group per (dp / ep)
        slice, concatenating each stage's ep * tp rank block."""
        pre_global_sum = self._get_pre_global_sum()

        ranks = []
        for dp_indepent_rank in range(self.data_parallel_size // self.expert_parallel_size):
            rank = []
            for pp_rank in range(self.pipeline_model_parallel_size):
                begin_rank = pre_global_sum[pp_rank] + dp_indepent_rank * self.expert_parallel_size * self.tp_splits[pp_rank]
                end_rank = pre_global_sum[pp_rank] + (dp_indepent_rank + 1) * self.expert_parallel_size * self.tp_splits[pp_rank]
                for rank_id in range(begin_rank, end_rank): 
                    rank.append(rank_id)

            ranks.append(rank)

        return ranks

    def get_dp_independent_groups(self):
        """Enumerate the dp groups that remain after factoring out ep:
        per stage, per ep replica, per tp slot, ranks strided by tp * ep."""
        pre_global_sum = self._get_pre_global_sum()

        ranks = []
        for pp_rank in range(self.pipeline_model_parallel_size):
            for  ep_rank in range(self.expert_parallel_size):
                for tp_rank in range(self.tp_splits[pp_rank]):
                    begin_rank = pre_global_sum[pp_rank] + ep_rank * self.tp_splits[pp_rank] + tp_rank
                    end_rank = pre_global_sum[pp_rank] + (self.data_parallel_size) * self.tp_splits[pp_rank] + tp_rank
                    ranks.append([rank_id for rank_id in range(begin_rank, end_rank, self.tp_splits[pp_rank] * self.expert_parallel_size)])
        
        return ranks
    
    def get_tp_size_by_rank(self, rank) -> int:
        """Return the tensor-parallel degree of the stage that owns ``rank``.

        NOTE(review): falls through to an implicit None when the rank is not
        found in any tp group — callers should treat that as an error.
        """
        all_ranks = self.get_tp_groups()
        for ranks in all_ranks:
            if rank in ranks:
                return len(ranks)

def get_global_plan_param():
    """Return a defensive deep copy of the cached global plan, or None if unset."""
    plan = GLOBAL_PLAN
    return None if plan is None else deepcopy(plan)

def get_stratege_by_type(strategy_type: str) -> AbstractStrategy:
    """Map a strategy-type name to a strategy instance.

    Only 'mcmc' is supported; any other name yields None.
    """
    return MCMCStrategy() if strategy_type == 'mcmc' else None

def getMegatronModelExcutePlan(model_name: str, world_size: int, layer_num: int, layer_info: Dict[str, Dict[str, object]], num_exports: int = 0, strategy_type: str = 'mcmc'):
    """Search for a parallel execution plan and cache it in GLOBAL_PLAN.

    Runs the auto-parallel strategy search (e.g. MCMC) over the profiled
    per-layer info and wraps the best result in an ExcutePlan.

    Args:
        model_name: model identifier; 'mixtral' enables expert parallelism.
        world_size: total number of devices available.
        layer_num: number of model layers to partition.
        layer_info: per-layer profiling data keyed by layer name.
        num_exports: number of experts (sic — presumably "num_experts").
        strategy_type: search-strategy name understood by get_stratege_by_type.

    Returns:
        The ExcutePlan chosen by the search (also stored in GLOBAL_PLAN).

    Raises:
        AssertionError: when the strategy type is unsupported or the search
            found no feasible tp split (not enough GPUs).
    """
    print(f'world_size is {world_size}, layer_num = {layer_num}, model_name = {model_name}, num_exports= {num_exports}')

    stratege = get_stratege_by_type(strategy_type)
    # was `!= None`: identity comparison is the correct None check
    assert stratege is not None, "搜索策略不支持"

    context = AutoParallelContext(stratege)
    best_stratge, best_layers_pp_stratege, best_layers_tp_stratege = context.execute_auto_parallel(model_name=model_name, world_size=world_size, layer_num=layer_num, layer_info=layer_info, num_exports=num_exports)

    # was `!= []`: an empty result means the search could not place the model
    assert best_layers_tp_stratege, "gpu数量不够"

    print(f'pp_layers_per_partition is {best_layers_pp_stratege}, tp_splits is {best_layers_tp_stratege}')

    global GLOBAL_PLAN
    GLOBAL_PLAN = ExcutePlan(
        policy=best_stratge,
        pp_layers_per_partition=best_layers_pp_stratege,
        tp_splits=best_layers_tp_stratege,
        # policy[3] carries the expert-parallel size for MoE models only
        ep_size=best_stratge[3] if model_name in ['mixtral'] else 1,
    )

    return GLOBAL_PLAN

def printMegatronTrace(later_info: Dict[str, Dict[str, object]]):
    """Pretty-print per-layer trace information.

    For each layer, prints the layer name followed by one line per known
    trace field; fields outside the known set are silently skipped.
    """
    # Label printed in front of each recognized field. Kept byte-for-byte
    # identical to the historical output (including the missing " : " on
    # tokens_per_expert and the "out_shape" spelling for output_shape).
    field_labels = {
        'input': "input : ",
        'output': "output : ",
        'input_shape': "input_shape : ",
        'output_shape': "out_shape : ",
        'memory_allocated': "memory_allocated : ",
        'max_memory_allocated': "max_memory_allocated : ",
        'tokens_per_expert': "tokens_per_expert",
        'weight_shape': "weight_shape : ",
        'init_memory_allocated': "init_memory_allocated : ",
    }
    for layer_name, megatron_node in later_info.items():
        print(f'layer_name: {layer_name}')
        for key, value in megatron_node.items():
            label = field_labels.get(key)
            if label is not None:
                print(label, value)

if __name__ == '__main__':

    # Ad-hoc smoke test: a 4-stage pipeline, no tensor/data/expert parallelism.
    plan =  ExcutePlan(
        policy=[4,1,1],
        # layers assigned to each pipeline stage
        pp_layers_per_partition=[2,2,2,2],
        # tensor-parallel degree of each stage
        tp_splits = [1,1,1,1],
        ep_size=1
    )

    # Uncomment to inspect other group enumerations:
    # print(plan.get_ep_groups())
    # print(plan.get_tp_ep_groups())
    # print(plan.get_pp_tp_ep_groups())
    # print(plan.get_dp_independent_groups())
    print(plan.get_forward_pp_rank())
    # print(plan.get_backword_pp_rank())
    # print(plan.get_tp_groups())
    # print(plan.get_dp_groups())
    # print(plan.get_tp_pp_groups())
    # res = global_sizes('mixtral', 8, 8, order = 'pp-tp-dp')
    # print(res)