
import json
import time
import math

import torch

from geesibling.adapters.pytorch.auto_parallel import set_kv_store
from geesibling.adapters.pytorch.auto_parallel.auto_parallel_profiling import DistributedPerformanceProfiler
from geesibling.adapters.pytorch.pipeline.pipeline.set_args import get_args

# from mindspeed.core.auto_parallel.auto_parallel_optimizer import SearchByGreyBox
# from mindspeed.core.auto_parallel.auto_parallel_memory import MemoryCostModel
# from mindspeed.core.auto_parallel.auto_parallel_profiling import (
#     DistributedMemoryProfiler,
#     DistributedOperateProfiler,
#     DistributedPerformanceProfiler
# )


# def filter_unvalid_configs(search_spaces):
#     def filter_unvalid_configs(search_space):
#         if not search_space:
#             return []
#
#         # # 找到整个搜索空间中tp和dp的最大值
#         # max_tp = max(config[1] for config in search_space)
#         # max_dp = max(config[2] for config in search_space)
#         #
#         # # 过滤掉tp或dp等于最大值的配置
#         # filtered_space = [
#         #     config for config in search_space
#         #     if config[1] != max_tp and config[2] != max_dp
#         # ]
#         #
#         # return filtered_space
#
#         # 找到整个搜索空间中tp和dp的最大值
#         max_tp = max(config[1] for config in search_space)
#         max_dp = max(config[2] for config in search_space)
#
#         # 过滤掉tp或dp等于最大值的配置
#         filtered_space = [
#             config for config in search_space
#             if config[1] != max_tp and config[2] != max_dp
#         ]
#     return search_spaces
def filter_unvalid_configs(search_space):
    """Filter the candidate parallel-configuration search space.

    Currently a pass-through: every candidate is kept. Earlier experiments
    pruned configurations using the largest tp/dp values and a memory-cost
    model that dropped would-be-OOM configurations; both filters are
    disabled for now.

    Args:
        search_space: list of candidate configurations ([pp, tp, dp, mbs]),
            possibly empty or None.

    Returns:
        The input list unchanged, or [] when the input is empty or None.
    """
    if search_space:
        return search_space
    return []


def build_initial_spaces(args):
    """Enumerate candidate parallel configurations [pp, tp, dp, mbs].

    pp iterates over divisors of the world size that also divide the layer
    count; tp over powers of two that fit both a single node and a pipeline
    stage; dp is whatever remains; mbs over micro-batch sizes that evenly
    split each data-parallel group's share of the global batch.

    Args:
        args: launcher arguments providing ``nproc_per_node``, ``nnodes``
            and ``global_batch_size``; ``num_layers`` is honored when
            present and defaults to 16 otherwise.

    Returns:
        List of ``[pp, tp, dp, mbs]`` candidate configurations.
    """
    world_size = args.nproc_per_node * args.nnodes
    device_count = args.nproc_per_node

    # Honor a num_layers already supplied by the launcher; only fall back
    # to 16 when missing (previously it was unconditionally overwritten).
    if getattr(args, 'num_layers', None) is None:
        args.num_layers = 16

    solutions = []
    for pp in range(1, world_size + 1):
        # pp must evenly divide both the device count and the model layers.
        if world_size % pp != 0 or args.num_layers % pp != 0:
            continue

        for i in range(device_count):
            tp = 2 ** i
            # tp cannot exceed the devices per node or the devices per
            # pipeline stage; larger powers of two only grow, so stop.
            if tp > device_count or tp > (world_size // pp):
                break
            # NOTE(review): a check that num_attention_heads/num_query_groups
            # are divisible by tp was disabled here; restore if those fields
            # become available on args.

            # dp is the remainder of the device grid; it must evenly divide
            # the global batch size.
            dp = world_size // (pp * tp)
            if world_size % (pp * tp) != 0 or args.global_batch_size % dp != 0:
                continue

            # Enumerate micro-batch counts that evenly split each
            # data-parallel group's batch.
            dp_group_batch_size = args.global_batch_size // dp
            for num_mb in range(1, dp_group_batch_size + 1):
                if dp_group_batch_size % num_mb != 0:
                    continue
                mbs = dp_group_batch_size // num_mb
                solutions.append([pp, tp, dp, mbs])

    return solutions


# def build_initial_spaces(args):
#     world_size = args.nproc_per_node * args.nnodes
#     device_count = args.nproc_per_node
#
#     solutions = []
#     for pp in range(1, world_size + 1):
#         if world_size % pp != 0 or args.num_layers % pp != 0:
#             continue
#
#         for i in range(device_count):
#             tp = 2 ** i
#             if tp > device_count or tp > (world_size // pp):
#                 break
#             if (args.num_query_groups > 1 and args.num_query_groups % tp != 0) \
#                 or (args.num_attention_heads % tp != 0):
#                 break
#
#             max_cp_size = world_size // (pp * tp)
#             for cp_size in range(1, max_cp_size + 1):
#                 if world_size % (pp * tp * cp_size) != 0 or \
#                         args.global_batch_size % (world_size // (pp * tp * cp_size)) != 0:
#                     continue
#
#                 for up in range(1, cp_size + 1):
#                     if cp_size % up != 0:
#                         continue
#                     cp = cp_size // up
#                     head, remainder = divmod(args.num_attention_heads, up * tp)
#                     if (head < 1 or remainder != 0) or (args.seq_length % (2 * cp) != 0):
#                         continue
#
#                     dp = world_size // (pp * tp * cp_size)
#                     dp_group_batch_size = args.global_batch_size // dp
#                     for num_mb in range(1, dp_group_batch_size + 1):
#                         if dp_group_batch_size % num_mb != 0:
#                             continue
#                         mbs = dp_group_batch_size // num_mb
#                         solutions.append([pp, tp, dp, cp, up, mbs])
#     return solutions


def monitor_train_task():
    """Worker-rank loop: wait for profiling tasks broadcast by rank 0.

    Blocks on a broadcast of 5 ints [pp, tp, dp, mbs, task_type] from
    rank 0 and dispatches on task_type:
      * -1: shutdown signal -- leave the loop and return.
      *  2: run a performance-profiling job for the received config.
    Any other task_type is silently ignored and the loop keeps waiting.

    NOTE(review): the sender must broadcast a tensor of the same length (5)
    and dtype -- confirm against the rank-0 shutdown broadcast.
    """
    while True:
        # Receive buffer; must match the sender's shape/dtype exactly.
        message = torch.tensor([0 for _ in range(5)], dtype=torch.int)  # [pp, tp, dp, mbs,task_type]
        torch.distributed.broadcast(message, 0)
        task_type = message[-1].item()
        config = [m.item() for m in message[:-1]]
        if task_type == -1:
            break
        elif task_type == 2:
            DistributedPerformanceProfiler().launch(config)


def export_results(config):
    """Serialize a chosen [pp, tp, dp, mbs] configuration as a JSON string.

    Args:
        config: sequence whose first four entries are pp, tp, dp and mbs.

    Returns:
        JSON string with the strategy under 'optimal_parallel_strategy'.
    """
    strategy = {
        'pipeline-model-parallel-size': config[0],
        'tensor-model-parallel-size': config[1],
        'data-parallel-size': config[2],
        'micro-batch-size': config[3],
    }
    return json.dumps({'optimal_parallel_strategy': strategy})



def search_optimal_configuration(args):
    """Search for the [pp, tp, dp, mbs] configuration with the lowest
    measured step time.

    Node rank 0 enumerates and profiles every candidate configuration;
    every other node rank runs ``monitor_train_task`` and executes the
    profiling jobs rank 0 broadcasts to it.

    Args:
        args: launcher arguments; must provide ``master_addr``,
            ``master_port``, ``node_rank``, ``nnodes``, ``nproc_per_node``
            and ``global_batch_size``.
    """
    set_kv_store(args)

    # Side-channel GLOO group on master_port + 1 so the profiling control
    # messages do not collide with the training job's own port.
    init_method = 'tcp://{}:{}'.format(args.master_addr, int(args.master_port) + 1)
    torch.distributed.init_process_group(
        backend=torch.distributed.Backend.GLOO,
        init_method=init_method,
        rank=args.node_rank,
        world_size=args.nnodes
    )

    if args.node_rank == 0:
        start_time = time.time()
        search_space = build_initial_spaces(args)
        search_space = filter_unvalid_configs(search_space)
        print(f"filter search_space: {len(search_space)}")
        print("\n".join(str(item) for item in search_space), flush=True)

        # Measured duration -> config. Configs with identical durations
        # overwrite each other, which is fine: they are equally good.
        valid_configs = {}

        for config in search_space:
            # Actually run the profiling job for this candidate.
            duration = DistributedPerformanceProfiler().launch(config)
            print(f"Config {config} duration: {duration}", flush=True)

            # Only keep configurations that completed (finite duration).
            if duration != float('inf'):
                valid_configs[duration] = config

        # Pick the configuration with the smallest measured duration.
        if valid_configs:
            min_duration = min(valid_configs.keys())
            best_config = valid_configs[min_duration]
        else:
            best_config = None
            min_duration = float('inf')

        print("best config:", best_config, flush=True)
        # TODO: compute num_mb from the selected config and launch a normal
        #  training run with it.

        # Tell monitor_train_task on the other ranks to exit. The message
        # must have the same shape as the receiver's buffer (5 ints:
        # [pp, tp, dp, mbs, task_type]); task_type == -1 stops the loop.
        # (Was broadcasting 7 ints, mismatching the 5-int receive buffer.)
        torch.distributed.broadcast(torch.tensor([-1 for _ in range(5)], dtype=torch.int), 0)

        # Report the found configuration (was printing a literal `1`).
        results = export_results(best_config) if best_config is not None else None
        print(f"find optimal configuration: {results}, cost_time: {time.time() - start_time}")
    else:
        monitor_train_task()