import json
import sys
from typing import Optional, Sequence, Tuple, Dict
import numpy as np
from get_model.model_computation_generator_by_layer import ModelGeneratorByLayer
from get_model.parse_hardware_config import parse_hardware_config
from get_model.parse_model_config import parse_yaml_modules
from solver.intra_solver_by_ILP.intra_solver_layer_computation import solve_without_all_gather, test_model_stage_solve, \
    test_model_stage_solve_dp
import logging
import csv
import os

import sys
import io
from typing import List, Tuple

# Input configuration files for the model and the hardware cluster.
model_config_path = "config/model_config_qwen2.yaml"
hardware_config_path = "config/hardware_config.yaml"

# Per-solve ILP log written by get_compute_costs().
log_path="result/compute_cost_log.txt"

# Text dump of the best pipeline plan, and its JSON re-encoding.
strategy_output_file = "experiment/system_output.txt"
complex_strategy_json_file="experiment/complex_strategy.json"

# Parsed once at import time; used module-wide below.
hardware_config, device_mesh = parse_hardware_config(hardware_config_path)
training_param, module_list, module_length = parse_yaml_modules(model_config_path)
_DISABLE_NUMBA = False  # set True to force the interpreted path in maybe_numba_jit
logger = logging.getLogger(__name__)

def maybe_numba_jit(func):
    """Decorator that JIT-compiles *func* with numba when it is available.

    Falls back to the plain Python function (with a warning) if numba is
    not installed.  Even when a jitted version exists, the module-level
    ``_DISABLE_NUMBA`` flag selects the interpreted path at call time.
    """
    try:
        from numba import jit  # pylint: disable=import-outside-toplevel
    except ImportError:
        logger.warning("Install numba to jit and accelerate the function.")
        return func

    from functools import wraps  # local import: only needed on this path

    jitted_func = jit(nopython=True)(func)

    # functools.wraps preserves __name__/__doc__ of the wrapped function,
    # which the original wrapper lost.
    @wraps(func)
    def wrapper(*args, **kwargs):
        if _DISABLE_NUMBA:
            return func(*args, **kwargs)
        return jitted_func(*args, **kwargs)

    return wrapper


import re
import json


def parse_strategy_output(output_text):
    """Parse the textual strategy dump into a JSON-serializable dict.

    Args:
        output_text: captured text produced by the stage-construction run,
            containing "Stage Id:", "Layers:", "Submesh Shape:" and
            "IntraParallelStrategy ... DP=x ... TP=y" lines, with stages
            separated by "=====" lines.

    Returns:
        ``{"stages": [...]}`` where each stage dict holds ``stage_id``,
        ``layer_range`` (half-open ``[start, end)``), ``submesh_shape``
        and a list of ``{"dp": ..., "tp": ...}`` strategies.
    """
    strategy_data = {"stages": []}
    current_stage = None

    # Process the output text line by line.
    for line in output_text.split('\n'):
        line = line.strip()

        # Start of a new stage section.
        if line.startswith("Stage Id:"):
            current_stage = {
                "stage_id": int(line.split(':')[1].strip()),
                "strategies": []
            }
        elif line.startswith("Layers:") and current_stage:
            layers = line.split(':')[1].strip().split(' to ')
            # Stored as a half-open range [start, end).
            current_stage["layer_range"] = [int(layers[0]), int(layers[1]) + 1]
        elif line.startswith("Submesh Shape:") and current_stage:
            shape_str = line.split(':')[1].strip()
            if shape_str.startswith('(') and shape_str.endswith(')'):
                shape = tuple(map(int, shape_str[1:-1].split(',')))
                current_stage["submesh_shape"] = list(shape)

        # Per-stage intra-op parallel strategy line.
        elif "IntraParallelStrategy" in line and current_stage:
            dp_match = re.search(r"DP=(\d+)", line)
            tp_match = re.search(r"TP=(\d+)", line)
            # BUG FIX: guard against a malformed line instead of crashing
            # on `.group(1)` of a None match.
            if dp_match and tp_match:
                current_stage["strategies"].append(
                    {"dp": int(dp_match.group(1)), "tp": int(tp_match.group(1))})

        # A "=====" separator terminates the current stage.
        elif line.startswith("=====") and current_stage:
            strategy_data["stages"].append(current_stage)
            current_stage = None

    # BUG FIX: flush a trailing stage that was not followed by "=====",
    # which used to be silently dropped.
    if current_stage is not None:
        strategy_data["stages"].append(current_stage)

    return strategy_data


def save_strategy_json(strategy_output_file, complex_strategy_json_file):
    """Parse the saved strategy text file and write it out as JSON.

    Args:
        strategy_output_file: path to the text file written by ``training_dp``.
        complex_strategy_json_file: destination path for the JSON config.
    """
    # BUG FIX: the *path string* itself used to be handed to the parser
    # (so it never saw any stage lines); read the file contents first.
    with open(strategy_output_file, 'r') as f:
        output_text = f.read()

    strategy_config = parse_strategy_output(output_text)

    with open(complex_strategy_json_file, 'w') as f:
        json.dump(strategy_config, f, indent=2)

    print(f"策略配置已保存为 {complex_strategy_json_file}")

def capture_print_output(func, *args, **kwargs):
    """Run ``func(*args, **kwargs)`` and return everything it printed.

    Args:
        func: callable whose stdout output should be captured.
        *args, **kwargs: forwarded to ``func``.

    Returns:
        The text written to stdout by the call, as a single string.
    """
    old_stdout = sys.stdout
    buffer = io.StringIO()
    sys.stdout = buffer
    try:
        func(*args, **kwargs)
    finally:
        # BUG FIX: restore stdout even if the wrapped call raises, so a
        # failure inside one stage does not swallow all later output.
        sys.stdout = old_stdout
    return buffer.getvalue()

def training_dp(num_modules,
                num_devices,
                num_microbatches,
                submesh_choices,
                compute_cost,
                max_n_succ_stages):
    """Auto stage dynamic programming.

    Enumerates each distinct stage cost as a candidate cap ``t_max``, runs
    the stage-construction DP under that cap, and keeps the cheapest
    pipeline found.  Side effects: writes the winning plan (plus each
    stage's captured intra-op solve output) to ``strategy_output_file``
    and re-saves it as JSON via ``save_strategy_json``.

    Args:
        num_modules: number of layers/modules to partition into stages.
        num_devices: total device count of the cluster.
        num_microbatches: pipeline micro-batch count.
        submesh_choices: candidate submesh shapes.
        compute_cost: ``cost[i, k, m, n]`` array from ``get_compute_costs``.
        max_n_succ_stages: per-entry cap on the number of succeeding stages.

    Returns:
        ``(best_cost, best_solution)``; ``best_solution`` is a list of
        ``((start_module, next_start_module), submesh_shape, 1)`` tuples,
        or ``None`` when no feasible plan exists.
    """
    # timers("stage-construction-dp").start()

    all_possible_stage_costs = np.sort(np.unique(compute_cost))
    best_cost = np.inf  # optimization objective, initialized to +inf
    best_solution = None
    last_max_stage_cost = 0.0  # largest cap tried so far (for dedup pruning)

    gap = 1e-6
    # there must be at least one candidate cap to try
    assert len(all_possible_stage_costs), "no solution in auto stage construction."

    # Enumerate and fix t_max, as done in the paper
    for max_stage_cost in all_possible_stage_costs:
        # The paper's two early-pruning rules
        if max_stage_cost * num_microbatches >= best_cost:
            break
        if max_stage_cost - last_max_stage_cost < gap:
            continue
        cost, solution = training_dp_impl(num_modules,
                                          num_devices,
                                          num_microbatches,
                                          submesh_choices,
                                          compute_cost,
                                          max_n_succ_stages,
                                          max_stage_cost)
        if cost < best_cost:
            best_cost = cost
            best_solution = solution
        last_max_stage_cost = max_stage_cost

    # print("best solution is : ", best_solution) # [((0, 2), (4, 8), 1), ((2, 9), (1, 8), 1), ((9, 16), (1, 8), 1), ((16, 18), (1, 8), 1), ((18, 21), (1, 8), 1)]
    # for stage_id, ((current_module, next_start_module), submesh_shape, _) in enumerate(best_solution):
    #     print(f"  Stage Id: {stage_id}")
    #     print(f"  Layers: {current_module} to {next_start_module-1}")
    #     print(f"  Submesh Shape: {submesh_shape}")
    #     test_model_stage_solve_dp(current_module, next_start_module, submesh_shape)

    """保存完整的策略输出到文件"""

    # Persist the complete strategy output (best plan + per-stage solves).
    # NOTE(review): if best_solution is None this loop raises TypeError —
    # presumably infeasible inputs never reach here; confirm with callers.
    with open(strategy_output_file, 'w') as f:
        # Record the best solution itself
        f.write(f"best solution is : {best_solution}\n")

        for stage_id, ((current_module, next_start_module), submesh_shape, _) in enumerate(best_solution):
            # Basic per-stage information
            f.write(f"  Stage Id: {stage_id}\n")
            f.write(f"  Layers: {current_module} to {next_start_module-1}\n")
            f.write(f"  Submesh Shape: {submesh_shape}\n")

            # Capture the print output of test_model_stage_solve_dp
            stage_output = capture_print_output(
                test_model_stage_solve_dp,
                current_module,
                next_start_module,
                submesh_shape
            )
            f.write(stage_output)

    print(f"策略输出已完整保存至 {strategy_output_file}")
    save_strategy_json(strategy_output_file, complex_strategy_json_file)

    return best_cost, best_solution

@maybe_numba_jit
def training_dp_impl(num_modules,
                     num_devices,
                     num_microbatches,
                     submesh_choices,
                     compute_cost,
                     max_n_succ_stages,
                     max_stage_cost):
    """The core implementation of the DP algorithm.

    Computes f(s, i, d): the minimum total compute cost of splitting
    layers i..num_modules-1 into s stages on d devices, with every stage
    costing at most ``max_stage_cost``.  Returns the pipeline cost
    (including the (num_microbatches - 1) * max-stage-cost latency term)
    and the back-traced stage assignment, or (inf, None) when infeasible.
    """
    # DP table, initialized to +inf
    f = np.full((num_modules + 1, num_modules + 1, num_devices + 1),
                np.inf,
                dtype=np.float32)
    # Max single-stage cost along the best path to each state (t_max tracker)
    f_stage_max = np.full((num_modules + 1, num_modules + 1, num_devices + 1),
                          0.0,
                          dtype=np.float32)
    # f_argmin stores the argmin indices of f, used to back-trace the optimal
    # path.  Each entry is a triple (k, m, n_config): the next start layer,
    # the submesh choice, and the auto-sharding configuration.
    f_argmin = np.full(
        (num_modules + 1,
         num_modules + 1,
         num_devices + 1,
         3),
        -1,
        dtype=np.int32)

    # DP base case:
    # f(s, k, d; t_max) — zero stages, nothing left to place, zero devices
    f[0, num_modules, 0] = 0

    for s in range(1, num_modules + 1):
        for i in range(num_modules - 1, -1, -1):
            for d in range(1, num_devices + 1):
                for k in range(num_modules, i, -1):
                    if i >= k:
                        continue  # skip illegal intervals (defensive; k > i by loop bounds)
                    for m, submesh in enumerate(submesh_choices):
                        n_submesh_devices = np.prod(np.array(submesh))
                        if n_submesh_devices <= d:
                            for n_config in range(1):
                                stage_cost = compute_cost[i, k - 1, m, n_config]

                                if stage_cost < 0 or np.isinf(stage_cost):
                                    continue  # skip invalid costs

                                if s - 1 <= max_n_succ_stages[i, k - 1, m, n_config]:
                                    new_cost = f[s - 1, k, d - n_submesh_devices] + stage_cost
                                    # Feasible: update the minimum total cost
                                    if stage_cost <= max_stage_cost and new_cost < f[s, i, d]:
                                        f[s, i, d] = new_cost
                                        # Track the max stage cost on this path for the
                                        # later pipeline-latency term
                                        f_stage_max[s, i, d] = max(float(f_stage_max[s - 1, k, d - n_submesh_devices]),
                                                                   stage_cost)
                                        # Record the back-trace indices (next start layer,
                                        # submesh choice, sharding config)
                                        f_argmin[s, i, d] = (k, m, n_config)

    # Scan all stage counts for the cheapest full cover (best_s, best_total_cost)
    best_s = -1
    best_total_cost = np.inf
    for s in range(1, num_modules + 1):
        if f[s, 0, num_devices] < best_total_cost:
            best_s = s
            best_total_cost = f[s, 0, num_devices]
    # Still infinite => no feasible plan under this max_stage_cost
    if np.isinf(best_total_cost):
        return np.inf, None

    # Final total cost: add the pipeline latency, i.e. the max stage cost
    # times (num_microbatches - 1)
    total_cost = f[best_s, 0, num_devices] + (num_microbatches - 1) * f_stage_max[best_s, 0, num_devices]

    current_s = best_s
    current_module = 0
    current_devices = num_devices



    # Follow the indices recorded in f_argmin to recover each stage's start
    # layer, submesh choice and auto-sharding configuration
    res = []
    while current_s > 0 and current_module < num_modules and current_devices > 0:
        next_start_module, submesh_choice, num_config = (
            f_argmin[current_s, current_module, current_devices])
        assert next_start_module != -1 and current_devices != -1
        res.append(
            ((int(current_module), int(next_start_module)), submesh_choices[submesh_choice],
             1))
        current_s -= 1
        current_module = next_start_module
        current_devices -= np.prod(np.array(submesh_choices[submesh_choice]))
    assert (current_s == 0 and current_module == num_modules and
            current_devices == 0)

    return total_cost, res

def get_submesh_choices(num_hosts: int, num_devices_per_host: int, space: str, manually_specified_submeshes: Optional[Sequence[Tuple[int, int]]] = None):
    """Gets the valid choices of submesh shapes.

    Args:
        num_hosts: number of hosts in the cluster.
        num_devices_per_host: devices per host; must be a power of two.
        space: search space — "all", "power_of_two", "small_power_of_two",
            or "manual" (use ``manually_specified_submeshes`` verbatim).
        manually_specified_submeshes: required iff ``space == "manual"``.

    Returns:
        Tuple of (rows, cols) submesh shapes.

    Raises:
        ValueError: for an unknown ``space``, or "manual" without shapes.
    """
    # if global_config.overwrite_submesh_choices is not None:
    #     return global_config.overwrite_submesh_choices
    submesh_choices = []

    # Smaller-than-one-host submeshes: (1, 1), (1, 2), ..., (1, n_per_host).
    i = 1
    while i <= num_devices_per_host:
        submesh_choices.append((1, i))
        i *= 2
    assert submesh_choices[-1][1] == num_devices_per_host, (
        "Only supports the cases where num_devices_per_host is power of two, "
        f"while now num_devices_per_host = {num_devices_per_host}")

    # Larger (multi-host) meshes always span full hosts.
    if space == "all":
        for i in range(2, num_hosts + 1):
            submesh_choices.append((i, num_devices_per_host))
    elif space == "power_of_two":
        i = 2
        while i <= num_hosts:
            submesh_choices.append((i, num_devices_per_host))
            i *= 2
    elif space == "small_power_of_two":
        i = 2
        while i <= min(num_hosts, 4):
            submesh_choices.append((i, num_devices_per_host))
            i *= 2
    elif space == "manual":
        # BUG FIX: previously `tuple(None)` raised an opaque TypeError when
        # no shapes were supplied; fail with an explicit message instead.
        if manually_specified_submeshes is None:
            raise ValueError(
                "space='manual' requires manually_specified_submeshes.")
        submesh_choices = manually_specified_submeshes
    else:
        raise ValueError(f"Invalid submesh space: {space}")

    return tuple(submesh_choices)

# def get_layer_ILP_result(cur_module_length):
#     """计算指定层数的 ILP 最优解"""
#     print(f"当前模块层数: {cur_module_length}")
#
#     modelGeneratorByLayer = ModelGeneratorByLayer()
#     computation = modelGeneratorByLayer.computation_graph(cur_module_length)
#
#     # 获取所有设备子网格
#     mesh_shape = [8, 8]
#     num_hosts, num_devices_per_host = mesh_shape
#     submesh_choices = get_submesh_choices(num_hosts, num_devices_per_host, "all")
#     print(submesh_choices)
#     submesh_shape = submesh_choices[3]
#
#     print(f"子网划分方案: {submesh_shape}")
#     objective = solve_without_all_gather(computation, submesh_shape)
#
#     print(f"ILP 计算结果: {objective}")
#     return objective

# def get_compute_costs(num_modules, num_submesh_choices):
#     """计算所有层级组合的计算成本"""
#     compute_cost = np.full((num_modules, num_modules, num_submesh_choices, 1), 1000000)
#
#     count = 0  # 记录 ILP 计算次数
#     print(f"模块总数: {num_modules}")
#
#     for i in range(num_modules):  # 遍历所有起始层
#         for k in range(i, num_modules):  # 遍历所有终止层
#             for m in range(num_submesh_choices):  # 遍历所有子网划分方案
#                 for n in range(1):  # 可能是并行搜索空间的索引
#                     compute_cost[i, k, m, n] = get_layer_ILP_result(k - i + 1)
#                     count += 1
#                     print(f"ILP 计算次数: {count}, 起始层: {i + 1}, 终止层: {k + 1}, 子网划分方案: {m}, ILP 结果: {compute_cost[i, k, m, n]}")
#
#     # print("最终计算成本矩阵:", compute_cost)
#     return compute_cost

def get_layer_ILP_result(start_layer, end_layer, submesh_shape):
    """Solve the intra-op ILP for one candidate stage.

    Builds the computation graph for the half-open layer range
    [start_layer, end_layer) and returns the optimal objective value
    for placing it on a submesh of the given shape.
    """
    stage_graph = ModelGeneratorByLayer().computation_stage_graph(
        start=start_layer, end=end_layer)
    return solve_without_all_gather(stage_graph, submesh_shape)

# NOTE(review): some (start_layer, end_layer, submesh_shape) combinations in
# compute_cost were observed to come back as -1; investigate the cause and
# skip unnecessary combinations in the sweep below.

def get_compute_costs(num_modules, submesh_choices, intra_op_result_path=log_path):
    """Build the compute_cost tensor and log every ILP solve to a text file.

    ``compute_cost[i, k, m, 0]`` is the intra-op cost of running layers
    i..k (inclusive) on ``submesh_choices[m]``.  Entries start at -1
    (unset) and failed solves are penalized with 1e9.

    Args:
        num_modules: total number of layers/modules.
        submesh_choices: candidate submesh shapes.
        intra_op_result_path: path of the per-solve log (overwritten).

    Returns:
        float32 ndarray of shape
        (num_modules, num_modules, len(submesh_choices), 1).
    """
    num_submesh_choices = len(submesh_choices)
    compute_cost = np.full((num_modules, num_modules, num_submesh_choices, 1), -1, dtype=np.float32)

    # Truncate and (re)open the log file for this sweep.
    with open(intra_op_result_path, "w") as f:
        count = 0
        for i in range(num_modules):          # first layer of the stage
            for k in range(i, num_modules):   # last layer of the stage (inclusive)
                # (the old `if i > k` guard was unreachable: k starts at i)
                for m, submesh_shape in enumerate(submesh_choices):
                    try:
                        # BUG FIX: get_layer_ILP_result takes a half-open
                        # range [start, end), so pass k + 1 to include layer
                        # k.  Previously (i, k) dropped the last layer and
                        # made the i == k case an empty stage — the likely
                        # source of the -1 costs noted above.
                        objective = get_layer_ILP_result(i, k + 1, submesh_shape)
                    except Exception as e:
                        # Penalize a failed solve instead of aborting the sweep.
                        objective = 1e9
                        f.write(f"[Error] ILP失败: Layer {i}~{k}, submesh={submesh_shape}, error={e}\n")

                    compute_cost[i, k, m, 0] = objective
                    f.write(f"[{count:03}] Layer {i}~{k}, Submesh={submesh_shape}, Cost={objective:.2f}\n")

                    count += 1

    return compute_cost



def inter_solver():
    """Drive the stage-construction search end to end.

    Reads the cluster shape from the parsed hardware config, prices every
    (layer-range, submesh) candidate with the intra-op ILP solver, then
    runs the stage-construction dynamic program over those costs.
    """
    total_layers = module_length
    hosts = hardware_config["num_nodes"]
    gpus_per_host = hardware_config["num_gpus_per_node"]
    total_gpus = hardware_config["total_gpus"]
    micro_batches = 16

    mesh_options = get_submesh_choices(hosts, gpus_per_host, "power_of_two")

    np.random.seed(42)

    # Price every candidate stage via the intra-op ILP solver.
    compute_cost = get_compute_costs(total_layers, mesh_options)
    print("compute_cost = ", compute_cost)

    # Uniform cap on the number of succeeding stages per entry.
    max_n_succ_stages = np.full(
        (total_layers, total_layers, len(mesh_options), 1), 4)
    print("max_n_succ_stages = ", max_n_succ_stages)

    # Run the stage-construction dynamic program.
    cost, _ = training_dp(total_layers, total_gpus, micro_batches,
                          mesh_options, compute_cost, max_n_succ_stages)

if __name__ == "__main__":
    inter_solver()
