import math
from collections import defaultdict
from enum import Enum, auto

import numpy as np


def find_factors(n):
    """
    Return every positive divisor of `n` in ascending order.

    Runs in O(sqrt(n)) by collecting divisor pairs (i, n // i) instead of
    scanning the full range 1..n.  Non-positive `n` yields [] (same as the
    original linear scan).

    :param n: integer to factorize
    :return: sorted list of all positive divisors of `n`
    """
    if n <= 0:
        return []
    small, large = [], []
    for i in range(1, math.isqrt(n) + 1):
        if n % i == 0:
            small.append(i)
            if i * i != n:  # avoid listing a perfect-square root twice
                large.append(n // i)
    # `small` is ascending, `large` holds the matching co-divisors descending.
    return small + large[::-1]


class LayerParallelSpec:
    """Intra-layer parallel layout: data-parallel (DP) × tensor-parallel (TP)."""

    def __init__(self, data_parallel_degree, tensor_parallel_degree,
                 data_parallel_mesh_dim=0, tensor_parallel_mesh_dim=1):
        """
        :param data_parallel_degree: data-parallel degree (DP)
        :param tensor_parallel_degree: tensor-parallel degree (TP)
        :param data_parallel_mesh_dim: device-mesh dimension used for DP
        :param tensor_parallel_mesh_dim: device-mesh dimension used for TP
        """
        self.data_parallel_degree = data_parallel_degree
        self.tensor_parallel_degree = tensor_parallel_degree
        self.data_parallel_mesh_dim = data_parallel_mesh_dim
        self.tensor_parallel_mesh_dim = tensor_parallel_mesh_dim

    def __repr__(self):
        # Added for debuggability: the class previously had no repr, forcing
        # callers to format these four fields by hand.
        return (f"LayerParallelSpec(DP={self.data_parallel_degree}, "
                f"TP={self.tensor_parallel_degree}, "
                f"DP_mesh_dim={self.data_parallel_mesh_dim}, "
                f"TP_mesh_dim={self.tensor_parallel_mesh_dim})")

    @staticmethod
    def search_parallel_options(cluster_env, layer_type):
        """
        Enumerate all (DP, TP) pairs with DP * TP == num_devices, then filter
        them by a per-layer-type heuristic.

        :param cluster_env: device-mesh environment; only `num_devices` is read
        :param layer_type: layer kind, e.g. "Mlp", "Embedding",
                           "Qwen2SdpaAttention", "Norm"
        :return: list of LayerParallelSpec (empty for unknown layer types,
                 preserving historical behavior)
        """
        num_devices = cluster_env.num_devices
        # Every divisor dp of num_devices yields the pair (dp, num_devices // dp).
        candidates = [
            LayerParallelSpec(dp, num_devices // dp,
                              data_parallel_mesh_dim=0, tensor_parallel_mesh_dim=1)
            for dp in range(1, num_devices + 1) if num_devices % dp == 0
        ]

        # Mlp: prefer TP over DP.
        if layer_type == "Mlp":
            return [s for s in candidates if s.tensor_parallel_degree > s.data_parallel_degree]
        # Embedding: prefer DP over TP.
        if layer_type == "Embedding":
            return [s for s in candidates if s.data_parallel_degree > s.tensor_parallel_degree]
        # Attention: TP at least DP, and DP > 1.
        if layer_type == "Qwen2SdpaAttention":
            return [s for s in candidates if s.tensor_parallel_degree >= s.data_parallel_degree > 1]
        # Norm: single option with maximal DP (hence minimal TP).
        if layer_type == "Norm":
            return [max(candidates, key=lambda s: (s.data_parallel_degree, -s.tensor_parallel_degree))]

        # Unknown layer type: keep returning an empty list, as before.
        return []
# Parallel strategy chosen for a single layer.
class LayerStrategy:
    """Pairs a human-readable strategy name with its LayerParallelSpec."""

    def __init__(self, name, intra_parallel_strategy):
        # `name` is a display label; `intra_parallel_strategy` is the
        # LayerParallelSpec describing the DP/TP layout.
        self.name = name
        self.intra_parallel_strategy = intra_parallel_strategy

class LayerCode(Enum):
    """Layer kinds recognized by the strategy search.

    Each value is the string tag passed to
    LayerParallelSpec.search_parallel_options.
    """

    EMBEDDING = "Embedding"
    ATTENTION = "Qwen2SdpaAttention"  # matches the HF Qwen2 attention class name
    MLP = "Mlp"
    NORM = "Norm"
    LAYER_NORM = "LayerNorm"
    RMS_NORM = "RMSNorm"

# Computation graph of HLO layers with per-layer strategy/cost bookkeeping.
class HloLayerComputation:
    """
    A linear computation graph of HLO layers plus per-layer strategy/cost data.

    Also acts as a context manager: `with HloLayerComputation() as comp:`
    installs the instance as the process-wide current environment
    (`HloLayerComputation.cur_env`).
    """

    # Process-wide "current" computation, managed by __enter__/__exit__.
    cur_env = None

    def __init__(self):
        self.strategy_built = False  # True once build_strategy_and_cost() has run
        self.layers = []             # registration order == execution order
        self.count_layer = 0         # kept for backward compatibility (unused here)

    def append_layer(self, layer, dependencies=None):
        """
        Register `layer` in the graph and record its predecessor layers.

        :param layer: layer to register; its `index` is set to its position
        :param dependencies: optional list of already-registered predecessors
        :return: the index assigned to `layer`
        """
        layer.index = len(self.layers)
        if dependencies:
            layer.dependencies.extend(dependencies)
        # Copy the list so later mutation of `layer.prev_layers` cannot alias
        # the caller's list (the original assigned the caller's list directly).
        layer.prev_layers = list(dependencies) if dependencies else []
        self.layers.append(layer)
        return layer.index

    def layer_liveness_analysis(self):
        """
        Backward liveness analysis over the layer list.

        :return: dict mapping time step t (layer index) to the set of layers
                 whose outputs must be resident at that step.
        """
        liveness_dict = {}
        live_set = set()

        for t in range(len(self.layers) - 1, -1, -1):
            layer = self.layers[t]

            # The layer's own output is live at its own step ...
            live_set.add(layer)

            # ... and so are the outputs of every layer it depends on.
            for dep in layer.dependencies:
                live_set.add(dep)

            # Snapshot the live set for this time step.
            liveness_dict[t] = set(live_set)

            # Before step t, this layer's output does not exist yet.
            live_set.remove(layer)

        return liveness_dict

    def build_strategy_and_cost(self, cluster_env, solver_option):
        """(Re)build every layer's parallel strategies and cost tables."""
        if self.strategy_built:
            # Rebuilding: clear previous results so entries are not duplicated.
            # BUG FIX: the original cleared `layer.strategies`, an attribute
            # that does not exist on HLOLayer (the real one is
            # `layerStrategies`), so strategies accumulated across rebuilds.
            for layer in self.layers:
                layer.layerStrategies = []
                layer.compute_costs = []
                layer.communication_costs = []
                layer.memory_costs = []
                layer.resharding_costs = []

        for layer in self.layers:
            layer.build_strategy_and_cost(cluster_env, solver_option)

        # Mark the strategy/cost tables as successfully built.
        self.strategy_built = True

    def __enter__(self):
        assert HloLayerComputation.cur_env is None
        HloLayerComputation.cur_env = self
        return self  # BUG FIX: original returned None, breaking `with ... as x`

    def __exit__(self, *args, **kwargs):
        HloLayerComputation.cur_env = None

    def __str__(self):
        return "\n".join(f"{i:2d}: {layer}" for i, layer in enumerate(self.layers))

# Per-LayerCode instance counter; HLOLayer.__init__ reads and increments it to
# build unique layer names such as "mlp.0", "mlp.1".
layer_code_ct = defaultdict(int)

class HLOLayer:
    """
    Base class for HLO layers.

    Tracks only tensor shapes and strategy/cost bookkeeping — it performs no
    actual computation.
    """

    def __init__(self, layer_code, input_shape):
        """
        :param layer_code: LayerCode member identifying the layer kind
        :param input_shape: input tensor shape, e.g. (batch_size, ...)
        """
        self.layer_code = layer_code
        self.input_shape = input_shape
        # Unique name such as "mlp.0": str(LayerCode.MLP)[10:] strips the
        # "LayerCode." prefix; the counter distinguishes repeated layers.
        self.name = f"{str(layer_code)[10:].lower()}.{layer_code_ct[layer_code]}"
        layer_code_ct[layer_code] += 1

        self.index = -1  # -1 means "not yet registered in a computation graph"

        # Graph structure (filled in by the owning computation).
        self.dependencies = []
        self.prev_layers = []

        # Per-strategy cost tables, filled by build_strategy_and_cost().
        self.layerStrategies = []
        self.compute_costs = []
        self.communication_costs = []
        self.memory_costs = []
        self.resharding_costs = []

    def _compute_mlp_cost(self, tp_degree: int) -> float:
        """Analytic compute cost of an MLP layer, divided across TP ranks."""
        b, s, h = self.input_shape
        return 4 * b * s * h / tp_degree

    def _compute_mlp_comm_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Analytic communication cost of an MLP layer (all-reduce + all-gather)."""
        b, s, h = self.input_shape
        traffic = (dp_degree - 1) / dp_degree + (tp_degree - 1) / tp_degree
        return 2 * b * s * h * traffic

    def _compute_mlp_memory_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Analytic memory cost of an MLP layer, sharded over DP * TP ranks."""
        b, s, h = self.input_shape
        return 4 * b * s * h / (dp_degree * tp_degree)

    def _compute_attention_cost(self, tp_degree: int) -> float:
        """Analytic compute cost of an attention layer (quadratic in seq_len)."""
        b, s, h = self.input_shape
        return b * s * s * h / tp_degree

    def _compute_attention_comm_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Analytic communication cost of an attention layer."""
        b, s, h = self.input_shape
        traffic = (dp_degree - 1) / dp_degree + (tp_degree - 1) / tp_degree
        return b * s * h * traffic

    def _compute_attention_memory_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Analytic memory cost of an attention layer, sharded over DP * TP ranks."""
        b, s, h = self.input_shape
        return b * s * s * h / (dp_degree * tp_degree)

    def _compute_embedding_cost(self, dp_degree: int) -> float:
        """Analytic compute cost of an embedding layer, divided across DP ranks."""
        b, s = self.input_shape
        # NOTE(review): for a 2-tuple input_shape, input_shape[-1] is seq_len,
        # so this is b * s * s — possibly the embedding dim was intended.
        # Behavior kept as-is; TODO confirm.
        return b * s * self.input_shape[-1] / dp_degree

    def _compute_embedding_comm_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Analytic communication cost of an embedding layer."""
        b, s = self.input_shape
        traffic = (dp_degree - 1) / dp_degree + (tp_degree - 1) / tp_degree
        # NOTE(review): input_shape[-1] == seq_len here (see _compute_embedding_cost).
        return b * s * self.input_shape[-1] * traffic

    def _compute_embedding_memory_cost(self, dp_degree: int, tp_degree: int) -> float:
        """Analytic memory cost of an embedding layer, sharded over DP * TP ranks."""
        b, s = self.input_shape
        # NOTE(review): input_shape[-1] == seq_len here (see _compute_embedding_cost).
        return b * s * self.input_shape[-1] / (dp_degree * tp_degree)

    def _compute_norm_cost(self) -> float:
        """Analytic compute cost of a normalization layer (not sharded)."""
        b, s, h = self.input_shape
        return b * s * h

    def _compute_norm_comm_cost(self, dp_degree: int) -> float:
        """Analytic communication cost of a normalization layer."""
        b, s, h = self.input_shape
        return b * s * h * (dp_degree - 1) / dp_degree

    def _compute_norm_memory_cost(self, dp_degree: int) -> float:
        """Analytic memory cost of a normalization layer, sharded over DP ranks."""
        b, s, h = self.input_shape
        return b * s * h / dp_degree

    def build_strategy_and_cost(self, cluster_env, solver_option):
        """
        Build this layer's parallel strategies and compute/communication/memory
        costs.  Subclasses must implement this (validate that DP * TP matches
        the device count, then fill the cost tables).
        """
        raise NotImplementedError

class HloSdqaAttentionLayer(HLOLayer):
    def __init__(self, input_shape, num_heads: int):
        """
        Self-attention layer with parallelism support.

        :param input_shape: (batch_size, seq_len, hidden_dim)
        :param num_heads: number of attention heads
        """
        super().__init__(LayerCode.ATTENTION, input_shape)
        b, s, h = input_shape

        assert h % num_heads == 0, "hidden_dim 必须能被 num_heads 整除"
        head_dim = h // num_heads

        # Tensor shapes involved in the attention computation.
        self.input_shape = (b, s, h)
        self.qkv_shape = (b, s, num_heads, head_dim)        # q, k, v projections
        self.attn_matrix_shape = (b, num_heads, s, s)       # attention matrix A
        self.output_shape = (b, s, h)

        # One entry per live tensor counted toward memory: input, five
        # qkv-shaped tensors, output, and two attention matrices.
        self.tensor_shape_list = [self.input_shape,
                                  self.qkv_shape, self.qkv_shape, self.qkv_shape,
                                  self.qkv_shape, self.qkv_shape,
                                  self.output_shape,
                                  self.attn_matrix_shape, self.attn_matrix_shape]

        self.num_heads = num_heads
        self.head_dim = head_dim

    def build_strategy_and_cost(self, cluster_env, solver_option):
        """Fill memory/communication cost tables for every candidate strategy."""
        mem = compute_tensor_memory_cost(self.tensor_shape_list, dtype=4)
        # Candidate (DP, TP) layouts for attention layers.
        options = LayerParallelSpec.search_parallel_options(cluster_env, "Qwen2SdpaAttention")
        last_prev = self.prev_layers[-1] if self.prev_layers else None

        for spec in options:
            label = (f"IntraParallelStrategy(DP={spec.data_parallel_degree}, TP={spec.tensor_parallel_degree},"
                     f"DP_mesh_dim = {spec.data_parallel_mesh_dim}, TP_mesh_dim={spec.tensor_parallel_mesh_dim})")
            self.layerStrategies.append(LayerStrategy(label, spec))

            self.compute_costs.append(0)
            self.communication_costs.append(cluster_env.all_reduce_cost(mem, 0))
            self.memory_costs.append(mem)

            # Resharding cost against the last predecessor only.
            if last_prev is None:
                self.resharding_costs.append([])  # no predecessor
                continue

            row = [cluster_env.resharding_cost(self.output_shape,
                                               prev.intra_parallel_strategy,
                                               spec)
                   for prev in last_prev.layerStrategies]
            self.resharding_costs.append(row)  # indexed [q][p]

    def __str__(self):
        return f"layer name:{self.name} input shape:{self.input_shape}"

class HloMLP(HLOLayer):
    def __init__(self, input_shape, intermediate_size):
        """
        MLP block: two linear projections with an activation in between.

        :param input_shape: (batch_size, seq_len, hidden_dim)
        :param intermediate_size: width of the intermediate projection
        """
        super().__init__(LayerCode.MLP, input_shape)
        b, s, h = input_shape

        # Input / intermediate / output activation shapes.
        self.input_shape = (b, s, h)
        self.hidden_shape = (b, s, intermediate_size)
        self.output_shape = (b, s, h)

        # Weights of the two linear layers.
        self.weight1_shape = (h, intermediate_size)
        self.weight2_shape = (intermediate_size, h)
        self.tensor_shape_list = [
            self.input_shape, self.hidden_shape, self.output_shape,
            self.weight1_shape, self.weight2_shape
        ]

    def build_strategy_and_cost(self, cluster_env, solver_option):
        """Fill memory/communication cost tables for every candidate strategy."""
        mem = compute_tensor_memory_cost(self.tensor_shape_list, dtype=4)
        # Candidate (DP, TP) layouts for MLP layers.
        options = LayerParallelSpec.search_parallel_options(cluster_env, "Mlp")
        last_prev = self.prev_layers[-1] if self.prev_layers else None

        for spec in options:
            label = (f"IntraParallelStrategy(DP={spec.data_parallel_degree}, TP={spec.tensor_parallel_degree},"
                     f"DP_mesh_dim = {spec.data_parallel_mesh_dim}, TP_mesh_dim={spec.tensor_parallel_mesh_dim})")
            self.layerStrategies.append(LayerStrategy(label, spec))

            self.compute_costs.append(0)
            self.communication_costs.append(cluster_env.all_reduce_cost(mem, 0))
            self.memory_costs.append(mem)

            # Resharding cost against the last predecessor only.
            if last_prev is None:
                self.resharding_costs.append([])  # no predecessor
                continue

            row = [cluster_env.resharding_cost(self.output_shape,
                                               prev.intra_parallel_strategy,
                                               spec)
                   for prev in last_prev.layerStrategies]
            self.resharding_costs.append(row)  # indexed [q][p]

    def __str__(self):
        return f"layer name:{self.name} input shape:{self.input_shape}"

class HloEmbeddingLayer(HLOLayer):
    def __init__(self, input_shape, vocab_size: int, embedding_dim: int):
        """
        Embedding layer.

        :param input_shape: (batch_size, seq_len) of token indices
        :param vocab_size: vocabulary size
        :param embedding_dim: embedding vector width
        """
        super().__init__(LayerCode.EMBEDDING, input_shape)
        b, s = input_shape

        # Input indices, embedding table, and output activation shapes.
        self.input_shape = (b, s)
        self.weight_shape = (vocab_size, embedding_dim)  # embedding matrix
        self.output_shape = (b, s, embedding_dim)

        self.tensor_shape_list = [self.input_shape, self.weight_shape, self.output_shape]

    def build_strategy_and_cost(self, cluster_env, solver_option):
        """Fill memory/communication cost tables for every candidate strategy."""
        # Memory for input indices, weight table, and output activations.
        mem = compute_tensor_memory_cost(self.tensor_shape_list, dtype=4)
        # Candidate (DP, TP) layouts for embedding layers.
        options = LayerParallelSpec.search_parallel_options(cluster_env, "Embedding")
        last_prev = self.prev_layers[-1] if self.prev_layers else None

        for spec in options:
            label = (f"IntraParallelStrategy(DP={spec.data_parallel_degree}, TP={spec.tensor_parallel_degree},"
                     f"DP_mesh_dim = {spec.data_parallel_mesh_dim}, TP_mesh_dim={spec.tensor_parallel_mesh_dim})")
            self.layerStrategies.append(LayerStrategy(label, spec))

            self.compute_costs.append(0)
            self.communication_costs.append(cluster_env.all_reduce_cost(mem, 0))
            self.memory_costs.append(mem)

            # Resharding cost against the last predecessor only.
            if last_prev is None:
                self.resharding_costs.append([])  # no predecessor
                continue

            row = [cluster_env.resharding_cost(self.output_shape,
                                               prev.intra_parallel_strategy,
                                               spec)
                   for prev in last_prev.layerStrategies]
            self.resharding_costs.append(row)  # indexed [q][p]

    def __str__(self):
        return f"layer name:{self.name} input shape:{self.input_shape}"

class HloNorm(HLOLayer):
    def __init__(self, layer_code, input_shape, norm_type):
        """
        Base class for normalization layers (LayerNorm, RMSNorm).

        :param layer_code: LayerCode member for the concrete norm variant
        :param input_shape: (batch_size, seq_len, hidden_dim)
        :param norm_type: normalization kind as a string
        """
        super().__init__(layer_code, input_shape)
        b, s, h = input_shape

        self.input_shape = (b, s, h)
        self.output_shape = (b, s, h)  # normalization preserves the shape
        self.param_shape = (h,)        # gamma and beta vectors

        self.tensor_shape_list = [self.input_shape, self.output_shape, self.param_shape]

        self.norm_type = norm_type

    def build_strategy_and_cost(self, cluster_env, solver_option):
        """Fill memory/communication cost tables for every candidate strategy."""
        mem = compute_tensor_memory_cost(self.tensor_shape_list, dtype=4)
        # Candidate (DP, TP) layouts for normalization layers.
        options = LayerParallelSpec.search_parallel_options(cluster_env, "Norm")
        last_prev = self.prev_layers[-1] if self.prev_layers else None

        for spec in options:
            label = (f"IntraParallelStrategy(DP={spec.data_parallel_degree}, TP={spec.tensor_parallel_degree},"
                     f"DP_mesh_dim = {spec.data_parallel_mesh_dim}, TP_mesh_dim={spec.tensor_parallel_mesh_dim})")
            self.layerStrategies.append(LayerStrategy(label, spec))

            self.compute_costs.append(0)
            self.communication_costs.append(cluster_env.all_reduce_cost(mem, 0))
            self.memory_costs.append(mem)

            # Resharding cost against the last predecessor only.
            if last_prev is None:
                self.resharding_costs.append([])  # no predecessor
                continue

            row = [cluster_env.resharding_cost(self.output_shape,
                                               prev.intra_parallel_strategy,
                                               spec)
                   for prev in last_prev.layerStrategies]
            self.resharding_costs.append(row)  # indexed [q][p]


class HloLayerNorm(HloNorm):
    """Layer normalization (LayerNorm)."""

    def __init__(self, input_shape):
        super().__init__(LayerCode.LAYER_NORM, input_shape, "LayerNorm")

    def __str__(self):
        return f"layer name:{self.name} input shape:{self.input_shape}"

class HloRMSNorm(HloNorm):
    """Root-mean-square normalization (RMSNorm)."""

    def __init__(self, input_shape):
        super().__init__(LayerCode.RMS_NORM, input_shape, "RMSNorm")

    def __str__(self):
        return f"layer name:{self.name} input shape:{self.input_shape}"


# Total memory (bytes) needed to store a list of tensors, given their shapes.
def compute_tensor_memory_cost(tensor_shape_list, dtype = 4):
    """
    Sum of element counts over all shapes, times the per-element byte size.

    :param tensor_shape_list: iterable of shape tuples
    :param dtype: bytes per element (4 = fp32/int32); parameter name kept for
                  backward compatibility with keyword callers
    :return: total bytes as a plain Python number
    """
    # math.prod uses exact arbitrary-precision int arithmetic, unlike np.prod
    # which returns int64 and can silently wrap around on huge shapes.
    return sum(math.prod(shape) for shape in tensor_shape_list) * dtype