"""ILP Solver"""

import multiprocessing
import time
import warnings
import numpy as np

# A constant to represent infinity
INFINITY_COST = 1e13

class CostGraph:
    """A cost graph over layers used to simplify the auto-sharding ILP.

    Nodes are layers indexed 0..len(node_lens)-1; node_lens[i] is the number
    of sharding strategies of node i. Each edge (i, j) carries a resharding
    cost matrix of shape (node_lens[i], node_lens[j]). simplify() merges the
    nodes listed in to_merge_pair so a merged node "follows" the strategy
    choice of its destination, shrinking the ILP.
    """

    def __init__(self, node_lens, edges, edge_costs, to_merge_pair):
        self.node_lens = node_lens
        self.adjacency = dict()  # node -> set of neighbor nodes
        self.edge_costs = dict()  # (i, j) with i <= j -> cost matrix
        self.reindexing_vector = dict()  # node -> strategy reindexing vector
        self.merged_to = dict()  # merged node -> its destination node
        self.to_merge_pair = to_merge_pair  # the input follow pairs

        for i in range(len(node_lens)):
            self.adjacency[i] = set()

        # Duplicated edges are accumulated (summed) by add_edge_cost.
        for ((i, j), cost) in zip(edges, edge_costs):
            cost = np.reshape(cost, (self.node_lens[i], self.node_lens[j]))
            self.add_edge_cost(i, j, cost)

    def get_edge_cost(self, i, j):
        """Return the cost matrix of edge (i, j), oriented as (i, j)."""
        if i <= j:
            return self.edge_costs[(i, j)]
        return self.edge_costs[(j, i)].transpose()

    def add_edge_cost(self, i, j, cost):
        """Add `cost` onto edge (i, j), creating the edge if it is new."""
        # Canonicalize so the stored key always has i <= j.
        if i > j:
            i, j = j, i
            cost = cost.transpose()

        if (i, j) in self.edge_costs:
            assert i in self.adjacency[j]
            assert j in self.adjacency[i]
            self.edge_costs[(i, j)] += cost
        else:
            self.adjacency[i].add(j)
            self.adjacency[j].add(i)
            self.edge_costs[(i, j)] = cost

    def remove_edge(self, i, j):
        """Remove edge (i, j) and its cost matrix."""
        if i > j:
            i, j = j, i

        assert j in self.adjacency[i]
        assert i in self.adjacency[j]
        assert (i, j) in self.edge_costs

        self.adjacency[i].remove(j)
        self.adjacency[j].remove(i)
        del self.edge_costs[(i, j)]

    def merge_node(self, src, dst):
        """Merge node src into node dst so that src follows dst's choice."""
        assert dst in self.adjacency[src]
        assert src in self.adjacency[dst]
        assert dst not in self.merged_to
        assert src != dst

        edge_cost = self.get_edge_cost(dst, src)

        # For every strategy of dst, greedily pick the strategy of src with
        # the lowest cost to follow. On cost ties prefer the largest index,
        # which corresponds to "replicated".
        reindexing = []
        for i in range(self.node_lens[dst]):
            keys = [(edge_cost[i][j], -j) for j in range(self.node_lens[src])]
            best = min(range(self.node_lens[src]), key=lambda j: keys[j])
            reindexing.append(best)
        # NOTE(review): node_lens[src] == 0 would make `min` raise here; the
        # original code had the same failure mode via candidates[0].

        self.merged_to[src] = dst
        self.reindexing_vector[src] = reindexing

        # Fold src's other edges into dst: when dst picks strategy i, src is
        # forced to strategy j = reindexing[i], so the merged edge carries
        # both the src-adj resharding cost and the dst-src follow cost.
        adj_list = list(self.adjacency[src])
        for adj in adj_list:
            if adj == dst:
                continue
            # Hoisted out of the inner loops: loop-invariant lookup.
            src_adj_cost = self.get_edge_cost(src, adj)
            added_edge_cost = np.empty((self.node_lens[dst], self.node_lens[adj]))
            for i in range(self.node_lens[dst]):
                j = reindexing[i]
                added_edge_cost[i][:] = src_adj_cost[j] + edge_cost[i][j]
            self.add_edge_cost(dst, adj, added_edge_cost)

        # Finally detach src from the graph.
        for adj in adj_list:
            self.remove_edge(src, adj)

    def query_destination(self, node):
        """Return the final merge destination of `node`, compressing paths."""
        if node not in self.merged_to:
            return node

        old_dst = self.merged_to[node]
        new_dst = self.query_destination(old_dst)
        if old_dst != new_dst:
            # Path compression: compose the two reindexing vectors so `node`
            # points directly at the final destination.
            old_reindexing_vector = self.reindexing_vector[node]
            self.reindexing_vector[node] = [
                old_reindexing_vector[self.reindexing_vector[old_dst][i]]
                for i in range(self.node_lens[new_dst])
            ]
            self.merged_to[node] = new_dst
        return new_dst

    def simplify(self):
        """Merge all requested follow pairs, simplifying the graph."""
        for (src, dst) in self.to_merge_pair:
            assert src not in self.merged_to
            dst = self.query_destination(dst)
            if src != dst:
                self.merge_node(src, dst)

    def export_result(self):
        """Export (s_follow, E, r, reindexing_vector) for the ILP solver.

        s_follow[i] is the final destination of node i, or -1 if node i was
        never merged. E lists the remaining edges and r their flattened cost
        matrices, in matching order.
        """
        E = []
        r = []
        s_follow = []

        for i in range(len(self.node_lens)):
            if i in self.merged_to:
                s_follow.append(self.query_destination(i))
            else:
                s_follow.append(-1)

        for ((i, j), v) in self.edge_costs.items():
            v = v.reshape(-1)
            E.append((i, j))
            r.append(v)

            assert len(v) == self.node_lens[i] * self.node_lens[j]

        return s_follow, E, r, self.reindexing_vector

    def __str__(self):
        ret = ""
        for i in range(len(self.node_lens)):
            ret += f"Node {i}: {self.node_lens[i]}\n"

        for (i, j) in sorted(self.edge_costs.keys()):
            ret += f"Edge {(i, j)}:\n"
            ret += str(self.edge_costs[(i, j)]) + "\n"

        return ret

class SolverOption:
    """Optional overrides for strategy/cost construction.

    Every option defaults to None, which means "no override". Setting
    force_*_cost fields (e.g. to 1e8) lets callers effectively forbid a
    communication primitive.
    """

    _OPTION_NAMES = (
        "force_batch_dim_to_mesh_dim",
        "forward_backward_sep_id",
        "force_all_reduce_cost",
        "force_all_gather_cost",
        "force_reduce_scatter_cost",
    )

    def __init__(self):
        for option in self._OPTION_NAMES:
            setattr(self, option, None)

# TODO(new): rework the ILP formulation (edges / resharding still pending here).
def solve_layer_auto_sharding(layerComputation, cluster_env, solver_option=None):
    """Pick one sharding strategy per layer by ILP, ignoring resharding costs.

    Args:
        layerComputation: project object exposing `layers`,
            `layer_liveness_analysis()` and `build_strategy_and_cost()`.
        cluster_env: cluster description; `memory_per_device` is the budget.
        solver_option: optional SolverOption; a default one is used if None.

    Returns:
        The ILP objective value (total compute + communication cost).
    """
    print("===== Hlo Layer Computation =====")
    print(layerComputation, "\n")

    print("===== Liveness Analysis =====")
    liveness_dict = layerComputation.layer_liveness_analysis()
    for i in range(len(layerComputation.layers)):
        names = sorted(layer.name for layer in liveness_dict[i])
        print(f"Time: {i}, Live set: {names}")

    if solver_option is None:
        solver_option = SolverOption()

    # Build strategies and costs
    layerComputation.build_strategy_and_cost(cluster_env, solver_option)

    # ILP parameters:
    #   N -- number of layers
    #   M -- memory capacity of a single GPU
    N = len(layerComputation.layers)
    print("N(len(layerComputation.layers): ", N)
    M = cluster_env.memory_per_device
    print("M(memory_per_device): ", M)

    s_len = []  # strategy-space size of each layer
    c = []  # compute costs
    d = []  # communication costs
    m = []  # memory costs

    for layer in layerComputation.layers:
        s_len.append(len(layer.layerStrategies))
        c.append(layer.compute_costs)
        d.append(layer.communication_costs)
        m.append(layer.memory_costs)

    # TODO(new): record inter-layer dependencies (graph edges), simplify the
    # graph with CostGraph (follow pairs + reindexing) and feed the edges /
    # resharding costs to the solver. See
    # solve_layer_auto_sharding_with_resharding for the edge-aware variant.

    # Solve the ILP.
    s_val, objective, status = call_layer_solver(N, M, s_len, c, d, m)

    # Print the chosen sharding strategy of every layer.
    print("===== Intra Parallel Strategy =====")
    for i in range(N):
        stra_idx = s_val[i]
        name = layerComputation.layers[i].layerStrategies[stra_idx].name
        print(f"Layer {i}: {layerComputation.layers[i]}  Strategy: {name}")

    # Print memory usage. The sum does not depend on the time step, so
    # compute it once (previously it was recomputed N times in a loop and
    # only printed for t == N-1).
    print("===== Memory Usage =====")
    if N > 0:
        mem = sum(m[i][s_val[i]] for i in range(N))
        print(f"Layer {N - 1}, memory: {mem / 1024 ** 2: .2f} MB")

    return objective

def solve_layer_auto_sharding_with_resharding(layerComputation, cluster_env, solver_option=None):
    """Pick one sharding strategy per layer by ILP, including resharding costs.

    Args:
        layerComputation: project object exposing `layers`,
            `layer_liveness_analysis()` and `build_strategy_and_cost()`.
        cluster_env: cluster description; `memory_per_device` is the budget.
        solver_option: optional SolverOption; a default one is used if None.

    Returns:
        The ILP objective value (compute + communication + resharding cost).
    """
    print("===== Hlo Layer Computation =====")
    print(layerComputation, "\n")

    print("===== Liveness Analysis =====")
    liveness_dict = layerComputation.layer_liveness_analysis()
    for i in range(len(layerComputation.layers)):
        names = sorted(layer.name for layer in liveness_dict[i])
        print(f"Time: {i}, Live set: {names}")

    if solver_option is None:
        solver_option = SolverOption()

    # Build strategies and costs
    layerComputation.build_strategy_and_cost(cluster_env, solver_option)

    # ILP parameters:
    #   N -- number of layers
    #   M -- memory capacity of a single GPU
    N = len(layerComputation.layers)
    print("N(len(layerComputation.layers): ", N)
    M = cluster_env.memory_per_device
    print("M(memory_per_device): ", M)

    s_len = []  # strategy-space size of each layer
    E = []  # graph edges (src, dst)
    c = []  # compute costs
    d = []  # communication costs
    m = []  # memory costs
    r = []  # resharding costs, one flattened matrix per edge
    L = []  # liveness sets: layer indices live at each time step

    for i in range(N):
        layer = layerComputation.layers[i]
        s_len.append(len(layer.layerStrategies))
        c.append(layer.compute_costs)
        d.append(layer.communication_costs)
        m.append(layer.memory_costs)
        L.append([live.index for live in liveness_dict[i]])

        # Record inter-layer dependencies (graph edges).
        for prev_layer in layer.dependencies:
            # Add edge: prev_layer -> layer
            E.append((prev_layer.index, layer.index))

            # Flatten the resharding cost matrix for this edge.
            # NOTE: resharding_costs is indexed [q][p] (this layer's
            # strategy first, predecessor's strategy second).
            costs = []
            for p in range(len(prev_layer.layerStrategies)):
                for q in range(len(layer.layerStrategies)):
                    costs.append(layer.resharding_costs[q][p])
            r.append(costs)

    # Solve the ILP.
    s_val, e_val, objective, status = call_layer_solver_with_resharding(N, M, s_len, E, L, c, d, m, r)

    # Print the chosen sharding strategy of every layer.
    print("===== Intra Parallel Strategy =====")
    for i in range(N):
        stra_idx = s_val[i]
        name = layerComputation.layers[i].layerStrategies[stra_idx].name
        print(f"Layer {i}: {layerComputation.layers[i]}  Strategy: {name}")

    # Print memory usage. The sum below does not depend on t, so it is
    # hoisted out of the loop (same printed output, computed once).
    # NOTE(review): per-step liveness (sum over L[t]) was probably intended
    # here -- confirm before changing the printed totals.
    print("===== Memory Usage =====")
    mem = sum(m[i][s_val[i]] for i in range(N))
    for t in range(N):
        print(f"Layer {t}, memory: {mem / 1024 ** 2: .2f} MB")

    return objective

def solve_layer_auto_sharding_with_resharding_without_print(layerComputation, cluster_env, solver_option=None):
    """Quiet variant of solve_layer_auto_sharding_with_resharding.

    Identical ILP formulation, but the liveness / parameter / memory logging
    is suppressed. Dead work from the verbose variant (building sorted name
    lists and memory sums that were never printed) has been removed.

    Returns:
        The ILP objective value (compute + communication + resharding cost).
    """
    # Liveness analysis is still needed to build the liveness sets L below.
    liveness_dict = layerComputation.layer_liveness_analysis()

    if solver_option is None:
        solver_option = SolverOption()

    # Build strategies and costs
    layerComputation.build_strategy_and_cost(cluster_env, solver_option)

    # ILP parameters:
    #   N -- number of layers
    #   M -- memory capacity of a single GPU
    N = len(layerComputation.layers)
    M = cluster_env.memory_per_device

    s_len = []  # strategy-space size of each layer
    E = []  # graph edges (src, dst)
    c = []  # compute costs
    d = []  # communication costs
    m = []  # memory costs
    r = []  # resharding costs, one flattened matrix per edge
    L = []  # liveness sets: layer indices live at each time step

    for i in range(N):
        layer = layerComputation.layers[i]
        s_len.append(len(layer.layerStrategies))
        c.append(layer.compute_costs)
        d.append(layer.communication_costs)
        m.append(layer.memory_costs)
        L.append([live.index for live in liveness_dict[i]])

        # Record inter-layer dependencies (graph edges).
        for prev_layer in layer.dependencies:
            # Add edge: prev_layer -> layer
            E.append((prev_layer.index, layer.index))

            # Flatten the resharding cost matrix for this edge.
            # NOTE: resharding_costs is indexed [q][p] (this layer's
            # strategy first, predecessor's strategy second).
            costs = []
            for p in range(len(prev_layer.layerStrategies)):
                for q in range(len(layer.layerStrategies)):
                    costs.append(layer.resharding_costs[q][p])
            r.append(costs)

    # Solve the ILP.
    s_val, e_val, objective, status = call_layer_solver_with_resharding(N, M, s_len, E, L, c, d, m, r)

    # Print the chosen sharding strategies (kept even in the quiet variant,
    # matching the original behavior).
    print("===== Intra Parallel Strategy =====")
    for i in range(N):
        stra_idx = s_val[i]
        name = layerComputation.layers[i].layerStrategies[stra_idx].name
        print(f"Layer {i}: {layerComputation.layers[i]}  Strategy: {name}")

    return objective


# Convert the ILP solver's arguments into flat numpy arrays.
def call_layer_solver(N, M, s_len, c, d, m):
    """Flatten the per-layer cost lists into numpy arrays and call the solver.

    N is the layer count, M the per-device memory budget; s_len gives the
    strategy-space size per layer, and c/d/m hold per-layer compute,
    communication and memory cost vectors.
    """
    # Strategy-space sizes, one entry per layer.
    s_len_np = np.array(s_len, dtype=np.int32)

    # Node costs, flattened in layer order.
    c_np = np.array([x for v in c for x in v], dtype=np.float32)
    d_np = np.array([x for v in d for x in v], dtype=np.float32)
    m_np = np.array([x for v in m for x in v], dtype=np.float32)

    # No warm-start value.
    s_init_np = None

    return _call_layer_solver_serialized_args(
        N, M, s_len_np, c_np, d_np, m_np, s_init_np)


# Convert the ILP solver's arguments into flat numpy arrays.
def call_layer_solver_with_resharding(N, M, s_len, E, L, c, d, m, r):
    """Flatten python lists to numpy arrays and call the resharding solver.

    Args:
        N: number of layers. M: per-device memory budget.
        s_len: strategy-space size per layer.
        E: graph edges as (src, dst) pairs.
        L: liveness sets (layer indices live at each time step).
        c, d, m: per-layer compute / communication / memory cost vectors.
        r: per-edge flattened resharding cost matrices.
    """
    # Serialize strategy lengths
    s_len_np = np.array(s_len, dtype=np.int32)

    # Serialize edge set; reshape keeps shape (0, 2) when E is empty.
    E_np = np.array(E, dtype=np.int32).reshape((-1, 2))

    # Serialize liveness set: N per-step lengths, then the flattened members.
    L_np = np.array([len(v) for v in L] + [x for v in L for x in v],
                    dtype=np.int32)

    # Serialize node costs, flattened in layer order.
    c_np = np.array([x for v in c for x in v], dtype=np.float32)
    d_np = np.array([x for v in d for x in v], dtype=np.float32)
    m_np = np.array([x for v in m for x in v], dtype=np.float32)

    # Serialize edge costs, flattened in edge order.
    r_np = np.array([x for vec in r for x in vec], dtype=np.float32)

    # No warm-start value.
    s_init_np = None

    return _call_layer_solver_serialized_args_with_resharding(
        N, M, s_len_np, E_np, L_np,
        c_np, d_np, m_np, r_np, s_init_np)



# The reworked layer-level ILP solver.
def _call_layer_solver_serialized_args(
        N, M, s_len_np, c_np, d_np, m_np, s_init_np=None):
    """Solve the per-layer auto-sharding ILP from serialized arguments.

    Args:
        N: number of nodes (layers) in the computation graph.
        M: memory budget per device; <= 0 disables the memory constraint.
        s_len_np: int array of length N; strategy count per node.
        c_np: flattened compute costs, one entry per (node, strategy).
        d_np: flattened communication costs, same layout as c_np.
        m_np: flattened memory costs, same layout as c_np.
        s_init_np: optional warm-start triples (node_idx, strategy, fix_flag).

    Returns:
        (s_val, objective, status): chosen strategy index per node, the
        objective value (-1.0 when the solver produced none), and the pulp
        status code.

    Raises:
        RuntimeError: if the ILP is infeasible under the memory budget.
    """
    # pylint: disable=invalid-name
    global last_s_val, last_objective

    import pulp
    from pulp import LpVariable, LpProblem, LpMinimize, lpSum, lpDot, LpStatus
    tic = time.time()

    for x in [s_len_np, c_np, d_np, m_np]:
        assert isinstance(x, np.ndarray)
    assert len(s_len_np) == N, "s_len_np"

    def get_non_zero_index(binary_vector):
        """Return the index of the single non-zero entry of a binary vector."""
        ct = 0
        ret = None
        for i, elem in enumerate(binary_vector):
            if pulp.value(elem):
                ret = i
                ct += 1

        assert ct == 1  # exactly one strategy may be selected
        return ret

    # 0. Unpack flattened numpy arrays into per-node cost vectors.
    s_len = s_len_np

    c = []  # compute costs per node
    d = []  # communication costs per node
    m = []  # memory costs per node
    pt = 0
    for i in range(N):
        length = s_len[i]
        c.append(c_np[pt:pt + length])
        d.append(d_np[pt:pt + length])
        m.append(m_np[pt:pt + length])
        pt += length
    assert pt == len(c_np), f"{pt} == {len(c_np)}"
    assert pt == len(d_np), f"{pt} == {len(d_np)}"
    assert pt == len(m_np), f"{pt} == {len(m_np)}"

    # 1. Create variables: a one-hot binary strategy vector per node.
    s = []
    num_nodes = 0  # nodes that actually need decision variables

    for i in range(N):
        # A node with a single strategy has no real choice; fix it to [1].
        if s_len[i] == 1:
            s.append([1])
        else:
            num_nodes += 1
            s.append(
                LpVariable.matrix(f"s[{i}]", (range(s_len[i]),),
                                  cat="Binary"))

    # 2. Warm start: rows of s_init_np are (node idx, strategy, fix flag).
    # Initial values speed up convergence; fix=1 pins the choice.
    if s_init_np is not None:
        s_init = s_init_np.reshape((-1, 3))
        for (idx, value, fix) in s_init:
            for i in range(len(s[idx])):
                s[idx][i].setInitialValue(i == value)
                if fix:
                    s[idx][i].fixValue()

    prob = LpProblem("myProblem", LpMinimize)

    # 3. Objective: total compute + communication cost over all nodes.
    obj = 0
    for i in range(N):
        obj += lpDot(s[i], c[i]) + lpDot(s[i], d[i])
    prob += obj

    # 4. Constraints
    # (a). specified by `cat="Binary"`

    # (b) each node selects exactly one sharding strategy
    for i in range(N):
        prob += lpSum(s[i]) == 1

    # (c) memory budget. Without liveness information, conservatively assume
    # all nodes are live simultaneously.
    # BUGFIX: previously `mem` stayed 0, so `prob += mem <= M` only added the
    # vacuous constraint 0 <= M and the budget was never enforced.
    if M > 0:
        prob += lpSum(s[i][j] * m[i][j]
                      for i in range(N)
                      for j in range(len(s[i]))) <= M

    # Solver setup and execution.
    verbose = False
    msg = verbose
    time_limit = 600
    assert "PULP_CBC_CMD" in pulp.listSolvers(onlyAvailable=True), (
        "Please install ILP solvers by 'sudo apt install coinor-cbc'")

    solver = pulp.PULP_CBC_CMD(mip=True,
                               msg=msg,
                               timeLimit=time_limit,
                               threads=multiprocessing.cpu_count())
    prob.solve(solver)

    status = prob.status
    objective = pulp.value(prob.objective)
    objective = float(objective) if objective is not None else -1.0
    if verbose:
        print(f"ILP Status: {LpStatus[status]}\tObjective: {objective}\t"
              f"Time: {time.time() - tic}")
        print(f"#nodes: {num_nodes}")

    if prob.status in [pulp.LpStatusInfeasible]:
        raise RuntimeError(
            "Cannot run the function under the given memory budget. "
            "Please increase the memory budget.")

    # Extract the chosen strategy index for every node.
    # (Leftover debug prints removed.)
    s_val = np.full((N,), -1, dtype=np.int32)
    for i in range(N):
        s_val[i] = get_non_zero_index(s[i])

    last_s_val = s_val
    last_objective = objective

    if objective > INFINITY_COST:
        warnings.warn("Detect unexpected behaviors in the auto-sharding pass.")

    return s_val, objective, status


def _call_layer_solver_serialized_args_with_resharding(N, M, s_len_np, E_np, L_np,
                                  c_np, d_np, m_np, r_np, s_init_np=None):
    """Solve the layer auto-sharding ILP (with resharding costs) from
    serialized numpy arguments.

    N -- number of nodes (layers) in the computation graph
    M -- memory budget per device (<= 0 disables the memory constraint)
    s_len_np -- strategy count per node, length N
    E_np -- edge list, reshaped to (num_edges, 2)
    L_np -- liveness sets: N per-step lengths, then the flattened members
    c_np / d_np / m_np -- flattened compute / communication / memory costs
    r_np -- flattened resharding cost matrices, one per edge
    s_init_np -- optional warm-start triples (node_idx, strategy, fix_flag)

    Returns (s_val, e_val, objective, status).
    Raises RuntimeError if the ILP is infeasible under the memory budget.
    """
    # pylint: disable=invalid-name
    global last_s_val, last_objective

    import pulp
    from pulp import LpVariable, LpProblem, LpMinimize, lpSum, lpDot, LpStatus
    tic = time.time()

    for x in [s_len_np, E_np, L_np, c_np, d_np, m_np, r_np]:
        assert isinstance(x, np.ndarray)
    assert len(s_len_np) == N, "s_len_np"

    def get_non_zero_index(binary_vector):
        """Return the index of the single non-zero entry of a binary vector."""
        ct = 0
        ret = None
        for i, elem in enumerate(binary_vector):
            if pulp.value(elem):
                ret = i
                ct += 1
        assert ct == 1  # exactly one entry may be 1
        return ret

    # 0. Unpack flatten numpy arrays
    s_len = s_len_np

    # E -- edges of the computation graph.
    # Slice r_np into one resharding cost vector per edge; duplicated edges
    # are rejected so every edge has a unique cost matrix.
    E = E_np.reshape((-1, 2))  # noqa
    r = []  # resharding communication cost, one flattened matrix per edge
    pt = 0  # read pointer into r_np
    edge_set = set()
    for (i, j) in E:
        # Size of the joint strategy space of the two endpoints.
        prod_length = s_len[i] * s_len[j]

        if (i, j) in edge_set:
            raise ValueError(f"Duplicated edges: {(i, j)}")

        edge_set.add((i, j))
        r.append(r_np[pt:pt + prod_length])
        pt += prod_length
    assert pt == len(r_np)

    # Rebuild L -- liveness sets: L[t] lists the node indices live at step t.
    L = []  # noqa
    pt = N
    for i in range(N):
        length = L_np[i]
        L.append(L_np[pt:pt + length])
        pt += length
    assert pt == len(L_np)

    c = []  # compute costs per node
    d = []  # communication costs per node
    m = []  # memory costs per node
    pt = 0
    for i in range(N):
        length = s_len[i]
        c.append(c_np[pt:pt + length])
        d.append(d_np[pt:pt + length])
        m.append(m_np[pt:pt + length])
        pt += length
    assert pt == len(c_np), f"{pt} == {len(c_np)}"
    assert pt == len(d_np), f"{pt} == {len(d_np)}"
    assert pt == len(m_np), f"{pt} == {len(m_np)}"

    # 1. Create variables
    s = []  # one-hot strategy decision vector per node, e.g. (0,...,1,...,0)
    e = []  # one-hot decision vector per edge over joint strategy pairs

    num_nodes = 0  # nodes that actually need decision variables

    # A node with a single strategy has no real choice; fix it to [1].
    # s_len[i] is the length of node i's strategy vector.
    for i in range(N):
        if s_len[i] == 1:
            s.append([1])
        else:
            num_nodes += 1
            s.append(LpVariable.matrix(f"s[{i}]", (range(s_len[i]),), cat="Binary"))

    # Create edge variables; idx is the index of edge (i, j) within E.
    num_edges = 0  # edges that need their own decision variables

    for (idx, (i, j)) in enumerate(E):
        # If node i has only one strategy, the edge choice is fully
        # determined by node j, so reuse s[j] as this edge's vector.
        if len(s[i]) == 1:
            e.append(s[j])
        # Symmetrically, if node j has only one strategy, reuse s[i].
        elif len(s[j]) == 1:
            e.append(s[i])
        # Otherwise create a binary vector e[i,j] of size
        # len(s[i]) * len(s[j]), covering every joint strategy pair.
        else:
            num_edges += 1
            e.append(
                LpVariable.matrix(f"e[{i},{j}]",
                                  (range(len(s[i]) * len(s[j])),),
                                  cat="Binary"))
        assert len(e[idx]) == len(r[idx])

    # 2. Warm start: rows of s_init_np are (node idx, strategy, fix flag).
    # Initial values speed up convergence; fix=1 pins the choice.
    if s_init_np is not None:
        s_init = s_init_np.reshape((-1, 3))
        for (idx, value, fix) in s_init:
            for i in range(len(s[idx])):
                s[idx][i].setInitialValue(i == value)
                if fix:
                    s[idx][i].fixValue()

    prob = LpProblem("myProblem", LpMinimize)

    # 3. Objective: node costs (compute + communication) ...
    obj = 0
    for i in range(N):
        obj += lpDot(s[i], c[i]) + lpDot(s[i], d[i])
    # ... plus edge resharding costs.
    for i in range(len(E)):
        obj += lpDot(e[i], r[i])
    prob += obj

    # 4. Constraints
    # (a). specified by `cat="Binary"`

    # (b) each node selects exactly one sharding strategy
    for i in range(N):
        prob += lpSum(s[i]) == 1

    # (c) at every time step t, the memory of all live nodes must fit in M
    if M > 0:
        for t in range(N):
            mem = 0
            for i in L[t]:
                mem += lpSum(s[i][j] * m[i][j] for j in range(len(s[i])))
            prob += mem <= M

    # (d). specified by `cat="Binary"`

    for (idx, (i, j)) in enumerate(E):
        if s_len[i] == 1 or s_len[j] == 1:
            continue

        # (e) each edge selects exactly one joint strategy pair
        prob += lpSum(e[idx]) == 1

        # (f) row consistency: the edge choice must agree with node i
        for row in range(len(s[i])):
            C = len(s[j])  # noqa
            prob += lpSum(
                e[idx][row * C + col] for col in range(0, C)) <= s[i][row]

        # (g) column consistency: the edge choice must agree with node j
        for col in range(len(s[j])):
            R = len(s[i])  # noqa
            C = len(s[j])  # noqa
            prob += lpSum(
                e[idx][row * C + col] for row in range(0, R)) <= s[j][col]


    # Solver setup and execution.
    verbose = False
    msg = verbose
    time_limit  = 600
    assert "PULP_CBC_CMD" in pulp.listSolvers(onlyAvailable=True), (
        "Please install ILP solvers by 'sudo apt install coinor-cbc'")

    solver = pulp.PULP_CBC_CMD(mip=True,
                               msg=msg,
                               timeLimit=time_limit,
                               threads=multiprocessing.cpu_count())
    prob.solve(solver)

    status = prob.status
    objective = pulp.value(prob.objective)
    objective = float(objective) if objective is not None else -333.0
    if verbose:
        print(f"ILP Status: {LpStatus[status]}\tObjective: {objective}\t"
              f"Time: {time.time() - tic}")
        print(f"#nodes: {num_nodes},  #edges: {num_edges}")

    if prob.status in [pulp.LpStatusInfeasible]:
        raise RuntimeError(
            "Cannot run the function under the given memory budget. "
            "Please increase the memory budget.")

    # Get and check results
    # Chosen strategy index per node.
    s_val = np.full((N,), -1, dtype=np.int32)
    for i in range(N):
        s_val[i] = get_non_zero_index(s[i])

    # Chosen joint index per edge; must be consistent with the node choices.
    e_val = np.full((len(E),), -1, dtype=np.int32)

    for (idx, (i, j)) in enumerate(E):
        e_val[idx] = get_non_zero_index(e[idx])
        i_spec_index = e_val[idx] // len(s[j])
        j_spec_index = e_val[idx] % len(s[j])
        assert i_spec_index == s_val[i], f"e_val[{i}][{j}]"
        assert j_spec_index == s_val[j], f"e_val[{i}][{j}]"
        if verbose and r[idx][e_val[idx]] > 0:
            print(f"Edge cost {(i, j)} : {r[idx][e_val[idx]]}")

    last_s_val = s_val
    last_objective = objective

    if objective > INFINITY_COST:
        warnings.warn("Detect unexpected behaviors in the auto-sharding pass.")

    return s_val, e_val, objective, status

if __name__ == "__main__":
    # No standalone CLI entry point; this module is meant to be imported.
    pass