from networkx import stochastic_block_model
import matplotlib.pyplot as plt
import numpy as np
from network import QKDNetwork
from rounding import solver
from copy import deepcopy
import networkx as nx
from sa import SA
import random
import pickle

absolute_path = ""
candidate_path_num_k = 10

class XMultiClusterQKDNetwork:
    # Takes the full network topology and the pre-partitioned clusters.
    def __init__(self, graph: nx.Graph, clusterListArr: list[list[int]], read_from_pkl=False, read_from_template=False, template_file_names = [], sinle_cluster_size = 8, cluster_number = 6):
        """Build a multi-cluster QKD network from a topology and a node partition.

        Args:
            graph: full network topology; edges carry a 'weight' attribute.
            clusterListArr: node partition, e.g. [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
                means three clusters.
            read_from_pkl: load each previously pickled XCluster instead of solving it.
            read_from_template: build every cluster from a pre-solved template txt.
            template_file_names: one template file name per cluster (template mode only).
            sinle_cluster_size: (typo for "single"; name kept for caller compatibility)
                required node count per cluster in template mode.
            cluster_number: unused in this method; kept for caller compatibility.

        NOTE(review): template_file_names=[] is a mutable default argument — callers
        must not mutate it. On a cluster-size mismatch the method prints a message
        and returns, leaving the object partially initialised.
        """
        self.graph = graph
        # Worst-case congestion ratios observed so far, one per routing scheme.
        self.maxCplexAlpha = 0
        self.maxSPFAlpha = 0
        self.maxOnlineGreedyAlpha = 0
        self.clusters: list[XCluster] = []
        self.setNetXFlowZero()
        # Cluster-level topology: one node per cluster, an edge where clusters touch.
        self.clusterTopoGraph = nx.Graph()
        self.clusterListArr = clusterListArr

        # Histograms of routed path lengths (keys fixed to 1..12), one per scheme.
        self.spf_path_length_count = {1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0}
        self.online_greedy_path_length_count = {1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0}
        self.cplex_path_length_count = {1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0}

        # clusterListArr such as [[1,2,3,4],[5,6,7,8],[9,10,11,12]] means three clusters.
        # Build one XCluster per partition from its induced subgraph.
        if(not read_from_template):
            for i in range(len(clusterListArr)):
                if(read_from_pkl):
                    # Load the XCluster (including its oblivious plan) from disk.
                    # NOTE(review): pickle.load can execute arbitrary code — only
                    # load files produced by a trusted run of this program.
                    with open(f"{absolute_path}XCluster{i+1}.pkl", "rb") as file:
                        cluster = pickle.load(file)
                        self.clusters.append(cluster)
                else:
                    cluster = XCluster(i + 1, clusterListArr[i], self.graph)
                    # Persist the freshly solved XCluster for later reuse.
                    with open(f"{absolute_path}XCluster{i+1}.pkl", "wb") as file:
                        pickle.dump(cluster, file)
                    self.clusters.append(cluster)
            self.createClusterLink()
            self.calculateBridgeNodeByMultiFlow()
        else:
            # Template mode: every cluster must have exactly sinle_cluster_size nodes.
            for i in clusterListArr:
                if(len(i)!=sinle_cluster_size):
                    print(f"请指定节点数量为{sinle_cluster_size}的簇")
                    return
            if(len(clusterListArr) != len(template_file_names)):
                raise Exception("簇的数量和模板数量不匹配！")
            for i in range(len(clusterListArr)):
                cluster = XCluster(
                    i + 1, 
                    clusterListArr[i], 
                    self.graph, 
                    read_from_template=True, 
                    template_txt_name=template_file_names[i]
                )
                self.clusters.append(cluster)
            self.createClusterLink()
            self.calculateBridgeNodeByMultiFlow()
    
    # NOTE(review): dead code — an identical getResourceUtilization is defined
    # again further down this class, and that later definition overrides this
    # one at class-creation time. Kept byte-identical here; consider deleting.
    def getResourceUtilization(self):
        # Mean per-link resource utilisation, normalised by each scheme's
        # worst-case congestion ratio: [cplex, shortest-path, online-greedy].
        # Raises ZeroDivisionError if a scheme's max alpha is still 0.
        cplex_arr = []
        spf_arr = []
        og_arr = []
        for _, _, data in self.graph.edges(data=True):
            cplex_arr.append(data["cplex_flow"]/data["weight"]*(1/self.maxCplexAlpha))
            spf_arr.append(data["spf_flow"]/data["weight"]*(1/self.maxSPFAlpha))
            og_arr.append(data["online_greedy_flow"]/data["weight"]*(1/self.maxOnlineGreedyAlpha))
        return [np.mean(cplex_arr), np.mean(spf_arr), np.mean(og_arr)]
    
    def calculate_jain_fairness(self, users):
        """Return Jain's fairness index of the allocation list *users*.

        The index is (sum x)^2 / (n * sum x^2), ranging from 1/n (one user
        takes everything) to 1.0 (perfectly equal shares).

        Fix: the original raised ZeroDivisionError for an empty list or an
        all-zero allocation; both degenerate cases now return 0.0.
        """
        n = len(users)
        sum_x = sum(users)
        sum_x_squared = sum(x ** 2 for x in users)
        if n == 0 or sum_x_squared == 0:
            # Index is undefined for no users / no allocated resource.
            return 0.0
        return (sum_x ** 2) / (n * sum_x_squared)
    
    def getJainsIndex(self):
        """Jain's fairness index of the per-link load ratio (flow / weight)
        for each scheme: returns [cplex, shortest-path, online-greedy]."""
        ratios = {"cplex_flow": [], "spf_flow": [], "online_greedy_flow": []}
        for _, _, attrs in self.graph.edges(data=True):
            weight = attrs["weight"]
            for flow_key, bucket in ratios.items():
                bucket.append(attrs[flow_key] / weight)
        return [
            self.calculate_jain_fairness(ratios["cplex_flow"]),
            self.calculate_jain_fairness(ratios["spf_flow"]),
            self.calculate_jain_fairness(ratios["online_greedy_flow"]),
        ]
    
    def getResourceUtilization(self):
        """Mean per-link resource utilisation, normalised by each scheme's
        worst-case congestion ratio.

        Returns [cplex, shortest-path, online-greedy] mean utilisations.

        NOTE(review): this is a duplicate definition — an identical method
        appears earlier in the class; this later one is the effective one.
        Fix: guarded against ZeroDivisionError when no flow has been requested
        yet (all max alphas still 0, as set by setNetXFlowZero): returns
        [0.0, 0.0, 0.0] in that case.
        """
        if not (self.maxCplexAlpha and self.maxSPFAlpha and self.maxOnlineGreedyAlpha):
            return [0.0, 0.0, 0.0]
        cplex_arr = []
        spf_arr = []
        og_arr = []
        for _, _, data in self.graph.edges(data=True):
            cplex_arr.append(data["cplex_flow"] / data["weight"] / self.maxCplexAlpha)
            spf_arr.append(data["spf_flow"] / data["weight"] / self.maxSPFAlpha)
            og_arr.append(data["online_greedy_flow"] / data["weight"] / self.maxOnlineGreedyAlpha)
        return [np.mean(cplex_arr), np.mean(spf_arr), np.mean(og_arr)]
                
    def showPathLengthCount(self):
        """Print the path-length histograms collected for each routing scheme."""
        for label, counter in (
            ("spf: ", self.spf_path_length_count),
            ("online: ", self.online_greedy_path_length_count),
            ("cplex: ", self.cplex_path_length_count),
        ):
            print(label, counter)
    
    def dfs(self, plan_in_cluster, across_cluster, current_cluster_id, current_node, end_cluster_id, end_node, path, all_paths):
        """Depth-first enumeration of cross-cluster relay paths.

        plan_in_cluster: per-cluster dicts {cluster_id, edge_list, bridge_list},
            where edge_list items are (startID, endID, capacity, length) and
            bridge_list items are (exitNodeID, entryNodeID, weight).
        across_cluster: ordered cluster IDs from source cluster to target cluster.
        path: hops accumulated so far, each (clusterID, startID, endID, capacity, length).
        all_paths: output accumulator — complete paths are appended here.
        """
        # Locate this cluster's plan entry.
        # NOTE(review): if current_cluster_id is missing from plan_in_cluster,
        # current_cluster stays {} and the ['edge_list'] access below raises
        # KeyError — assumed not to happen for well-formed plans.
        current_cluster = {}
        for tmp in plan_in_cluster:
            if(tmp["cluster_id"]==current_cluster_id):
                current_cluster = tmp

        if current_cluster_id == end_cluster_id:
            # Reached the target cluster: record every edge ending at end_node.
            for edge in current_cluster['edge_list']:
                if edge[0] == current_node and edge[1] == end_node:
                    all_paths.append(path + [(current_cluster_id, edge[0], edge[1], edge[2], edge[3])])
            return
        for edge in current_cluster['edge_list']:
            if edge[0] == current_node:
                # Walk every intra-cluster edge leaving the current node.
                for bridge in current_cluster['bridge_list']:
                    if bridge[0] == edge[1]:
                        # Edge ends at a bridge node: cross into the next
                        # cluster along across_cluster and keep searching.
                        next_cluster_id = across_cluster[across_cluster.index(current_cluster_id)+1]
                        next_node = bridge[1]
                        self.dfs(plan_in_cluster, across_cluster, next_cluster_id, next_node, end_cluster_id, end_node, path + [(current_cluster_id, edge[0], edge[1], edge[2], edge[3])], all_paths)
                    
    def get_all_paths(self, plan_in_cluster, across_cluster, start_cluster_id, start_node_id, end_cluster_id, end_node_id):
        """Collect every relay path from the start node to the end node by
        running the cross-cluster DFS with an empty prefix."""
        collected = []
        self.dfs(plan_in_cluster, across_cluster, start_cluster_id,
                 start_node_id, end_cluster_id, end_node_id, [], collected)
        return collected
    

    def getMultiCandidateFlowPathWithAcrossCluster(self, k: int, startClusterID: int, startNodeID: int, endClusterID: int, endNodeID: int):
        """Return up to *k* candidate relay paths between two nodes in
        different clusters, routed along the shortest cluster-level path.

        Fix: the *k* argument was previously ignored — the module constant
        candidate_path_num_k was forwarded instead. *k* is now honoured.
        (The only visible caller already passes candidate_path_num_k, so its
        behaviour is unchanged.)
        """
        across_cluster = nx.shortest_path(self.clusterTopoGraph, startClusterID, endClusterID)
        return self.getMultiCandidateFlowPath(across_cluster, k, startClusterID, startNodeID, endClusterID, endNodeID)

    def getMultiCandidateFlowPath(self, across_cluster: list, k: int, startClusterID: int, startNodeID: int, endClusterID: int, endNodeID: int):
        """Compute up to *k* candidate relay paths for one source/destination pair.

        Given the cluster-level route (across_cluster) and the start/end cluster
        and node IDs, build for every cluster on the route the intra-cluster
        edges towards each bridge node plus the bridge links into the next
        cluster, enumerate all complete paths via DFS, score them, and return
        the k best. Each hop of a returned path is
        (clusterID, startID, endID, capacity, pathLength).
        """
        # across_cluster = nx.shortest_path(self.clusterTopoGraph, startClusterID, endClusterID)
        plan_in_cluster = []  # per-cluster plan entries (edges + bridge links)
        startCandidateNodesID = [startNodeID]
        
        for i in range(len(across_cluster) - 1):
            cluster_1 = across_cluster[i]
            cluster_2 = across_cluster[i + 1]
            edge_set = set()
            bridge_set = set()
            # Intra-cluster edges: from every entry node of cluster_1 to every
            # node of cluster_1 that borders cluster_2.
            for startCandidateNodeID in startCandidateNodesID:
                for adjacentInfo in self.clusters[cluster_1 - 1].adjacentClusterList:
                    if adjacentInfo.adjacent_cluster_id == cluster_2:
                        _capacity = self.getPathCapacityInsideCluster(cluster_1, startCandidateNodeID, adjacentInfo.node_id)
                        _length = self.getPathLengthInsideCluster(cluster_1, startCandidateNodeID, adjacentInfo.node_id)
                        # NOTE(review): a falsy capacity/length (None when no plan
                        # is found — but also a legitimate 0) is replaced by the
                        # sentinel below; confirm 0 cannot legitimately occur.
                        edge_set.add((
                            startCandidateNodeID, 
                            adjacentInfo.node_id, 
                            _capacity if _capacity else 1000000, # capacity
                            _length if _length else 0, # length
                        ))
            startCandidateNodesID = []
            # Bridge links from cluster_1 into cluster_2; their far endpoints
            # become the entry nodes for the next iteration.
            for adjacentInfo in self.clusters[cluster_1 - 1].adjacentClusterList:
                if adjacentInfo.adjacent_cluster_id == cluster_2:
                    bridge_set.add((
                        adjacentInfo.node_id, 
                        adjacentInfo.adjacent_node_id,
                        self.graph[adjacentInfo.node_uid][adjacentInfo.adjacent_node_uid]['weight']
                    ))
                    startCandidateNodesID.append(adjacentInfo.adjacent_node_id)
            plan_in_cluster.append({
                "cluster_id": cluster_1,
                "edge_list": list(edge_set),
                "bridge_list": list(bridge_set),
            })
        # Final cluster: edges from every entry node to the destination node.
        end_cluster_edge_set = set()
        for endClusterStartNode in startCandidateNodesID:
            _end_capacity = self.getPathCapacityInsideCluster(endClusterID, endClusterStartNode, endNodeID)
            _end_length = self.getPathLengthInsideCluster(endClusterID, endClusterStartNode, endNodeID)
            end_cluster_edge_set.add((
                endClusterStartNode, 
                endNodeID, 
                _end_capacity if _end_capacity else 1000000, # capacity
                _end_length if _end_length else 0, # length
            ))
        plan_in_cluster.append({
            "cluster_id": endClusterID,
            "edge_list": list(end_cluster_edge_set),
            "bridge_list": [],
        })

        all_paths = self.get_all_paths(plan_in_cluster, across_cluster, startClusterID, startNodeID, endClusterID, endNodeID)
        
        def calculate_ratio(sublist):
            # Score a path by total capacity per unit length (higher is
            # better); zero-total-length paths get a large sentinel score.
            A = sum(t[3] for t in sublist)
            B = sum(t[4] for t in sublist)
            ratio = A / B if B != 0 else 10000000
            return ratio

        ratios = [(sublist, calculate_ratio(sublist)) for sublist in all_paths]
        sorted_ratios = sorted(ratios, key=lambda x: x[1], reverse=True)
        top_k = [sublist for sublist, _ in sorted_ratios[:k]]

        # Return the k best-scoring paths.
        return top_k

    
    def calculateBridgeNodeByMultiFlow(self):
        """Precompute, for every cross-cluster SD pair, a single oblivious relay path.

        Procedure (flow simulation):
          1. for every cross-cluster SD pair, pick K candidate paths
             (shortest / highest-capacity);
          2. push comparable random traffic along every candidate path;
          3. per pair, drop the candidate whose worst link congestion ratio
             is the largest;
          4. repeat until each pair keeps exactly one relay path.
        The result is stored in self.multi_flow_paths.
        """
        print("开始计算中继节点的Oblivious方案")
        k = candidate_path_num_k # number of candidate paths per SD pair
        # Enumerate every ordered pair of distinct clusters and every
        # cross-cluster node pair inside them.
        p = []
        for startClusterID in range(1, len(self.clusterListArr)+1):
            for endClusterID in range(1, len(self.clusterListArr)+1):
                if(startClusterID==endClusterID):
                    continue
                for startNodeUniqueID in self.clusterListArr[startClusterID-1]:
                    for endNodeUniqueID in self.clusterListArr[endClusterID-1]:
                        if(startNodeUniqueID==endNodeUniqueID):
                            continue
                        startNodeID = self.clusters[startClusterID-1].getNodeIDFromUniqueID(startNodeUniqueID)
                        endNodeID = self.clusters[endClusterID-1].getNodeIDFromUniqueID(endNodeUniqueID)
                        k_candidate_paths = self.getMultiCandidateFlowPathWithAcrossCluster(candidate_path_num_k, startClusterID, startNodeID, endClusterID, endNodeID)
                        p.append({
                            "start_cluster_id": startClusterID,
                            "end_cluster_id": endClusterID,
                            "start_id": startNodeID,
                            "start_uid": startNodeUniqueID,
                            "end_id": endNodeID,
                            "end_uid": endNodeUniqueID,
                            "candidate_paths": k_candidate_paths,
                        })
        # Each candidate path is a list of
        # (clusterID, startID, endID, capacity, pathLength) hops.
        self.multi_flow_paths = p

        def requestFlowForCandidatePath(candidate_path, flow):
            # Add *flow* to 'candidate_flow' on every physical link used by
            # candidate_path, expanding each hop's intra-cluster oblivious route.
            if(len(candidate_path)==0):
                return
            bridgeEndCluster = candidate_path[0]
            # First cluster on the path.
            # NOTE(review): debug leftover — any failure here is swallowed
            # and "666" is printed instead of being handled or re-raised.
            try:
                if(bridgeEndCluster[1]!=bridgeEndCluster[2]):
                    cluster_id = bridgeEndCluster[0]
                    route = self.getPathInsideCluster(cluster_id, bridgeEndCluster[1], bridgeEndCluster[2])
                    for i in route:
                        start_uid = self.clusters[cluster_id-1].getUniqueIDFromNodeID(i[0])
                        end_uid = self.clusters[cluster_id-1].getUniqueIDFromNodeID(i[1])
                        self.graph[start_uid][end_uid]["candidate_flow"] += flow
            except Exception as e:
                print("666")
            # Then the bridge link into, and the route inside, each later cluster.
            for j in range(1, len(candidate_path)):
                start_cluster_link = candidate_path[j-1]
                end_cluster_link = candidate_path[j]
                bridge_link_start_uid = self.clusters[start_cluster_link[0]-1].getUniqueIDFromNodeID(start_cluster_link[2])
                bridge_link_end_uid = self.clusters[end_cluster_link[0]-1].getUniqueIDFromNodeID(end_cluster_link[1])
                self.graph[bridge_link_start_uid][bridge_link_end_uid]["candidate_flow"] += flow

                # bridgeEndCluster = candidate_path[0]
                if(end_cluster_link[1]!=end_cluster_link[2]):
                    cluster_id = end_cluster_link[0]
                    route = self.getPathInsideCluster(cluster_id, end_cluster_link[1], end_cluster_link[2])
                    try:
                        for i in route:
                            start_uid = self.clusters[cluster_id-1].getUniqueIDFromNodeID(i[0])
                            end_uid = self.clusters[cluster_id-1].getUniqueIDFromNodeID(i[1])
                            self.graph[start_uid][end_uid]["candidate_flow"] += flow
                    except Exception as e:
                        print("666")

        def removeMaxCRCandidatePath():
            # For every SD pair that still has more than one candidate, drop
            # the candidate whose worst link congestion ratio
            # (candidate_flow / weight) is the largest.
            for multi_flow_path in self.multi_flow_paths:
                if(len(multi_flow_path["candidate_paths"])<=1):
                    continue
                # NOTE(review): if every candidate path is empty these stay -1
                # and the del below removes the LAST candidate — assumed not
                # to happen for well-formed candidate sets.
                max_cr_candidate_path_idx = -1
                max_candidate_path_cr = -1
                for candidate_path_idx, candidate_path in enumerate(multi_flow_path["candidate_paths"]):
                    if(len(candidate_path)==0):
                        continue
                    max_cr = -1
                    bridgeEndCluster = candidate_path[0]
                    # Congestion inside the first cluster on the path.
                    if(bridgeEndCluster[1]!=bridgeEndCluster[2]):
                        cluster_id = bridgeEndCluster[0]
                        route = self.getPathInsideCluster(cluster_id, bridgeEndCluster[1], bridgeEndCluster[2])
                        for i in route:
                            start_uid = self.clusters[cluster_id-1].getUniqueIDFromNodeID(i[0])
                            end_uid = self.clusters[cluster_id-1].getUniqueIDFromNodeID(i[1])
                            cr = self.graph[start_uid][end_uid]["candidate_flow"] / self.graph[start_uid][end_uid]["weight"]
                            if(cr>max_cr):
                                max_cr = cr
                    # Congestion on the bridge links and the remaining clusters.
                    for j in range(1, len(candidate_path)):
                        start_cluster_link = candidate_path[j-1]
                        end_cluster_link = candidate_path[j]
                        bridge_link_start_uid = self.clusters[start_cluster_link[0]-1].getUniqueIDFromNodeID(start_cluster_link[2])
                        bridge_link_end_uid = self.clusters[end_cluster_link[0]-1].getUniqueIDFromNodeID(end_cluster_link[1])
                        cr = self.graph[bridge_link_start_uid][bridge_link_end_uid]["candidate_flow"] / self.graph[bridge_link_start_uid][bridge_link_end_uid]["weight"]
                        if(cr>max_cr):
                            max_cr = cr
                        if(end_cluster_link[1]!=end_cluster_link[2]):
                            cluster_id = end_cluster_link[0]
                            route = self.getPathInsideCluster(cluster_id, end_cluster_link[1], end_cluster_link[2])
                            for i in route:
                                start_uid = self.clusters[cluster_id-1].getUniqueIDFromNodeID(i[0])
                                end_uid = self.clusters[cluster_id-1].getUniqueIDFromNodeID(i[1])
                                cr = self.graph[start_uid][end_uid]["candidate_flow"] / self.graph[start_uid][end_uid]["weight"]
                                if(cr>max_cr):
                                    max_cr = cr
                    if(max_cr>max_candidate_path_cr):
                        max_cr_candidate_path_idx = candidate_path_idx
                        max_candidate_path_cr = max_cr
                del multi_flow_path["candidate_paths"][max_cr_candidate_path_idx]


        for _ in range(k):
            # Clear the simulated network load.
            self.setNetXFlowZero()
            # Push (random, comparable) traffic along every remaining candidate.
            for tmp_multi_flow_path in self.multi_flow_paths:
                for tmp_multi_flow in tmp_multi_flow_path["candidate_paths"]:
                    # for i in range(10):
                    flow = random.randint(45, 55)
                    requestFlowForCandidatePath(tmp_multi_flow, flow)
            # If a pair still has >1 candidates, drop its most congested one.
            removeMaxCRCandidatePath()
        

    def getClusterNum(self):
        # Number of clusters managed by this network.
        return len(self.clusters)
        
    def setNetXFlowZero(self):
        """Reset every per-link flow counter, every cluster's CPLEX link load,
        and all max congestion ratios back to zero."""
        flow_keys = ("spf_flow", "online_greedy_flow", "cplex_flow", "candidate_flow")
        for _, _, attrs in self.graph.edges(data=True):
            for key in flow_keys:
                attrs[key] = 0
        for cluster in self.clusters:
            cluster.clearCplexLinkLoad()
        self.maxCplexAlpha = 0
        self.maxOnlineGreedyAlpha = 0
        self.maxSPFAlpha = 0

    # ⭐ Entry point
    def requestFlow(
            self, 
            startClusterID: int, 
            startNodeID: int, 
            endClusterID: int, 
            endNodeID: int, 
            flow: float,
            brokenLinks: list,
        ):
        """Apply one traffic demand of *flow* between two nodes with all three
        schemes (SPF, online-greedy, CPLEX/oblivious) in turn, updating each
        scheme's link loads and max congestion ratio."""
        demand = (startClusterID, startNodeID, endClusterID, endNodeID, flow, brokenLinks)
        self.getSPFResult(*demand)
        self.getOnlineGreedyResult(*demand)
        self.getCplexResult(*demand)

    # Shortest-path routing: congestion-ratio bookkeeping for the SPF scheme.
    def getSPFResult(
            self, 
            startClusterID: int, 
            startNodeID: int, 
            endClusterID: int, 
            endNodeID: int, 
            flow: float, 
            brokenLinks: list,
        ):
        """Route *flow* over the hop-count shortest path, avoiding brokenLinks.

        Side effects: increments 'spf_flow' on every link of the path,
        refreshes self.maxSPFAlpha, appends the path to
        print_path/spf_paths.txt and updates the SPF path-length histogram.
        """
        uniqueStartID = self.clusters[startClusterID - 1].getNode(startNodeID).uniqueID
        uniqueEndID = self.clusters[endClusterID - 1].getNode(endNodeID).uniqueID
        H = nx.Graph() # temporary graph without the broken links
        H.add_nodes_from(self.graph.nodes(data=True))
        # print("SPF有破损边")
        # Copy every edge not listed (in either direction) in brokenLinks.
        H.add_edges_from((u, v, d) for u, v, d in self.graph.edges(data=True) if([u, v] not in brokenLinks and [v, u] not in brokenLinks))
        path = nx.shortest_path(H, source=uniqueStartID, target=uniqueEndID)
        if(len(path)!=1 and len(path)!=0):
            # Log the path and count its length in links (len(path) - 1).
            # NOTE(review): the histogram only has keys 1..12 — a longer path
            # raises KeyError; the print_path/ directory must already exist.
            # The not/else split is equivalent to += since counts start at 0.
            with open("print_path/spf_paths.txt", "+a") as file:
                file.write(f"{(startClusterID, startNodeID, endClusterID, endNodeID)}, {path}\n")
                if(not self.spf_path_length_count[len(path)-1]):
                    self.spf_path_length_count[len(path)-1] = 1
                else:
                    self.spf_path_length_count[len(path)-1] += 1
        # Apply the flow and refresh the worst congestion ratio.
        for idx in range(len(path) - 1):
            link_start_id = path[idx]
            link_end_id = path[idx + 1]
            self.graph[link_start_id][link_end_id]["spf_flow"] += flow
            link_alpha = self.graph[link_start_id][link_end_id]["spf_flow"] / self.graph[link_start_id][link_end_id]["weight"]
            self.maxSPFAlpha = link_alpha if link_alpha > self.maxSPFAlpha else self.maxSPFAlpha

    # Pick, among up to k Yen shortest paths, the one with the widest bottleneck.
    def one_path_from_yen_k_shortest_paths(self, source, target, k):
        """Yen-style k-shortest-paths search on self.graph (by 'weight'), then
        return the candidate whose minimum residual capacity
        (weight - online_greedy_flow) over its links is maximal.

        NOTE(review): edges are temporarily removed from self.graph and
        restored with their attributes; an exception other than
        NetworkXNoPath between removal and restore would leave the graph
        mutated. The initial dijkstra_path raises NetworkXNoPath (uncaught
        here) when target is unreachable.
        """
        def dijkstra_path(G, source, target):
            return nx.dijkstra_path(G, source, target, "weight")
        G = self.graph
        paths = []
        candidates = []
        shortest_path = dijkstra_path(G, source, target)
        if not shortest_path:
            return paths
        paths.append(shortest_path)
        path_costs = [nx.path_weight(G, shortest_path, "weight")]
        for i in range(1, k):
            # Spur from every node of the most recently accepted path.
            for j in range(len(paths[-1]) - 1):
                spur_node = paths[-1][j]
                root_path = paths[-1][:j + 1]
                removed_edges = []
                # Temporarily remove edges used by already-accepted paths
                # sharing this root, forcing the spur path to deviate.
                for path in paths:
                    if len(path) > j and path[:j + 1] == root_path:
                        u, v = path[j], path[j + 1]
                        if G.has_edge(u, v):
                            removed_edges.append((u, v, G[u][v]))
                            G.remove_edge(u, v)
                try:
                    spur_path = dijkstra_path(G, spur_node, target)
                    if spur_path:
                        total_path = root_path[:-1] + spur_path
                        total_path_cost = nx.path_weight(G, total_path, "weight")
                        candidates.append((total_path_cost, total_path))
                except nx.NetworkXNoPath:
                    pass
                # Restore the removed edges together with their attributes.
                for u, v, edge_attr in removed_edges:
                    G.add_edge(u, v, **edge_attr)
            if not candidates:
                break
            candidates.sort()
            paths.append(candidates[0][1])
            path_costs.append(candidates[0][0])
            candidates.pop(0)
        # Among the collected paths, keep the one whose bottleneck residual
        # capacity (w.r.t. current online-greedy loads) is largest.
        max_flow_capacity = -1000000
        max_flow_path = []
        for path in paths:
            min_flow_capacity = 1000000
            for i in range(len(path) - 1):
                start_node = path[i]
                end_node = path[i+1]
                remain_resource = self.graph[start_node][end_node]["weight"]-self.graph[start_node][end_node]["online_greedy_flow"]
                if(remain_resource<min_flow_capacity):
                    min_flow_capacity=remain_resource
            if(min_flow_capacity > max_flow_capacity):
                max_flow_path = path
                max_flow_capacity = min_flow_capacity
        return max_flow_path

    def getOnlineGreedyResult(
            self, 
            startClusterID: int, 
            startNodeID: int, 
            endClusterID: int, 
            endNodeID: int, 
            flow: float, 
            brokenLinks: list,
        ):
        """Route *flow* with the online-greedy scheme: among up to 10 Yen
        shortest paths, use the one with the widest residual bottleneck, and
        add the flow to 'online_greedy_flow' on its links.

        NOTE(review): H is built here with brokenLinks filtered out but is
        never used — one_path_from_yen_k_shortest_paths searches self.graph,
        so broken links are NOT avoided by this scheme. Confirm intent.
        """
        uniqueStartID = self.clusters[startClusterID - 1].getNode(startNodeID).uniqueID
        uniqueEndID = self.clusters[endClusterID - 1].getNode(endNodeID).uniqueID
        H = nx.Graph()  # temporary graph without the broken links (unused, see NOTE)
        H.add_nodes_from(self.graph.nodes(data=True))
        H.add_edges_from((u, v, d) for u, v, d in self.graph.edges(data=True) if([u, v] not in brokenLinks and [v, u] not in brokenLinks))

        path = self.one_path_from_yen_k_shortest_paths(uniqueStartID, uniqueEndID, 10)
        
        if(len(path)!=1 and len(path)!=0):
            # Log the path and count its length in links (len(path) - 1).
            # NOTE(review): histogram keys are fixed to 1..12; longer paths
            # raise KeyError, and print_path/ must already exist.
            with open("print_path/online_greedy_paths.txt", "+a") as file:
                file.write(f"{(startClusterID, startNodeID, endClusterID, endNodeID)}, {path}\n")
                if(not self.online_greedy_path_length_count[len(path)-1]):
                    self.online_greedy_path_length_count[len(path)-1] = 1
                else:
                    self.online_greedy_path_length_count[len(path)-1] += 1

        # Apply the flow; the not/else split is equivalent to += when the
        # current value is 0 (as initialised by setNetXFlowZero).
        for idx in range(len(path) - 1):
            link_start_id = path[idx]
            link_end_id = path[idx + 1]
            if(not self.graph[link_start_id][link_end_id]["online_greedy_flow"]):
                self.graph[link_start_id][link_end_id]["online_greedy_flow"] = flow
            else:
                self.graph[link_start_id][link_end_id]["online_greedy_flow"] += flow
            link_alpha = self.graph[link_start_id][link_end_id]["online_greedy_flow"] / self.graph[link_start_id][link_end_id]["weight"]
            self.maxOnlineGreedyAlpha = link_alpha if link_alpha > self.maxOnlineGreedyAlpha else self.maxOnlineGreedyAlpha
    
    
    def addCplexFlow(self, clusterID: int, startID: int, endID: int, brokenLinks: list, flow):
        """Request *flow* inside one cluster (between two cluster-local node
        IDs) and mirror the cluster's resulting per-link loads onto the global
        graph's 'cplex_flow' attributes.

        No-op when startID == endID. Also refreshes self.maxCplexAlpha from
        the cluster's reported congestion ratio.
        """
        if startID == endID:
            return
        cluster = self.clusters[clusterID - 1]
        cluster.requestCplexFlow(startID, endID, flow, brokenLinks)
        # Overwrite (not increment) the global load with the cluster's view.
        for item in cluster.getRouteOfSD(startID, endID):
            startUID = cluster.getUniqueIDFromNodeID(item[0])
            endUID = cluster.getUniqueIDFromNodeID(item[1])
            self.graph[startUID][endUID]['cplex_flow'] = cluster.getLinkLoad(item[0], item[1])
        self.maxCplexAlpha = max(self.maxCplexAlpha, cluster.getCplexAlpha())
    
    # Capacity figure for the oblivious route between two nodes in a cluster.
    def getPathCapacityInsideCluster(self, clusterID, nodeID1, nodeID2):
        """Return a capacity value for the precomputed oblivious route from
        nodeID1 to nodeID2 inside cluster clusterID, or None (implicitly)
        when no plan matches; the match is direction-sensitive.

        NOTE(review): despite the original "bottleneck" wording, this returns
        the MAXIMUM link capacity along the route, not the minimum — confirm
        whether min() was intended.
        """
        for i in self.clusters[clusterID-1].obliviousPlanList:
            if(i.startID==nodeID1 and i.endID==nodeID2):
                max_capacity = 0
                for relay_path in i.route:
                    link_start_cid, link_end_cid = relay_path[0], relay_path[1]
                    tmp_capacity = self.clusters[clusterID-1].getLinkCapacity(link_start_cid, link_end_cid)
                    max_capacity = tmp_capacity if tmp_capacity > max_capacity else max_capacity
                return max_capacity
    
    def getPathInsideCluster(self, clusterID, nodeID1, nodeID2):
        """Return the precomputed oblivious route from nodeID1 to nodeID2
        inside cluster clusterID, or None when no plan matches (the match is
        direction-sensitive)."""
        plans = self.clusters[clusterID - 1].obliviousPlanList
        return next(
            (plan.route for plan in plans
             if plan.startID == nodeID1 and plan.endID == nodeID2),
            None,
        )

    def getPathLengthInsideCluster(self, clusterID, nodeID1, nodeID2):
        """Length (link count) of the oblivious route between two nodes,
        matched in either direction; None when no plan exists."""
        for plan in self.clusters[clusterID - 1].obliviousPlanList:
            forward = plan.startID == nodeID1 and plan.endID == nodeID2
            backward = plan.startID == nodeID2 and plan.endID == nodeID1
            if forward or backward:
                return len(plan.route)
        return None
            
    def getBridgeNodesByMultiFlow(
            self, 
            startClusterID: int, 
            startNodeID: int, 
            endClusterID: int, 
            endNodeID: int
        ):
        """Look up the surviving (first) candidate relay path for the given
        cross-cluster SD pair, as computed by calculateBridgeNodeByMultiFlow.
        Returns a list of (clusterID, startNode, endNode, ...) hops, or None
        (implicitly) when the pair is unknown."""
        wanted = (startClusterID, endClusterID, startNodeID, endNodeID)
        for entry in self.multi_flow_paths:
            key = (entry["start_cluster_id"], entry["end_cluster_id"],
                   entry["start_id"], entry["end_id"])
            if key == wanted:
                return entry["candidate_paths"][0]

    def addCplexBetweenClusterLinkLoad(self, routing_list: list, flow):
        """Add *flow* to every inter-cluster bridge link along *routing_list*
        (items are (clusterID, entryNodeID, exitNodeID)) and refresh the max
        CPLEX congestion ratio."""
        for prev_hop, next_hop in zip(routing_list, routing_list[1:]):
            src_cluster = self.clusters[prev_hop[0] - 1]
            dst_cluster = self.clusters[next_hop[0] - 1]
            u = src_cluster.getUniqueIDFromNodeID(prev_hop[2])
            v = dst_cluster.getUniqueIDFromNodeID(next_hop[1])
            edge = self.graph[u][v]
            if not edge["cplex_flow"]:
                edge["cplex_flow"] = flow
            else:
                edge["cplex_flow"] += flow
            alpha = edge["cplex_flow"] / edge["weight"]
            if alpha > self.maxCplexAlpha:
                self.maxCplexAlpha = alpha
    
    def getTotalPathOfCplex(self, routing_list):
        """Expand a cluster-level routing list into the full list of physical
        links, each as a [startUID, endUID] pair of global unique node IDs.

        routing_list items are (clusterID, entryNodeID, exitNodeID): for each
        hop the intra-cluster oblivious route is expanded, then the bridge
        link to the next cluster is appended; finally the last cluster's
        internal route is expanded.
        """
        path = []
        for i in range(len(routing_list)-1):
            _clusterID, _startNode, _endNode = routing_list[i][0], routing_list[i][1], routing_list[i][2]
            # Intra-cluster segment, translated to global unique IDs.
            for plan in self.clusters[_clusterID-1].obliviousPlanList:
                if(plan.startID == _startNode and plan.endID == _endNode):
                    path.extend([
                        [
                            self.clusters[_clusterID-1].getUniqueIDFromNodeID(relay_path[0]),
                            self.clusters[_clusterID-1].getUniqueIDFromNodeID(relay_path[1]),
                        ] for relay_path in plan.route
                    ])
            # Bridge link from this cluster's exit node to the next cluster's entry node.
            startCluster = self.clusters[routing_list[i][0] - 1]
            endCluster = self.clusters[routing_list[i+1][0] - 1]
            link_start_id = startCluster.getUniqueIDFromNodeID(routing_list[i][2])
            link_end_id = endCluster.getUniqueIDFromNodeID(routing_list[i+1][1])
            path.append([link_start_id, link_end_id])
        # Internal route of the final cluster on the path.
        _endClusterID, _endClusterStartNode, _endClusterEndNode = routing_list[-1][0], routing_list[-1][1], routing_list[-1][2]
        for plan in self.clusters[_endClusterID-1].obliviousPlanList:
            if(plan.startID == _endClusterStartNode and plan.endID == _endClusterEndNode):
                path.extend([
                    [
                        self.clusters[_endClusterID-1].getUniqueIDFromNodeID(relay_path[0]),
                        self.clusters[_endClusterID-1].getUniqueIDFromNodeID(relay_path[1]),
                    ] for relay_path in plan.route
                ])
        return path
    
    def getCplexResult(
            self, 
            startClusterID: int, 
            startNodeID: int, 
            endClusterID: int, 
            endNodeID: int, 
            flow: float, 
            brokenLinks: list,
        ):
        """Route *flow* with the CPLEX/oblivious scheme and log the path.

        Intra-cluster demands go straight to addCplexFlow; cross-cluster
        demands follow the precomputed relay path from
        getBridgeNodesByMultiFlow, applying per-hop intra-cluster flow plus
        the inter-cluster bridge-link loads.

        NOTE(review): the path-length histogram only has keys 1..12 — longer
        expanded paths raise KeyError; print_path/ must already exist; the
        cross-cluster branch assumes getBridgeNodesByMultiFlow found a path.
        """
        if(startClusterID==endClusterID):
            self.addCplexFlow(startClusterID, startNodeID, endNodeID, brokenLinks, flow)
            
            # Expand the intra-cluster route for path-length statistics.
            path = []
            for plan in self.clusters[startClusterID-1].obliviousPlanList:
                if(plan.startID == startNodeID and plan.endID == endNodeID):
                    path.extend([
                        [
                            self.clusters[startClusterID-1].getUniqueIDFromNodeID(relay_path[0]),
                            self.clusters[startClusterID-1].getUniqueIDFromNodeID(relay_path[1]),
                        ] for relay_path in plan.route
                    ])
            
            if(len(path)!=0):
                with open("print_path/cplex_paths.txt", "+a") as file:
                    file.write(f"{(startClusterID, startNodeID, endClusterID, endNodeID)}, {path}\n")
                    if(not self.cplex_path_length_count[len(path)]):
                        self.cplex_path_length_count[len(path)] = 1
                    else:
                        self.cplex_path_length_count[len(path)] += 1

        else:
            routing_list = self.getBridgeNodesByMultiFlow(startClusterID, startNodeID, endClusterID, endNodeID)

            # Expand the full relay path for path-length statistics.
            path = self.getTotalPathOfCplex(routing_list)
            with open("print_path/cplex_paths.txt", "+a") as file:
                file.write(f"{(startClusterID, startNodeID, endClusterID, endNodeID)}, {path}\n")
                if(not self.cplex_path_length_count[len(path)]):
                    self.cplex_path_length_count[len(path)] = 1
                else:
                    self.cplex_path_length_count[len(path)] += 1

            # Apply intra-cluster flow per hop, then the bridge-link loads.
            for i in routing_list:
                _clusterID, _startNode, _endNode = i[0], i[1], i[2]
                self.addCplexFlow(_clusterID, _startNode, _endNode, brokenLinks, flow)
            self.addCplexBetweenClusterLinkLoad(routing_list, flow)


    def showClusterTopo(self):
        """Render the cluster-level topology graph with matplotlib."""
        draw_options = dict(
            with_labels=True,
            node_color='red',
            node_size=200,
            font_size=10,
            font_weight='bold',
        )
        nx.draw(self.clusterTopoGraph, **draw_options)
        plt.show()
    
    def addEdgeToTopoGraph(self, edge):
        """Add an undirected cluster-to-cluster edge to the cluster topology,
        skipping self-loops and edges already present in either orientation."""
        u, v = edge
        topo = self.clusterTopoGraph
        if u == v or topo.has_edge(u, v) or topo.has_edge(v, u):
            return
        topo.add_edge(u, v)

    def createClusterLink(self):
        """Populate every cluster's adjacentClusterList and build the
        cluster-level topology graph.

        For each node, every neighbour that lives in a different cluster
        yields one adjacency record and (at most) one cluster-level edge.

        NOTE(review): clusterID1/clusterID2 are misleading names — they are
        the 1-based node positions within their clusters (index within
        clusterListArr), not cluster IDs.
        """
        for idx, item in enumerate(self.clusterListArr):
            for nodeUID in item:
                for adjNodeUID in self.graph.neighbors(nodeUID):
                    for idx1, item1 in enumerate(self.clusterListArr):
                        if(adjNodeUID in item1 and idx != idx1):
                            clusterID1 = self.clusterListArr[idx].index(nodeUID) + 1
                            clusterID2 = self.clusterListArr[idx1].index(adjNodeUID) + 1
                            # Field order assumed from this call site; the
                            # adjacentCluster definition is outside this view.
                            self.clusters[idx].adjacentClusterList.append(XCluster.adjacentCluster(
                                idx1+1, 
                                clusterID1, 
                                nodeUID, 
                                clusterID2, 
                                adjNodeUID
                            ))
                            self.addEdgeToTopoGraph((idx + 1, idx1 + 1))


class XCluster:
    """One cluster (sub-network) of the multi-cluster QKD network.

    A cluster owns its own node and link lists plus a set of pre-computed
    oblivious routing plans, obtained either by solving an LP on the cluster
    sub-graph or by loading a previously solved template txt file.
    """

    def __init__(
        self,
        cluster_id: int,
        subGraphNodeIDs: list[int],
        graph: nx.Graph,
        read_from_template = False,  # whether to load pre-solved data from a template file
        template_txt_name = "",      # file name of the cluster template txt (under template/)
    ):
        self.cluster_id = cluster_id
        self.nodeNum: int = len(subGraphNodeIDs)
        self.subGraphNodeIDs = subGraphNodeIDs
        self.template_txt_name = template_txt_name

        self.nodeList: list[XCluster.Node] = []
        self.linkList: list[XCluster.Link] = []

        self.obliviousPlanList: list[XCluster.ObliviousPlan] = []
        # Left empty for now; populated when inter-cluster links are created.
        self.adjacentClusterList: list[XCluster.adjacentCluster] = []

        # Build the cluster graph from the sub-graph node IDs.
        self.clusterGraph = nx.Graph()
        for nodeID in subGraphNodeIDs:
            self.clusterGraph.add_node(nodeID)
        for edge in graph.edges:
            if edge[0] in subGraphNodeIDs and edge[1] in subGraphNodeIDs:
                # Copy the edge together with its weight (link capacity).
                self.clusterGraph.add_edge(edge[0], edge[1], weight = graph[edge[0]][edge[1]]['weight'])

        if(not read_from_template):
            self.readDataFromNetworkxGraph()
            print("开始求解子图LP")
            self.sloveClusterLP()
        else:
            if(len(self.template_txt_name) == 0):
                raise Exception("请指定模板的名称")
            self.readDataFromTxt()

    class Link:
        # A bidirectional link inside the cluster (node IDs are cluster-local).
        def __init__(self, startNodeID: int, endNodeID: int, capacity: int):
            self.startNodeID = startNodeID
            self.endNodeID = endNodeID
            self.capacity = capacity
            self.cplex_load = 0  # load accumulated by the CPLEX-based routing

    class Node:
        # A cluster node; nodeID is cluster-local (1-based), uniqueID is global.
        def __init__(self, nodeID: int, uniqueID: int):
            self.nodeID = nodeID        # cluster-local node ID
            self.uniqueID = uniqueID    # global (whole-network) node ID
            self.adjacentNodes = []     # cluster-local IDs of neighbouring nodes
            self.capacity = 0           # node capacity: sum of incident link capacities

    class PathLoad:
        # Used to assess the load situation of the oblivious paths between a
        # source-destination node pair (m, n).
        def __init__(self, m: int, n: int, load_index: float):
            self.m = m
            self.n = n
            self.load_index = load_index

    class ObliviousPlan:
        class fixFlow:
            # One detour ("fix") entry: when main-route link (link[0], link[1])
            # is broken, send `ratio` of the flow over link (link[2], link[3]).
            def __init__(self, link: list[int], ratio: float):
                self.link = link
                self.ratio = ratio

        def __init__(self, startID: int, endID: int, route: list, fixPath: list): # oblivious routing plan
            self.startID = startID
            self.endID = endID
            self.route = route
            # fixPath items look like { link: [i, j, a, b], ratio: 0.3442 }
            self.fixPath: list[XCluster.ObliviousPlan.fixFlow] = fixPath

    class adjacentCluster:
        # Describes one inter-cluster link: the neighbouring cluster's ID and the
        # (cluster-local ID, global UID) of the endpoint node on each side.
        def __init__(self, adjacent_cluster_id: int, node_id: int, node_uid: int, adjacent_node_id: int, adjacent_node_uid: int):
            self.adjacent_cluster_id = adjacent_cluster_id
            self.node_id = node_id
            self.node_uid = node_uid
            self.adjacent_node_id = adjacent_node_id
            self.adjacent_node_uid = adjacent_node_uid

    def getRouteOfSD(self, startID, endID):
        """Return the oblivious main route for the (startID, endID) pair,
        or None if no plan exists for that pair."""
        for plan in self.obliviousPlanList:
            # matching routing plan found
            if(plan.startID == startID and plan.endID == endID):
                return plan.route

    def showCluster(self):
        """Draw the cluster topology stored in its template txt file.

        BUG FIX: the original implementation read `self.txt_path`, an
        attribute that is never assigned anywhere in this class, so every
        call raised AttributeError. It now uses the same template path that
        readDataFromTxt() reads.
        """
        path = f"{absolute_path}template/{self.template_txt_name}"
        with open(path, 'r') as file:
            lines = file.readlines()
        n = int(lines[0])  # node count
        G = nx.Graph()
        G.add_nodes_from(range(1, n + 1))
        edge_nums = int(lines[2])  # edge count
        for i in range(3, 3 + edge_nums):
            start, end, weight = map(int, lines[i].split())
            G.add_edge(start, end, weight=weight)
        fig, ax = plt.subplots(figsize=(50, 50))
        pos = nx.spring_layout(G, seed=41)
        nx.draw(G, pos, ax=ax, with_labels=True, node_color='red', node_size=300, font_size=8, alpha=1)
        edge_labels = nx.get_edge_attributes(G, 'weight')
        nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=6)
        plt.show()

    def getCplexAlpha(self) -> float:
        """Return the network load factor: max over links of load / capacity.

        NOTE(review): assumes every link has nonzero capacity (generated
        weights elsewhere are 20..100) — confirm for template-loaded clusters.
        """
        alpha = 0
        for link in self.linkList:
            if(link.cplex_load / link.capacity > alpha):
                alpha = link.cplex_load / link.capacity
        return alpha

    def requestCplexFlow(self, startID, endID, flow, brokenLinks):
        """Route `flow` between startID and endID along the oblivious plan.

        Links listed in `brokenLinks` (pairs of global unique IDs) divert
        their flow onto the plan's pre-computed detour paths; healthy links
        carry the full flow on the main route.
        """
        # Translate broken links from global unique IDs to cluster-local IDs.
        brokenLinksID = [[self.getNodeIDFromUniqueID(i[0]), self.getNodeIDFromUniqueID(i[1])] for i in brokenLinks]
        for plan in self.obliviousPlanList:
            # matching routing plan found
            if(plan.startID == startID and plan.endID == endID):
                for relay_link in plan.route:
                    route_start_node_id = relay_link[0] # cluster-local ID, not unique ID
                    route_end_node_id = relay_link[1]   # cluster-local ID, not unique ID
                    if([route_start_node_id, route_end_node_id] in brokenLinksID or [route_end_node_id, route_start_node_id] in brokenLinksID):
                        # link is broken: spread the flow over the detour paths
                        for fix in plan.fixPath:
                            if(fix.link[0]==route_start_node_id and fix.link[1]==route_end_node_id):
                                self.addLinkLoad(fix.link[2], fix.link[3], flow * fix.ratio)
                    else:
                        # link is healthy: send everything down the main route
                        self.addLinkLoad(route_start_node_id, route_end_node_id, flow)

    def addLinkLoad(self, startID, endID, load):
        """Add `load` onto the link (startID, endID), matching either direction."""
        for link in self.linkList:
            if link.startNodeID == startID and link.endNodeID == endID:
                link.cplex_load += load
            if link.startNodeID == endID and link.endNodeID == startID:
                link.cplex_load += load

    def clearCplexLinkLoad(self):
        """Reset the accumulated CPLEX load on every link to zero."""
        for link in self.linkList:
            link.cplex_load = 0

    def getLinkLoad(self, startID, endID):
        """Return the load of link (startID, endID) in either direction,
        or -1 if no such link exists."""
        for link in self.linkList:
            if link.startNodeID == startID and link.endNodeID == endID:
                return link.cplex_load
            if link.startNodeID == endID and link.endNodeID == startID:
                return link.cplex_load
        return -1

    def getLinkCapacity(self, startID, endID):
        """Return the capacity of link (startID, endID) in either direction,
        or -1 if no such link exists."""
        for link in self.linkList:
            if link.startNodeID == startID and link.endNodeID == endID:
                return link.capacity
            if link.startNodeID == endID and link.endNodeID == startID:
                return link.capacity
        return -1

    def getNode(self, nodeId) -> Node:
        """Return the node with the given cluster-local ID, or a sentinel
        Node(-1, -1) when the ID is out of range."""
        if nodeId - 1 >= len(self.nodeList) or nodeId - 1 < 0:
            print(f"获取节点失败 {self.cluster_id}: {nodeId}")
            return self.Node(-1,-1)
        return self.nodeList[nodeId - 1]

    def getNodeIDFromUniqueID(self, uniqueID: int) -> int:
        """Map a global unique ID to its cluster-local node ID (-1 if absent)."""
        for node in self.nodeList:
            if node.uniqueID == uniqueID:
                return node.nodeID
        return -1

    def getUniqueIDFromNodeID(self, nodeID: int) -> int:
        """Map a cluster-local node ID to its global unique ID (-1 if absent)."""
        for node in self.nodeList:
            if node.nodeID == nodeID:
                return node.uniqueID
        return -1

    def readDataFromNetworkxGraph(self):
        """Populate nodeList/linkList from the cluster's networkx graph."""
        G = self.clusterGraph
        self.nodeNum = G.number_of_nodes()
        # Cluster-local IDs are assigned 1..N in node-iteration order.
        for idx, item in enumerate(G.nodes()):
            self.nodeList.append(self.Node(idx+1, uniqueID=item))
        for edge in G.edges(data=True):
            startNode = self.getNodeIDFromUniqueID(edge[0])
            endNode = self.getNodeIDFromUniqueID(edge[1])
            capacity = edge[2]['weight']
            self.nodeList[startNode - 1].adjacentNodes.append(endNode)
            self.nodeList[endNode - 1].adjacentNodes.append(startNode)
            self.linkList.append(self.Link(startNode, endNode, capacity))

    def sloveClusterLP(self):
        """Solve the oblivious-routing LP for this cluster and populate
        self.obliviousPlanList with one plan per reachable (m, n) pair.

        NOTE: the method name keeps the historical typo ("slove");
        renaming it would break external callers.
        """
        G = nx.Graph()
        G.add_nodes_from(range(1, self.nodeNum + 1))
        for link in self.linkList:
            G.add_edge(link.startNodeID, link.endNodeID, weight=link.capacity)
        # Solve the cluster with the LP method:
        # 1. read the data  2. build the LP  3. solve it  4. collect the results
        qkNet = QKDNetwork(useNetworkx = True, networkxGraph = G)
        x_ret, p_ret, _ = solver(qkNet, len(qkNet.nodeList), {})

        r = range(1, len(qkNet.nodeList) + 1)
        for m in r:
            for n in r:
                # Gather the nonzero main-route links for pair (m, n).
                path_length = 0
                path_nodes = []
                for i in r:
                    for j in r:
                        if(x_ret[m][n][i][j] != 0):
                            path_length += x_ret[m][n][i][j]
                            path_nodes.append([i, j])
                if(path_length != 0):
                    fixPath: list[XCluster.ObliviousPlan.fixFlow] = []
                    for path_link in path_nodes:
                        # path_link is [link start node, link end node]
                        i_tmp = path_link[0]
                        j_tmp = path_link[1]
                        for link in qkNet.linkListCapacity:
                            # Collect nonzero detour ratios in both directions of the link.
                            ratio = p_ret[m][n][i_tmp][j_tmp][link.startNode][link.endNode]
                            if(ratio != 0):
                                fixPath.append(XCluster.ObliviousPlan.fixFlow(
                                    [i_tmp, j_tmp, link.startNode, link.endNode],
                                    ratio=ratio
                                ))
                            ratio = p_ret[m][n][i_tmp][j_tmp][link.endNode][link.startNode]
                            if(ratio != 0):
                                fixPath.append(XCluster.ObliviousPlan.fixFlow(
                                    [i_tmp, j_tmp, link.endNode, link.startNode],
                                    ratio=ratio
                                ))
                    tmp = XCluster.ObliviousPlan(
                        m,
                        n,
                        path_nodes,
                        fixPath
                    )
                    self.obliviousPlanList.append(tmp)

    def readDataFromTxt(self):
        """Load the cluster (nodes, links, oblivious plans) from a template txt.

        File layout:
          line 0: node count
          line 1: cluster parameters "alpha beta"
          line 2: link count
          next numLinks lines: "start end capacity"
          then, for each of the n*(n-1) source-destination pairs:
            "routeLength startID endID" followed by routeLength lines of
            "u v [a b ratio]*" — one main-route link plus its detour entries.
        """
        path = f"{absolute_path}template/{self.template_txt_name}"
        with open(path, 'r') as file:
            lines = file.readlines()
            numNodes = int(lines[0])  # number of nodes
            self.nodeNum = numNodes
            nodeIDs = [i for i in range(1, numNodes + 1)]  # cluster-local node IDs
            for idx, nodeID in enumerate(nodeIDs):
                self.nodeList.append(self.Node(nodeID, self.subGraphNodeIDs[idx]))
            param = lines[1].split()  # cluster parameters
            self.alpha = float(param[0])
            self.beta = float(param[1])
            # self.seed = int(param[2])
            numLinks = int(lines[2])  # number of links

            # Initialise the links and their capacities.
            for i in range(3, 3 + numLinks):
                linkData = lines[i].split()
                startNode = int(linkData[0])
                endNode = int(linkData[1])
                self.nodeList[startNode - 1].adjacentNodes.append(endNode)
                self.nodeList[endNode - 1].adjacentNodes.append(startNode)
                capacity = int(linkData[2])
                link = self.Link(startNode, endNode, capacity)
                self.linkList.append(link)

            sd_num = numNodes * (numNodes - 1)
            cursor = 3 + numLinks
            for i in range(sd_num):
                route = lines[cursor].split()  # "routeLength startID endID"
                routeLength = int(route[0])    # number of links on the main route
                startID = int(route[1])
                endID = int(route[2])
                routePath = []
                fixPath: list[XCluster.ObliviousPlan.fixFlow] = []
                for _ in range(routeLength):
                    cursor = cursor + 1
                    # "u v a b ratio c d ratio ...": main link plus detour triples
                    fix_line = lines[cursor].split()
                    link_start_node = int(fix_line[0])
                    link_end_node = int(fix_line[1])
                    routePath.append([link_start_node, link_end_node])
                    fix_num = int((len(fix_line) - 2) / 3)
                    for j in range(fix_num):
                        fix_cursor = 2 + j * 3
                        link_i = int(fix_line[fix_cursor])
                        link_j = int(fix_line[fix_cursor + 1])
                        link_ratio = float(fix_line[fix_cursor + 2])
                        fixPath.append(XCluster.ObliviousPlan.fixFlow(
                            [link_start_node, link_end_node, link_i, link_j],
                            link_ratio
                        ))
                cursor = cursor + 1
                tmp = XCluster.ObliviousPlan(
                    startID,
                    endID,
                    routePath,
                    fixPath
                )
                self.obliviousPlanList.append(tmp)

# Non-overlapping communities
class NonOverlappingClusters:
    """Generate (or reload) a clustered network topology and partition it
    into non-overlapping communities via simulated annealing (SA)."""

    def __init__(self, read_from_txt = False) -> None:
        self.G = nx.Graph()
        if(not read_from_txt):
            # Generate 12 clusters of 8 nodes each: dense inside, sparse between.
            self.addGWithClusterShape([8 for _ in range(12)], 0.4, 0.04)
            self.removeScatterNode()  # drop degree-0 / degree-1 nodes
            self.sa = SA(deepcopy(self.G))
            self.clusterListArr = []
            self.splitNet()
            self.trimScatterNodes()
            # Persist the network and its partition for later reuse.
            nx.write_graphml(self.G, f"{absolute_path}entire_network.graphml")
            with open(f"{absolute_path}entire_network_split.txt", "w") as file:
                for row in self.clusterListArr:
                    file.write(" ".join(map(str, row)) + "\n")
        else:
            # Reload a previously persisted network and partition.
            self.G = nx.read_graphml(f"{absolute_path}entire_network.graphml")
            # graphml stores node IDs as strings; map them back to ints.
            id_mapping = {str(node): int(node) for node in self.G.nodes()}
            self.G = nx.relabel_nodes(self.G, id_mapping)
            loaded_array = []
            with open(f"{absolute_path}entire_network_split.txt", "r") as file:
                for line in file:
                    loaded_array.append(list(map(int, line.split())))
            self.clusterListArr = loaded_array

    def trimScatterNodes(self):
        """Iteratively strip degree-0/1 nodes from every cluster (and from the
        global graph), then drop clusters that became empty."""
        for node_arr_idx, node_arr in enumerate(self.clusterListArr):
            subgraph = self.G.subgraph(node_arr).copy()
            while True:
                scatter_nodes = [node for node in subgraph.nodes()
                                 if subgraph.degree(node) <= 1]
                if not scatter_nodes:
                    break
                self.G.remove_nodes_from(scatter_nodes)
                subgraph.remove_nodes_from(scatter_nodes)
                self.clusterListArr[node_arr_idx] = list(subgraph.nodes())
        # Remove clusters that lost all their nodes.
        self.clusterListArr = [sub_array for sub_array in self.clusterListArr if sub_array]

    def getGraph(self):
        """Return the underlying networkx graph."""
        return self.G

    def showSubFig(self):
        """Draw each community as its own figure, using a shared layout."""
        communities = self.clusterListArr
        if not communities:
            return
        pos = nx.spring_layout(self.G, seed=42)
        for community in communities:
            if not community:
                continue
            subgraph = self.G.subgraph(community)
            if len(subgraph) == 0:
                continue
            # Extract the sub-layout for this community.
            sub_pos = {node: pos[node] for node in subgraph.nodes()}

            plt.figure(figsize=(5, 5))
            nx.draw(subgraph, sub_pos, with_labels=True)
            plt.show()

    def showGraph(self):
        """Draw the whole network with one colour per community, placing each
        community around a circle to keep them visually separated."""
        communities = self.clusterListArr
        community_map = {}
        for idx, community in enumerate(communities):
            for node in community:
                community_map[node] = idx
        color_map = [community_map[node] for node in self.G.nodes()]
        pos = {}
        radius = 1
        # Spread the community centres evenly on a circle.
        angles = np.linspace(0, 2 * np.pi, len(communities), endpoint=False)
        community_centers = radius * np.column_stack([np.cos(angles), np.sin(angles)])
        for idx, community in enumerate(communities):
            subgraph = self.G.subgraph(community)
            community_pos = nx.spring_layout(subgraph, scale=0.5)  # small scale keeps each community compact
            community_pos = {node: pos + community_centers[idx] for node, pos in community_pos.items()}
            pos.update(community_pos)
        plt.figure(figsize=(7.5, 7.5))
        nx.draw(self.G, pos, node_color=color_map, with_labels=True, node_size=300)
        nx.draw_networkx_edges(self.G, pos, alpha=0.3)
        plt.axis('off')  # hide the axes
        plt.show()

    def splitNet(self) -> None:
        """Partition the network into clusters via simulated annealing and
        store the per-cluster node lists in self.clusterListArr.
        (The original annotation claimed a list return, but the result is
        stored on the instance and nothing is returned.)"""
        self.clusterListArr = self.sa.run()

    def removeScatterNode(self):
        """Remove every node of degree 0 or 1 from the global graph."""
        scatter_nodes = [node for node, deg in self.G.degree() if deg <= 1]
        self.G.remove_nodes_from(scatter_nodes)

    def randomEdgeWithNodeNum(self, node_num: int, edge_num: int):
        """Build a random graph with nodes labelled 1..node_num and exactly
        edge_num distinct edges, then attach any still-isolated node to a
        random peer so no degree-0 node remains."""
        self.G.add_nodes_from(range(1, node_num+1))
        while self.G.number_of_edges() < edge_num:
            node1 = random.choice(range(1, node_num+1))
            node2 = random.choice(range(1, node_num+1))
            if node1 != node2 and not self.G.has_edge(node1, node2):
                self.G.add_edge(node1, node2, weight=self.randomWeight())
        for node in self.G.nodes:
            if self.G.degree(node) == 0:
                # BUG FIX: the original sampled random.choice(range(node_num)),
                # i.e. 0..node_num-1 — off by one relative to the 1-based node
                # labels (could create a spurious node 0, or a self-loop).
                random_node = random.choice([n for n in range(1, node_num + 1) if n != node])
                self.G.add_edge(node, random_node, weight=self.randomWeight())

    def addGWithClusterShape(self, sizes: list[int], p_intra: float, p_inter: float):
        """Generate a stochastic-block-model graph with the given community
        sizes; p_intra is the intra-community edge probability, p_inter the
        inter-community one. Every edge gets a random weight."""
        # Only the diagonal (i == j) carries p_intra; off-diagonal is p_inter.
        p_matrix = [[p_intra if i == j else p_inter for j in range(1,len(sizes)+1)] for i in range(1,len(sizes)+1)]
        self.G.add_edges_from(stochastic_block_model(sizes, p_matrix).edges())
        for u, v in self.G.edges:
            self.G[u][v]['weight'] = self.randomWeight()

    def randomWeight(self) -> int:
        """Return a random link weight (capacity) in [20, 100]."""
        return random.randint(20, 100)

