{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from cluster_qkd_networkx_cluster import NonOverlappingClusters, XMultiClusterQKDNetwork\n",
    "import random\n",
    "import time\n",
    "import matplotlib.pyplot as plt\n",
    "from log import clearContent\n",
    "import numpy as np\n",
    "\n",
    "read_from_cache = False  # whether to load the topology from cached text files\n",
    "MIN_CLUSTER_SIZE = 6     # regenerate the network until every cluster has at least this many nodes\n",
    "\n",
    "def isHasSmallCluster(min_size=MIN_CLUSTER_SIZE):\n",
    "    \"\"\"Return True if any cluster of the current network `t` has fewer than `min_size` nodes.\"\"\"\n",
    "    return any(len(cluster) < min_size for cluster in t.clusterListArr)\n",
    "\n",
    "# Generate networks until no cluster is too small.\n",
    "t = NonOverlappingClusters(read_from_txt=read_from_cache)\n",
    "while isHasSmallCluster():\n",
    "    t = NonOverlappingClusters(read_from_txt=read_from_cache)\n",
    "\n",
    "for i in t.clusterListArr:\n",
    "    print(f\"{len(i)}：{i}\")\n",
    "t.showGraph()\n",
    "t.showSubFig()\n",
    "\n",
    "# The same cache flag decides whether XCluster state is loaded from a pkl file.\n",
    "qkdnx = XMultiClusterQKDNetwork(t.getGraph(), t.clusterListArr, read_from_pkl=read_from_cache)\n",
    "# qkdnx.showClusterTopo()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from itertools import combinations\n",
    "\n",
    "compareTxt = \"compare.txt\"\n",
    "clearContent(compareTxt)\n",
    "\n",
    "N_ROUNDS = 20              # independent measurement rounds written to compare.txt\n",
    "REQUESTS_PER_ROUND = 2000  # random flow requests issued per round\n",
    "\n",
    "def generateBrokenLinkList() -> list:\n",
    "    \"\"\"Pick one random intra-cluster edge per cluster and return them as [u, v] broken links.\n",
    "    Clusters with fewer than 3 nodes, or with no intra-cluster edge, contribute nothing.\"\"\"\n",
    "    broken_list = []\n",
    "    for node_ids in qkdnx.clusterListArr:\n",
    "        if len(node_ids) < 3:\n",
    "            continue\n",
    "        candidate_pair = [[u, v] for u, v in combinations(node_ids, 2)\n",
    "                          if t.getGraph().has_edge(u, v)]\n",
    "        if candidate_pair:  # guard: random.choice would raise on an empty candidate list\n",
    "            broken_list.append(random.choice(candidate_pair))\n",
    "    return broken_list\n",
    "\n",
    "for _ in range(N_ROUNDS):\n",
    "    cplex_alphas = []\n",
    "    spf_alphas = []\n",
    "    online_greedy_alphas = []\n",
    "\n",
    "    # Issue random flow requests; the alphas accumulate inside qkdnx.\n",
    "    for i in range(REQUESTS_PER_ROUND):\n",
    "        clusterNum = qkdnx.getClusterNum()\n",
    "        _startClusterID = random.randint(1, clusterNum)\n",
    "        _endClusterID = random.randint(1, clusterNum)\n",
    "        startNodeID = random.randint(1, len(qkdnx.clusterListArr[_startClusterID-1]))\n",
    "        endNodeID = random.randint(1, len(qkdnx.clusterListArr[_endClusterID-1]))\n",
    "        qkdnx.requestFlow(\n",
    "            startClusterID = _startClusterID, \n",
    "            startNodeID = startNodeID, \n",
    "            endClusterID = _endClusterID, \n",
    "            endNodeID = endNodeID, \n",
    "            flow = random.randint(0, 50), \n",
    "            # several links may be broken at the same time\n",
    "            brokenLinks = generateBrokenLinkList(), \n",
    "        )\n",
    "\n",
    "    # Snapshot the maximum alphas reached during this round.\n",
    "    cplex_alphas.append(qkdnx.maxCplexAlpha)\n",
    "    spf_alphas.append(qkdnx.maxSPFAlpha)\n",
    "    online_greedy_alphas.append(qkdnx.maxOnlineGreedyAlpha)\n",
    "\n",
    "    with open(compareTxt, \"a\") as file:\n",
    "        file.write(f\"{np.mean(cplex_alphas)} {np.mean(spf_alphas)} {np.mean(online_greedy_alphas)}\\n\")\n",
    "\n",
    "    # Reset link loads so the next round starts from a clean network.\n",
    "    qkdnx.setNetXFlowZero()\n",
    "    print(\"重置Alpha\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read each round's \"cplex spf greedy\" means back from compare.txt and plot them.\n",
    "data = []\n",
    "with open(compareTxt, \"r\") as file:\n",
    "    for line in file:\n",
    "        values = line.strip().split()\n",
    "        data.append([float(values[0]), float(values[1]), float(values[2])])\n",
    "\n",
    "x = list(range(1, len(data) + 1))\n",
    "y1 = [item[0] for item in data]  # column 1: Cplex\n",
    "y2 = [item[1] for item in data]  # column 2: SPF\n",
    "y3 = [item[2] for item in data]  # column 3: OnlineGreedy\n",
    "plt.plot(x, y1, label='Cplex')\n",
    "plt.plot(x, y2, label='Spf')\n",
    "plt.plot(x, y3, label='OnlineGreedy')\n",
    "plt.title('Congestion Ratio vs. Test Round')\n",
    "plt.xlabel('Test Round')\n",
    "plt.ylabel('Congestion Ratio')\n",
    "plt.legend()\n",
    "plt.show()  # bug fix: plt.show was referenced without calling it"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show the full parsed table: one [cplex, spf, greedy] row per round.\n",
    "data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Aggregate the per-round results column-wise (cplex, spf, greedy).\n",
    "metrics = np.array(data)\n",
    "totals = np.sum(metrics, axis=0)\n",
    "averages = np.mean(metrics, axis=0)\n",
    "print(\"每一列的和:\", totals)\n",
    "print(\"每一列的平均值:\", averages)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "cplex",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
