{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f114aebf",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/pytorch1.8/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import itertools\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.nn import functional as F\n",
    "from torch.utils.data import DataLoader,Dataset\n",
    "import math\n",
    "import random\n",
    "from functools import partial\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.nn.functional import normalize\n",
    "import scipy.sparse as sp\n",
    "from numpy.linalg import inv\n",
    "import copy\n",
    "from torch.optim.lr_scheduler import _LRScheduler\n",
    "import warnings\n",
    "import torch.nn.functional as F\n",
    "from torch.nn.parameter import Parameter\n",
    "from torch.nn.modules.module import Module\n",
    "torch.cuda.empty_cache()\n",
    "device=torch.device('cuda:0' if torch.cuda.is_available() else \"cpu\")\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "torch.cuda.is_available()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8d5e2079-0fd7-437f-95a3-f28495e51105",
   "metadata": {},
   "source": [
    "# process_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "f2394487-5b7b-4e32-b5b9-34f578f60d10",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(973, 973)\n"
     ]
    }
   ],
   "source": [
    "p=None\n",
    "feature_cd = torch.load('./data_circ/dataset/fea_cd_case.pt')  #torch.Size([973, 1527])\n",
    "adj_cd = torch.load('./data_circ/dataset/adj_cd_case.pt')  #torch.Size([973, 973])\n",
    "\n",
    "c = 0.15  #ppr中的c\n",
    "k1_cc = torch.tensor(20)    #原始图采样节点数\n",
    "k1_dd = torch.tensor(20)\n",
    "k1_cd = torch.tensor(4)\n",
    "k1_dc = torch.tensor(4)\n",
    "k2 = 0      #粗图采样节点数\n",
    "cc_shape=834\n",
    "dd_shape=138\n",
    "\n",
    "power_adj_list_cd = [copy.deepcopy(adj_cd)]\n",
    "for m in range(2):\n",
    "    power_adj_list_cd.append(power_adj_list_cd[0]@power_adj_list_cd[m])      #(3,972,972)\n",
    "\n",
    "#Sampling heuristics: 0,1,2\n",
    "one = power_adj_list_cd[0]  #一阶邻接矩阵\n",
    "two = power_adj_list_cd[1]    #二阶邻接矩阵\n",
    "three = power_adj_list_cd[2]    #二阶邻接矩阵\n",
    "\n",
    "\n",
    "eigen_adj_cd5_copy1 = copy.deepcopy(one)\n",
    "eigen_adj_cd5_copy2 = copy.deepcopy(one)\n",
    "eigen_adj_cd5_copy3 = copy.deepcopy(one)\n",
    "eigen_adj_cd5_copy4 = copy.deepcopy(one)\n",
    "\n",
    "eigen_adj1_cd5_copy1 = copy.deepcopy(two)\n",
    "eigen_adj1_cd5_copy2 = copy.deepcopy(two)\n",
    "eigen_adj1_cd5_copy3 = copy.deepcopy(two)\n",
    "eigen_adj1_cd5_copy4 = copy.deepcopy(two)\n",
    "\n",
    "eigen_adj2_cd5_copy1 = copy.deepcopy(three)\n",
    "eigen_adj2_cd5_copy2 = copy.deepcopy(three)\n",
    "eigen_adj2_cd5_copy3 = copy.deepcopy(three)\n",
    "eigen_adj2_cd5_copy4 = copy.deepcopy(three)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a12d0bdb-1ee1-470e-891c-6cf7e0c17e2d",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'cc_shape' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[4], line 8\u001b[0m\n\u001b[0;32m      3\u001b[0m data_list_dd_case \u001b[38;5;241m=\u001b[39m []          \u001b[38;5;66;03m#空列表存储子图样本和相关数据，五折\u001b[39;00m\n\u001b[0;32m      7\u001b[0m sub_data_list_cc \u001b[38;5;241m=\u001b[39m []      \u001b[38;5;66;03m#存储当前节点子图样本和相关数据\u001b[39;00m\n\u001b[1;32m----> 8\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m \u001b[38;5;28mid\u001b[39m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[43mcc_shape\u001b[49m):       \u001b[38;5;66;03m#遍历原始图所有节点\u001b[39;00m\n\u001b[0;32m      9\u001b[0m     s \u001b[38;5;241m=\u001b[39m eigen_adj_cd5_copy1[\u001b[38;5;28mid\u001b[39m]\n\u001b[0;32m     10\u001b[0m     s[\u001b[38;5;28mid\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'cc_shape' is not defined"
     ]
    }
   ],
   "source": [
    "#保存的是大矩阵里的下标\n",
    "data_list_cc_case = []          #空列表存储子图样本和相关数据，五折\n",
    "data_list_dd_case = []          #空列表存储子图样本和相关数据，五折\n",
    "\n",
    "\n",
    "\n",
    "sub_data_list_cc = []      #存储当前节点子图样本和相关数据\n",
    "for id in range(cc_shape):       #遍历原始图所有节点\n",
    "    s = eigen_adj_cd5_copy1[id]\n",
    "    s[id] = 0\n",
    "    s[834:] = 0\n",
    "    #即第一次采样仅使用一阶邻居进行采样，后续采用到自适应采样策略\n",
    "    if p is not None:           #p是各种策略权重\n",
    "        s1 = eigen_adj1_cd5_copy1[id]\n",
    "        s2 = eigen_adj2_cd5_copy1[id]\n",
    "        s1[id] = 0\n",
    "        s2[id] = 0\n",
    "        s1[834:] = 0\n",
    "        s2[834:] = 0\n",
    "        #得到最终采样概率\n",
    "        s = p[0]*s/(s.sum()+1e-5) + p[1]*s1/(s1.sum()+1e-5) + p[2]*s2/(s2.sum()+1e-5)\n",
    "    # sample_num1 = np.minimum(k1_cc, (s > 0).sum())     #原始图采样节点数取14和一阶邻居正例最小值\n",
    "    sample_num1 = torch.min(k1_cc, (s > 0).sum())\n",
    "    if sample_num1 > 0:\n",
    "        #随机选择sample_num1个采样节点，不允许重复，且概率更大更有可能被选中\n",
    "        #sample_index1 = np.random.choice(a=np.arange(fea[i].shape[0]), size=sample_num1, replace=False, p=s/s.sum())\n",
    "        sample_index1 = s.argsort()[-(sample_num1):].cpu()\n",
    "    else:\n",
    "        #不进行采样\n",
    "        sample_index1 = np.array([], dtype=int)\n",
    "    #创建一个包含当前节点、sample_index1 和 top_neighbor_index 的列表，这些将成为子图的节点特征。\n",
    "    #采样够14个节点就行，不够的话用top邻居补上\n",
    "    node_feature_id = torch.cat([torch.tensor([id, ]), torch.tensor(sample_index1, dtype=int),\n",
    "                            torch.ones(k1_cc-sample_num1, dtype=int)*1527])\n",
    "    #如果这个条件为真（即相等），那么程序会继续执行。\n",
    "    # 如果条件为假（即不相等），则会引发 AssertionError，程序将停止执行。    \n",
    "    assert len(node_feature_id) == k1_cc+1\n",
    "    sub_data_list_cc.append(node_feature_id) \n",
    "\n",
    "\n",
    "#将所有子图样本添加到 data_list 中，构建了数据集的一部分。\n",
    "sub_data_list_cd = []      #存储当前节点子图样本和相关数据\n",
    "for id in range(cc_shape):       #遍历原始图所有节点\n",
    "    s = eigen_adj_cd5_copy2[id]\n",
    "    s[:834]=0\n",
    "    #即第一次采样仅使用一阶邻居进行采样，后续采用到自适应采样策略\n",
    "    if p is not None:           #p是各种策略权重\n",
    "        s1 = eigen_adj1_cd5_copy2[id]\n",
    "        s2 = eigen_adj2_cd5_copy2[id]\n",
    "        s1[:834]=0\n",
    "        s2[:834]=0\n",
    "        #得到最终采样概率\n",
    "        s = p[0]*s/(s.sum()+1e-5) + p[1]*s1/(s1.sum()+1e-5) + p[2]*s2/(s2.sum()+1e-5)\n",
    "    # sample_num1 = np.minimum(k1_cd, (s > 0).sum())     #原始图采样节点数取14和一阶邻居正例最小值\n",
    "    sample_num1 = torch.min(k1_cd, (s > 0).sum())\n",
    "    if sample_num1 > 0:\n",
    "        #随机选择sample_num1个采样节点，不允许重复，且概率更大更有可能被选中\n",
    "        #sample_index1 = np.random.choice(a=np.arange(fea[i].shape[0]), size=sample_num1, replace=False, p=s/s.sum())\n",
    "        sample_index1 = s.argsort()[-(sample_num1):].cpu()\n",
    "    else:\n",
    "        #不进行采样\n",
    "        sample_index1 = np.array([], dtype=int)\n",
    "    #创建一个包含当前节点、sample_index1 和 top_neighbor_index 的列表，这些将成为子图的节点特征。\n",
    "    #采样够14个节点就行，不够的话用top邻居补上\n",
    "    node_feature_id = torch.cat([torch.tensor(sample_index1, dtype=int),\n",
    "                            torch.ones(k1_cd-sample_num1, dtype=int)*1527])\n",
    "    #如果这个条件为真（即相等），那么程序会继续执行。\n",
    "    # 如果条件为假（即不相等），则会引发 AssertionError，程序将停止执行。\n",
    "    assert len(node_feature_id) == k1_cd\n",
    "    sub_data_list_cd.append(node_feature_id)  \n",
    "result_list = [torch.cat((tensor1, tensor2), dim=0) for tensor1, tensor2 in zip(sub_data_list_cc, sub_data_list_cd)]\n",
    "data_list_cc_case.append(result_list)\n",
    "    \n",
    "    \n",
    "    \n",
    "\n",
    "sub_data_list_dd = []      #存储当前节点子图样本和相关数据\n",
    "for id in range(dd_shape):       #遍历原始图所有节点\n",
    "    s = eigen_adj_cd5_copy3[id+834]\n",
    "    s[id+834] = 0\n",
    "    s[:834]=0\n",
    "    #即第一次采样仅使用一阶邻居进行采样，后续采用到自适应采样策略\n",
    "    if p is not None:           #p是各种策略权重\n",
    "        s1 = eigen_adj1_cd5_copy3[id+834]\n",
    "        s2 = eigen_adj2_cd5_copy3[id+834]\n",
    "        s1[id+834] = 0\n",
    "        s1[:834]=0\n",
    "        s2[id+834] = 0\n",
    "        s2[:834]=0\n",
    "        #得到最终采样概率\n",
    "        s = p[0]*s/(s.sum()+1e-5) + p[1]*s1/(s1.sum()+1e-5) + p[2]*s2/(s2.sum()+1e-5)\n",
    "    # sample_num1 = np.minimum(k1_dd, (s > 0).sum())     #原始图采样节点数取14和一阶邻居正例最小值\n",
    "    sample_num1 = torch.min(k1_dd, (s > 0).sum())\n",
    "    if sample_num1 > 0:\n",
    "        #随机选择sample_num1个采样节点，不允许重复，且概率更大更有可能被选中\n",
    "        #sample_index1 = np.random.choice(a=np.arange(fea[i].shape[0]), size=sample_num1, replace=False, p=s/s.sum())\n",
    "        sample_index1 = s.argsort()[-(sample_num1):].cpu()\n",
    "    else:\n",
    "        #不进行采样\n",
    "        sample_index1 = np.array([], dtype=int)\n",
    "    #创建一个包含当前节点、sample_index1 和 top_neighbor_index 的列表，这些将成为子图的节点特征。\n",
    "    #采样够14个节点就行，不够的话用top邻居补上\n",
    "    node_feature_id = torch.cat([torch.tensor([id+834, ]), torch.tensor(sample_index1, dtype=int),\n",
    "                            torch.ones(k1_dd-sample_num1, dtype=int)*1527])\n",
    "    #如果这个条件为真（即相等），那么程序会继续执行。\n",
    "    # 如果条件为假（即不相等），则会引发 AssertionError，程序将停止执行。\n",
    "    assert len(node_feature_id) == k1_dd+1\n",
    "    sub_data_list_dd.append(node_feature_id)    \n",
    "#将所有子图样本添加到 data_list 中，构建了数据集的一部分。\n",
    "\n",
    " #将所有子图样本添加到 data_list 中，构建了数据集的一部分。\n",
    "sub_data_list_dc = []      #存储当前节点子图样本和相关数据\n",
    "for id in range(dd_shape):       #遍历原始图所有节点\n",
    "    s = eigen_adj_cd5_copy4[id+834]\n",
    "    s[834:] = 0\n",
    "    #即第一次采样仅使用一阶邻居进行采样，后续采用到自适应采样策略\n",
    "    if p is not None:           #p是各种策略权重\n",
    "        s1 = eigen_adj1_cd5_copy4[id+834]\n",
    "        s2 = eigen_adj2_cd5_copy4[id+834]\n",
    "        s1[:834] = 0\n",
    "        s2[:834] = 0\n",
    "        #得到最终采样概率\n",
    "        s = p[0]*s/(s.sum()+1e-5) + p[1]*s1/(s1.sum()+1e-5) + p[2]*s2/(s2.sum()+1e-5)\n",
    "    # sample_num1 = np.minimum(k1_dc, (s > 0).sum())     #原始图采样节点数取14和一阶邻居正例最小值\n",
    "    sample_num1 = torch.min(k1_dc, (s > 0).sum())\n",
    "    if sample_num1 > 0:\n",
    "        #随机选择sample_num1个采样节点，不允许重复，且概率更大更有可能被选中\n",
    "        #sample_index1 = np.random.choice(a=np.arange(fea[i].shape[0]), size=sample_num1, replace=False, p=s/s.sum())\n",
    "        sample_index1 = s.argsort()[-(sample_num1):].cpu()\n",
    "    else:\n",
    "        #不进行采样\n",
    "        sample_index1 = np.array([], dtype=int)\n",
    "    #创建一个包含当前节点、sample_index1 和 top_neighbor_index 的列表，这些将成为子图的节点特征。\n",
    "    #采样够14个节点就行，不够的话用top邻居补上\n",
    "    node_feature_id = torch.cat([torch.tensor(sample_index1, dtype=int),\n",
    "                            torch.ones(k1_dc-sample_num1, dtype=int)*1527])\n",
    "    #如果这个条件为真（即相等），那么程序会继续执行。\n",
    "    # 如果条件为假（即不相等），则会引发 AssertionError，程序将停止执行。\n",
    "    assert len(node_feature_id) == k1_dc\n",
    "    sub_data_list_dc.append(node_feature_id)  \n",
    "result_list = [torch.cat((tensor1, tensor2), dim=0) for tensor1, tensor2 in zip(sub_data_list_dd, sub_data_list_dc)]\n",
    "data_list_dd_case.append(result_list)\n",
    "\n",
    "torch.save(data_list_cc_case,'./data_circ/dataset/data_cc_case.pt')\n",
    "torch.save(data_list_dd_case,'./data_circ/dataset/data_dd_case.pt')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "40195423-49d8-4f46-8fb1-d70050bcd093",
   "metadata": {},
   "source": [
    "# sample"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "c212fca5-8dfd-4ccd-9dec-d820fe534ec7",
   "metadata": {},
   "outputs": [],
   "source": [
    "def node_sampling(p=None):\n",
    "    feature_cd = torch.load('./data_circ/dataset/fea_cd_case.pt')  #torch.Size([973, 1527])\n",
    "    adj_cd = torch.load('./data_circ/dataset/adj_cd_case.pt')  #torch.Size([972, 972])\n",
    "\n",
    "    c = 0.15  #ppr中的c\n",
    "    k1_cc = torch.tensor(20)    #原始图采样节点数\n",
    "    k1_dd = torch.tensor(20)\n",
    "    k1_cd = torch.tensor(4)\n",
    "    k1_dc = torch.tensor(4)\n",
    "    k2 = 0      #粗图采样节点数\n",
    "    cc_shape=834\n",
    "    dd_shape=138\n",
    "\n",
    "    power_adj_list_cd5=[copy.deepcopy(adj_cd)]\n",
    "    for m in range(2):\n",
    "        power_adj_list_cd5.append(power_adj_list_cd5[0]*power_adj_list_cd5[m])      #(5,3,972,972)\n",
    "\n",
    "    #Sampling heuristics: 0,1,2\n",
    "    one = power_adj_list_cd5[0]  #一阶邻接矩阵\n",
    "    two = power_adj_list_cd5[1]    #二阶邻接矩阵\n",
    "    three = power_adj_list_cd5[2]    #二阶邻接矩阵\n",
    "\n",
    "\n",
    "    eigen_adj_cd5_copy1 = copy.deepcopy(one)\n",
    "    eigen_adj_cd5_copy2 = copy.deepcopy(one)\n",
    "    eigen_adj_cd5_copy3 = copy.deepcopy(one)\n",
    "    eigen_adj_cd5_copy4 = copy.deepcopy(one)\n",
    "\n",
    "    eigen_adj1_cd5_copy1 = copy.deepcopy(two)\n",
    "    eigen_adj1_cd5_copy2 = copy.deepcopy(two)\n",
    "    eigen_adj1_cd5_copy3 = copy.deepcopy(two)\n",
    "    eigen_adj1_cd5_copy4 = copy.deepcopy(two)\n",
    "\n",
    "    eigen_adj2_cd5_copy1 = copy.deepcopy(three)\n",
    "    eigen_adj2_cd5_copy2 = copy.deepcopy(three)\n",
    "    eigen_adj2_cd5_copy3 = copy.deepcopy(three)\n",
    "    eigen_adj2_cd5_copy4 = copy.deepcopy(three)\n",
    "\n",
    "    \n",
    "    #保存的是大矩阵里的下标\n",
    "    data_list_cc_case = []          #空列表存储子图样本和相关数据，五折\n",
    "    data_list_dd_case = []          #空列表存储子图样本和相关数据，五折\n",
    "\n",
    "\n",
    "    sub_data_list_cc = []      #存储当前节点子图样本和相关数据\n",
    "    for id in range(cc_shape):       #遍历原始图所有节点\n",
    "        s = eigen_adj_cd5_copy1[id]\n",
    "        s[id] = 0\n",
    "        s[834:] = 0\n",
    "        #即第一次采样仅使用一阶邻居进行采样，后续采用到自适应采样策略\n",
    "        if p is not None:           #p是各种策略权重\n",
    "            s1 = eigen_adj1_cd5_copy1[id]\n",
    "            s2 = eigen_adj2_cd5_copy1[id]\n",
    "            s1[id] = 0\n",
    "            s2[id] = 0\n",
    "            s1[834:] = 0\n",
    "            s2[834:] = 0\n",
    "            #得到最终采样概率\n",
    "            s = p[0]*s/(s.sum()+1e-5) + p[1]*s1/(s1.sum()+1e-5) + p[2]*s2/(s2.sum()+1e-5)\n",
    "        # sample_num1 = np.minimum(k1_cc, (s > 0).sum())     #原始图采样节点数取14和一阶邻居正例最小值\n",
    "        sample_num1 = torch.min(k1_cc, (s > 0).sum())\n",
    "        if sample_num1 > 0:\n",
    "            #随机选择sample_num1个采样节点，不允许重复，且概率更大更有可能被选中\n",
    "            #sample_index1 = np.random.choice(a=np.arange(fea[i].shape[0]), size=sample_num1, replace=False, p=s/s.sum())\n",
    "            sample_index1 = s.argsort()[-(sample_num1):].cpu()\n",
    "        else:\n",
    "            #不进行采样\n",
    "            sample_index1 = np.array([], dtype=int)\n",
    "        #创建一个包含当前节点、sample_index1 和 top_neighbor_index 的列表，这些将成为子图的节点特征。\n",
    "        #采样够14个节点就行，不够的话用top邻居补上\n",
    "        node_feature_id = torch.cat([torch.tensor([id, ]), torch.tensor(sample_index1, dtype=int),\n",
    "                                torch.ones(k1_cc-sample_num1, dtype=int)*1527])\n",
    "        #如果这个条件为真（即相等），那么程序会继续执行。\n",
    "        # 如果条件为假（即不相等），则会引发 AssertionError，程序将停止执行。    \n",
    "        assert len(node_feature_id) == k1_cc+1\n",
    "        sub_data_list_cc.append(node_feature_id) \n",
    "\n",
    "\n",
    "    #将所有子图样本添加到 data_list 中，构建了数据集的一部分。\n",
    "    sub_data_list_cd = []      #存储当前节点子图样本和相关数据\n",
    "    for id in range(cc_shape):       #遍历原始图所有节点\n",
    "        s = eigen_adj_cd5_copy2[id]\n",
    "        s[:834]=0\n",
    "        #即第一次采样仅使用一阶邻居进行采样，后续采用到自适应采样策略\n",
    "        if p is not None:           #p是各种策略权重\n",
    "            s1 = eigen_adj1_cd5_copy2[id]\n",
    "            s2 = eigen_adj2_cd5_copy2[id]\n",
    "            s1[:834]=0\n",
    "            s2[:834]=0\n",
    "            #得到最终采样概率\n",
    "            s = p[0]*s/(s.sum()+1e-5) + p[1]*s1/(s1.sum()+1e-5) + p[2]*s2/(s2.sum()+1e-5)\n",
    "        # sample_num1 = np.minimum(k1_cd, (s > 0).sum())     #原始图采样节点数取14和一阶邻居正例最小值\n",
    "        sample_num1 = torch.min(k1_cd, (s > 0).sum())\n",
    "        if sample_num1 > 0:\n",
    "            #随机选择sample_num1个采样节点，不允许重复，且概率更大更有可能被选中\n",
    "            #sample_index1 = np.random.choice(a=np.arange(fea[i].shape[0]), size=sample_num1, replace=False, p=s/s.sum())\n",
    "            sample_index1 = s.argsort()[-(sample_num1):].cpu()\n",
    "        else:\n",
    "            #不进行采样\n",
    "            sample_index1 = np.array([], dtype=int)\n",
    "        #创建一个包含当前节点、sample_index1 和 top_neighbor_index 的列表，这些将成为子图的节点特征。\n",
    "        #采样够14个节点就行，不够的话用top邻居补上\n",
    "        node_feature_id = torch.cat([torch.tensor(sample_index1, dtype=int),\n",
    "                                torch.ones(k1_cd-sample_num1, dtype=int)*1527])\n",
    "        #如果这个条件为真（即相等），那么程序会继续执行。\n",
    "        # 如果条件为假（即不相等），则会引发 AssertionError，程序将停止执行。\n",
    "        assert len(node_feature_id) == k1_cd\n",
    "        sub_data_list_cd.append(node_feature_id)  \n",
    "    result_list = [torch.cat((tensor1, tensor2), dim=0) for tensor1, tensor2 in zip(sub_data_list_cc, sub_data_list_cd)]\n",
    "    data_list_cc_case.append(result_list)\n",
    "\n",
    "\n",
    "\n",
    "    sub_data_list_dd = []      #存储当前节点子图样本和相关数据\n",
    "    for id in range(dd_shape):       #遍历原始图所有节点\n",
    "        s = eigen_adj_cd5_copy3[id+834]\n",
    "        s[id+834] = 0\n",
    "        s[:834]=0\n",
    "        #即第一次采样仅使用一阶邻居进行采样，后续采用到自适应采样策略\n",
    "        if p is not None:           #p是各种策略权重\n",
    "            s1 = eigen_adj1_cd5_copy3[id+834]\n",
    "            s2 = eigen_adj2_cd5_copy3[id+834]\n",
    "            s1[id+834] = 0\n",
    "            s1[:834]=0\n",
    "            s2[id+834] = 0\n",
    "            s2[:834]=0\n",
    "            #得到最终采样概率\n",
    "            s = p[0]*s/(s.sum()+1e-5) + p[1]*s1/(s1.sum()+1e-5) + p[2]*s2/(s2.sum()+1e-5)\n",
    "        # sample_num1 = np.minimum(k1_dd, (s > 0).sum())     #原始图采样节点数取14和一阶邻居正例最小值\n",
    "        sample_num1 = torch.min(k1_dd, (s > 0).sum())\n",
    "        if sample_num1 > 0:\n",
    "            #随机选择sample_num1个采样节点，不允许重复，且概率更大更有可能被选中\n",
    "            #sample_index1 = np.random.choice(a=np.arange(fea[i].shape[0]), size=sample_num1, replace=False, p=s/s.sum())\n",
    "            sample_index1 = s.argsort()[-(sample_num1):].cpu()\n",
    "        else:\n",
    "            #不进行采样\n",
    "            sample_index1 = np.array([], dtype=int)\n",
    "        #创建一个包含当前节点、sample_index1 和 top_neighbor_index 的列表，这些将成为子图的节点特征。\n",
    "        #采样够14个节点就行，不够的话用top邻居补上\n",
    "        node_feature_id = torch.cat([torch.tensor([id+834, ]), torch.tensor(sample_index1, dtype=int),\n",
    "                                torch.ones(k1_dd-sample_num1, dtype=int)*1527])\n",
    "        #如果这个条件为真（即相等），那么程序会继续执行。\n",
    "        # 如果条件为假（即不相等），则会引发 AssertionError，程序将停止执行。\n",
    "        assert len(node_feature_id) == k1_dd+1\n",
    "        sub_data_list_dd.append(node_feature_id)    \n",
    "    #将所有子图样本添加到 data_list 中，构建了数据集的一部分。\n",
    "\n",
    "     #将所有子图样本添加到 data_list 中，构建了数据集的一部分。\n",
    "    sub_data_list_dc = []      #存储当前节点子图样本和相关数据\n",
    "    for id in range(dd_shape):       #遍历原始图所有节点\n",
    "        s = eigen_adj_cd5_copy4[id+834]\n",
    "        s[834:] = 0\n",
    "        #即第一次采样仅使用一阶邻居进行采样，后续采用到自适应采样策略\n",
    "        if p is not None:           #p是各种策略权重\n",
    "            s1 = eigen_adj1_cd5_copy4[id+834]\n",
    "            s2 = eigen_adj2_cd5_copy4[id+834]\n",
    "            s1[:834] = 0\n",
    "            s2[:834] = 0\n",
    "            #得到最终采样概率\n",
    "            s = p[0]*s/(s.sum()+1e-5) + p[1]*s1/(s1.sum()+1e-5) + p[2]*s2/(s2.sum()+1e-5)\n",
    "        # sample_num1 = np.minimum(k1_dc, (s > 0).sum())     #原始图采样节点数取14和一阶邻居正例最小值\n",
    "        sample_num1 = torch.min(k1_dc, (s > 0).sum())\n",
    "        if sample_num1 > 0:\n",
    "            #随机选择sample_num1个采样节点，不允许重复，且概率更大更有可能被选中\n",
    "            #sample_index1 = np.random.choice(a=np.arange(fea[i].shape[0]), size=sample_num1, replace=False, p=s/s.sum())\n",
    "            sample_index1 = s.argsort()[-(sample_num1):].cpu()\n",
    "        else:\n",
    "            #不进行采样\n",
    "            sample_index1 = np.array([], dtype=int)\n",
    "        #创建一个包含当前节点、sample_index1 和 top_neighbor_index 的列表，这些将成为子图的节点特征。\n",
    "        #采样够14个节点就行，不够的话用top邻居补上\n",
    "        node_feature_id = torch.cat([torch.tensor(sample_index1, dtype=int),\n",
    "                                torch.ones(k1_dc-sample_num1, dtype=int)*1527])\n",
    "        #如果这个条件为真（即相等），那么程序会继续执行。\n",
    "        # 如果条件为假（即不相等），则会引发 AssertionError，程序将停止执行。\n",
    "        assert len(node_feature_id) == k1_dc\n",
    "        sub_data_list_dc.append(node_feature_id)  \n",
    "    result_list = [torch.cat((tensor1, tensor2), dim=0) for tensor1, tensor2 in zip(sub_data_list_dd, sub_data_list_dc)]\n",
    "    data_list_dd_case.append(result_list)\n",
    "\n",
    "    return data_list_cc_case,data_list_dd_case\n",
    "\n",
    "# p=[0.25,0.25,0.25]\n",
    "# data_list_cc, data_list_dd = node_sampling(p)\n",
    "# print(len(data_list_cc))#(5,834,25)\n",
    "# print(len(data_list_dd))#(5,138,25)\n",
    "# print(len(data_list_dd[0]))#(5,138,25)\n",
    "# print(len(data_list_dd[0][0]))#(5,138,25)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "30eacb7d-039c-4107-b50e-9bd92d2d5be1",
   "metadata": {},
   "source": [
    "# model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "b1942c02",
   "metadata": {},
   "outputs": [],
   "source": [
    "class GraphConvolution(nn.Module):\n",
    "    def __init__(self, in_features, out_features):\n",
    "        super(GraphConvolution, self).__init__()\n",
    "        self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features))\n",
    "        self.reset_parameters()\n",
    "\n",
    "    def reset_parameters(self):\n",
    "        nn.init.kaiming_uniform_(self.weight)\n",
    "\n",
    "    def forward(self, input, adj):\n",
    "        support = torch.mm(input, self.weight)\n",
    "        output = torch.mm(adj, support)\n",
    "        return output\n",
    "\n",
    "class SelfAttention(nn.Module):\n",
    "    def __init__(self, in_features, out_features, heads=1):\n",
    "        super(SelfAttention, self).__init__()\n",
    "        self.heads = heads\n",
    "        self.query = nn.Linear(in_features, out_features)\n",
    "        self.key = nn.Linear(in_features, out_features)\n",
    "        self.value = nn.Linear(in_features, out_features)\n",
    "\n",
    "    def forward(self, x):\n",
    "        batch_size = 1\n",
    "        Q = self.query(x)\n",
    "        K = self.key(x)\n",
    "        V = self.value(x)\n",
    "\n",
    "        Q = Q.view(batch_size, -1, self.heads, Q.size(-1) // self.heads)\n",
    "        K = K.view(batch_size, -1, self.heads, K.size(-1) // self.heads)\n",
    "        V = V.view(batch_size, -1, self.heads, V.size(-1) // self.heads)\n",
    "\n",
    "        Q = Q.permute(0, 2, 1, 3)\n",
    "        K = K.permute(0, 2, 1, 3)\n",
    "        V = V.permute(0, 2, 1, 3)\n",
    "\n",
    "        attention = torch.matmul(Q, K.transpose(-2, -1)) / (Q.size(-1) ** 0.5)\n",
    "        attention = torch.softmax(attention, dim=-1)\n",
    "\n",
    "        out = torch.matmul(attention, V)\n",
    "        out = out.permute(0, 2, 1, 3).contiguous()\n",
    "        out = out.view( -1, self.heads * (Q.size(-1)))\n",
    "\n",
    "        return out\n",
    "\n",
    "class MLP(nn.Module):\n",
    "    def __init__(self, in_features, out_features):\n",
    "        super(MLP, self).__init__()\n",
    "        self.fc1 = nn.Linear(in_features, 512)\n",
    "        self.relu = nn.ReLU()\n",
    "        self.fc2 = nn.Linear(512, out_features)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.relu(self.fc1(x))\n",
    "        x = self.fc2(x)\n",
    "        return x\n",
    "    \n",
    "\n",
    "class GraphAttentionLayer(nn.Module):\n",
    "    def __init__(self, in_features, out_features, dropout, alpha,concat=True):\n",
    "        super(GraphAttentionLayer, self).__init__()\n",
    "        self.dropout = dropout\n",
    "        self.in_features = in_features\n",
    "        self.out_features = out_features\n",
    "        self.alpha = alpha\n",
    "        self.concat = concat\n",
    "\n",
    "        \n",
    "        self.Wcc = nn.Linear(in_features, out_features)\n",
    "        self.Wcd = nn.Linear(in_features, out_features)\n",
    "        self.Wdd = nn.Linear(in_features, out_features)\n",
    "        self.Wdc = nn.Linear(in_features, out_features)\n",
    "        self.a = nn.Linear(out_features * 2, 1)\n",
    "        nn.init.xavier_normal_(self.Wcc.weight)\n",
    "        nn.init.xavier_normal_(self.Wcd.weight)\n",
    "        nn.init.xavier_normal_(self.Wdd.weight)\n",
    "        nn.init.xavier_normal_(self.Wdc.weight)\n",
    "        nn.init.xavier_normal_(self.a.weight)\n",
    "        \n",
    "        self.leakyrelu = nn.LeakyReLU(0.2)\n",
    "        \n",
    "    def forward(self, h,start,get_score=False):\n",
    "        h_neighbors=h[:,0:1,:].repeat(1,h.shape[1],1)\n",
    "        #cstart\n",
    "        if start ==0:\n",
    "            h_neighbors_1=self.Wcc(h_neighbors[:,:20,:]) \n",
    "            h_neighbors_2=self.Wcd(h_neighbors[:,20:,:]) \n",
    "            h_neighbors=torch.cat([h_neighbors_1,h_neighbors_2],dim=1)\n",
    "            h_1=self.Wcc(h[:,:20,:])\n",
    "            h_2=self.Wcd(h[:,20:,:])\n",
    "            h=torch.cat([h_1,h_2],dim=1)\n",
    "            combined=torch.cat([h_neighbors,h],dim=2)\n",
    "            e=self.leakyrelu(self.a(combined))\n",
    "        if start ==1:\n",
    "            h_neighbors_1=self.Wdd(h_neighbors[:,:20,:]) \n",
    "            h_neighbors_2=self.Wdc(h_neighbors[:,20:,:]) \n",
    "            h_neighbors=torch.cat([h_neighbors_1,h_neighbors_2],dim=1)\n",
    "            h_1=self.Wdd(h[:,:20,:])\n",
    "            h_2=self.Wdc(h[:,20:,:])\n",
    "            h=torch.cat([h_1,h_2],dim=1)\n",
    "            combined=torch.cat([h_neighbors,h],dim=2)\n",
    "            e=self.leakyrelu(self.a(combined))\n",
    "        e=torch.transpose(e,1,2)\n",
    "        attention = F.softmax(e, dim=2)\n",
    "        if get_score:\n",
    "            score = attention.squeeze()\n",
    "        h_prime = torch.matmul(attention, h)\n",
    "        h_prime=h_prime.squeeze()\n",
    "        if get_score:\n",
    "            return score\n",
    "        else:\n",
    "            if self.concat:\n",
    "                return F.leaky_relu(h_prime)\n",
    "            else:\n",
    "                return h_prime\n",
    "\n",
    "    def __repr__(self):\n",
    "        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'\n",
    "\n",
    "    \n",
    "\n",
    "class GAT(nn.Module):\n",
    "    def __init__(self, nfeat, nhid, dropout, alpha, nheads):\n",
    "        super(GAT, self).__init__()\n",
    "        self.dropout = dropout\n",
    "        \n",
    "        self.attentions1 = [GraphAttentionLayer(nfeat, nhid,dropout=dropout, alpha=alpha,concat=True) for _ in range(nheads)]\n",
    "        for i, attention in enumerate(self.attentions1):\n",
    "            self.add_module('attention1_{}'.format(i), attention)\n",
    "        self.out_att1 = nn.Linear(nhid * nheads, nhid)  # 输出层\n",
    "        self.c1=nn.Conv2d(1,32,kernel_size=(2,4),stride=1,padding=0)\n",
    "        self.s1=nn.MaxPool2d(kernel_size=(1,6))\n",
    "        self.c2=nn.Conv2d(32,64,kernel_size=(1,4),stride=1,padding=0)\n",
    "        self.s2=nn.MaxPool2d(kernel_size=(1,10))\n",
    "        self.l1=nn.Sequential(nn.Linear(76*64,1024),nn.LeakyReLU(),nn.Dropout(0.5),nn.Linear(1024,512),nn.LeakyReLU(),nn.Dropout(0.5))\n",
    "        self.l2=nn.Sequential(nn.Linear(512,256),nn.LeakyReLU(),nn.Dropout(0.5),nn.Linear(256,2))\n",
    "        self.leakyrelu=nn.LeakyReLU()\n",
    "        self.g1 = nn.Linear(1527, 1527)  \n",
    "        self.g2 = nn.Linear(1527, 1527)  \n",
    "        self.g3 = nn.Linear(1527, 1527) \n",
    "        self.g4 = nn.Linear(1527, 1527) \n",
    "        self.bias = nn.Parameter(torch.zeros(1527))\n",
    "        self.gc1 = GraphConvolution(nfeat, nhid)\n",
    "        self.gc2 = GraphConvolution(nfeat, nhid)\n",
    "        self.attention2 = SelfAttention(nfeat, nhid, heads=3)\n",
    "        self.mlp = MLP(nhid, 128)\n",
    "        self.reset_para()\n",
    "        \n",
    "    def reset_para(self):\n",
    "        nn.init.xavier_normal_(self.c1.weight)\n",
    "        nn.init.xavier_normal_(self.c2.weight)\n",
    "        nn.init.xavier_normal_(self.g1.weight)\n",
    "        for mode in self.l1:\n",
    "            if isinstance(mode,nn.Linear):\n",
    "                nn.init.xavier_normal_(mode.weight,gain= nn.init.calculate_gain('relu'))\n",
    "        for mode in self.l2:\n",
    "            if isinstance(mode,nn.Linear):\n",
    "                nn.init.xavier_normal_(mode.weight,gain= nn.init.calculate_gain('relu'))\n",
    "\n",
    "    def forward(self, x1,x2,fea,adj,fea_ori,adj_ori,data_list_cc,data_list_dd,X_new,get_score=False):\n",
    "        \"\"\"Score a batch of (circRNA, disease) pairs.\n",
    "\n",
    "        x1 / x2 are batch index tensors for circRNAs / diseases. fea and adj\n",
    "        cover the full graph; fea_ori / adj_ori are the original (un-augmented)\n",
    "        versions. data_list_cc / data_list_dd hold each node's sampled\n",
    "        neighbour indices. With get_score=True, returns the per-neighbour\n",
    "        attention scores (used by the reward computation) instead of logits.\n",
    "        \"\"\"\n",
    "       \n",
    "        fea_ori_first=fea_ori  # NOTE(review): appears unused below\n",
    "        \n",
    "        selected_row1 = data_list_cc[x1]  # sampled neighbour ids per circRNA in the batch\n",
    "        selected_row2 = data_list_dd[x2]  # sampled neighbour ids per disease in the batch\n",
    "        selected_feature1=fea[selected_row1]     # (batch, n_sampled, feat), e.g. (16,20,1527)\n",
    "        selected_feature2=fea[selected_row2]     # (batch, n_sampled, feat), e.g. (16,20,1527)\n",
    "        selected_feature1 = F.dropout(selected_feature1, self.dropout, training=self.training)\n",
    "        selected_feature2 = F.dropout(selected_feature2, self.dropout, training=self.training)\n",
    "        \n",
    "        relation_cstart=0  # relation id passed to attention for the circRNA side\n",
    "        relation_dstart=1  # relation id passed to attention for the disease side\n",
    "        \n",
    "        if get_score:\n",
    "            # average the per-neighbour attention scores over all heads\n",
    "            score1 = [att(selected_feature1,relation_cstart,get_score=True) for att in self.attentions1]\n",
    "            score1=torch.stack(score1,dim=0)\n",
    "            score1=score1.mean(dim=0)\n",
    "            score2 = [att(selected_feature2,relation_dstart,get_score=True) for att in self.attentions1]\n",
    "            score2=torch.stack(score2,dim=0)\n",
    "            score2=score2.mean(dim=0)\n",
    "            return score1,score2\n",
    "        \n",
    "        output1 = torch.cat([att(selected_feature1,relation_cstart) for att in self.attentions1], dim=1)\n",
    "        output1 = self.out_att1(output1)  # final attention output projection\n",
    "        output2 = torch.cat([att(selected_feature2,relation_dstart) for att in self.attentions1], dim=1)\n",
    "        output2 = self.out_att1(output2)  # final attention output projection\n",
    "        output1 = F.dropout(output1, self.dropout, training=self.training)\n",
    "        output2 = F.dropout(output2, self.dropout, training=self.training)\n",
    "        \n",
    "        \n",
    "        \n",
    "        # raw original features; disease rows presumably start at row offset\n",
    "        # 834 (number of circRNAs?) -- TODO confirm against the dataset layout\n",
    "        ori1=fea_ori[x1]\n",
    "        ori2=fea_ori[x2+834]\n",
    "        \n",
    "#         # compute S\n",
    "        \n",
    "#         Z = self.attention2(fea_ori)\n",
    "#         S = torch.matmul(Z, Z.transpose(0, 1))\n",
    "\n",
    "        \n",
    "\n",
    "#         # compute D^{-1} for A ⊙ S\n",
    "#         A_S = torch.mul(adj_ori, S)\n",
    "#         degree = A_S.sum(0)\n",
    "#         D_inv = torch.diag(1.0/degree)\n",
    "        \n",
    "#         # compute D^{-1} (A ⊙ S) \n",
    "#         DA = torch.mm(D_inv, A_S)\n",
    "        \n",
    "# #         # compute the second-layer output\n",
    "\n",
    "        feak = F.relu(self.gc1(fea_ori, adj_ori))  # one GCN layer over the original graph\n",
    "        # feak2 = F.relu(self.gc2(feak, adj_ori))\n",
    "        \n",
    "        # compute the final output\n",
    "        # Z = F.relu(torch.matmul(Z_ORI, self.W1) + self.gc2(fea, DA))\n",
    "        \n",
    "        \n",
    "        con1=feak[x1]\n",
    "        con2=feak[x2+834]  # same +834 disease row offset as above\n",
    "        \n",
    "\n",
    "        # gated fusion of the attention output and the GCN embedding\n",
    "        # (F.sigmoid is deprecated in newer torch; torch.sigmoid is the modern form)\n",
    "        gate1=F.sigmoid(self.g1(output1)+self.g2(con1)+self.bias)\n",
    "        output1=gate1*output1\n",
    "        con1=(1-gate1)*con1\n",
    "        \n",
    "\n",
    "        gate2=F.sigmoid(self.g3(output2)+self.g4(con2)+self.bias)\n",
    "        output2=gate2*output2\n",
    "        con2=(1-gate2)*con2\n",
    "        \n",
    "\n",
    "        \n",
    "        \n",
    "        # concatenate the raw / attention / GCN views, then add singleton H,W\n",
    "        # dims so the pair can be scored by the small conv head below\n",
    "        output1=torch.cat([ori1,output1,con1],dim=1)\n",
    "        output2=torch.cat([ori2,output2,con2],dim=1)\n",
    "        # y=torch.cat([output1,output2],dim=1) \n",
    "        output1 = output1[:,None,None, :]\n",
    "        output2 = output2[:,None,None, :]\n",
    "        y=torch.cat([output1,output2],dim=2) \n",
    "        y=self.s1(self.leakyrelu(self.c1(y)))  \n",
    "        y=self.s2(self.leakyrelu(self.c2(y))) \n",
    "        y=y.reshape(y.shape[0],-1)  # flatten conv features per sample\n",
    "        y=self.l1(y)\n",
    "        y=self.l2(y)  # final class logits\n",
    "        return y\n",
    "    \n",
    "\n",
    "    \n",
    "\n",
    "# net = GAT(\n",
    "#         nfeat=1527,\n",
    "#         nhid=1527,\n",
    "#         dropout=0.3,\n",
    "#         nheads=8,\n",
    "#         alpha=0.2\n",
    "#     ).to(device)\n",
    "\n",
    "# x1=torch.tensor([656,642,424,662,697,819,478,36,591,509,806,699,723,670,511,679],dtype=torch.long).to(device)\n",
    "# x2=torch.tensor([88,2,92,65,110,77,108,12,107,118,136,109,128,103,117,69],dtype=torch.long).to(device)\n",
    "# adj_cd = torch.load('./data_circ/dataset/adj_cd_case.pt')\n",
    "# feature_cd = torch.load('./data_circ/dataset/fea_cd_case.pt')\n",
    "# feature_cd= feature_cd.to(device)\n",
    "# adj_cd= adj_cd.to(device)\n",
    "# data_list_cc = torch.load('./data_circ/dataset/data_cc_case.pt')\n",
    "# data_list_dd = torch.load('./data_circ/dataset/data_dd_case.pt')\n",
    "# data_list_cc = torch.stack([torch.stack(inner_list) for inner_list in data_list_cc]).to(device)\n",
    "# data_list_dd = torch.stack([torch.stack(inner_list) for inner_list in data_list_dd]).to(device)\n",
    "# data_list_cc = data_list_cc.squeeze(0)\n",
    "# data_list_dd = data_list_dd.squeeze(0)\n",
    "# print(data_list_cc.shape)\n",
    "# data_list_cc[data_list_cc == 1527] = 972\n",
    "# data_list_dd[data_list_dd == 1527] = 972\n",
    "\n",
    "\n",
    "# adj_ori = adj_cd[:972, :972]     #已经归一化了\n",
    "# feature_ori = feature_cd[:972, :]\n",
    "# X_new = torch.matmul(adj_ori, feature_ori)\n",
    "\n",
    "# one,two=net(x1,x2,feature_cd,adj_cd,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new,get_score=True)#(32,2)\n",
    "# print(one.shape)\n",
    "# print(two.shape)\n",
    "# net(x1,x2,feature_cd,adj_cd,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new).shape#(32,2)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "254b2040-4f80-4732-8e9f-007b9c790c2a",
   "metadata": {},
   "source": [
    "# get_reward"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "1322ec0d-1f39-4045-bbc3-4db8c160cf0d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute the reward of each neighbour-sampling strategy\n",
    "def get_reward(i,args, model, device, loader, p,feature,adj,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new):\n",
    "    \"\"\"Estimate the expected attention score under each sampling strategy.\n",
    "\n",
    "    p is the current mixture probability over the three strategies; the\n",
    "    importance weight s_k[ids] / phi[ids] corrects for sampling under the\n",
    "    mixture phi = p[0]*s + p[1]*s1 + p[2]*s2. Returns a length-3 numpy array.\n",
    "    \"\"\"\n",
    "    \n",
    "    ori_data_list_cc=data_list_cc\n",
    "    ori_data_list_dd=data_list_dd\n",
    "    \n",
    "    \n",
    "    power_adj_list_cd5=copy.deepcopy(adj)\n",
    "    power_adj_list_cd5=[power_adj_list_cd5]\n",
    "    for m in range(2):\n",
    "        # NOTE(review): '*' is element-wise; true adjacency powers would use\n",
    "        # torch.mm / '@' -- confirm this is intended\n",
    "        power_adj_list_cd5.append(power_adj_list_cd5[0]*power_adj_list_cd5[m])      #(3,972,972)\n",
    "    \n",
    "    # normalized adjacency: one-hop neighbours\n",
    "    eigen_adj_cd5 = power_adj_list_cd5[0]\n",
    "    # two-hop term\n",
    "    eigen_adj1_cd5 = power_adj_list_cd5[1]\n",
    "    # PageRank-like matrix: estimates each node's importance in the network\n",
    "    eigen_adj2_cd5 = power_adj_list_cd5[2]\n",
    "\n",
    "    r = [[], [], []]\n",
    "    reward = np.zeros(3)\n",
    "    model.eval()    # evaluation mode\n",
    "\n",
    "    \n",
    "    n_node = 25  # presumably 1 self column + 24 sampled neighbours -- confirm\n",
    "    # iterate over batches (x1, x2, y) from the loader\n",
    "    for x1,x2,y in tqdm(loader, desc=\"Iteration\"):\n",
    "        x1,x2,y=x1.long().to(device),x2.long().to(device),y.long().to(device)\n",
    "        with torch.no_grad():\n",
    "            # per-neighbour attention scores for both sides of the pair\n",
    "            scores1,scores2 = model(x1,x2,feature,adj,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new,get_score=True)\n",
    "            # only columns 1..n_node-1 are kept below: column 0 (the node\n",
    "            # itself) is excluded from the reward\n",
    "        x1,x2=x1.to('cpu'),x2.to('cpu')\n",
    "        scores1 = scores1[:, 1:n_node]   # (batch, 24) neighbour scores\n",
    "        scores2 = scores2[:, 1:n_node]  # (batch, 24)\n",
    "        \n",
    "        ids1=data_list_cc[x1]        # (batch, n_node) sampled neighbour indices\n",
    "        ids1 = ids1[:, 1:n_node]   # (batch, 24)\n",
    "        \n",
    "        ids2=data_list_dd[x2]\n",
    "        ids2 = ids2[:, 1:n_node]\n",
    "        \n",
    "        # NOTE(review): restores the original lists, but they were never\n",
    "        # modified above -- this is a no-op as written\n",
    "        data_list_cc=ori_data_list_cc\n",
    "        data_list_dd=ori_data_list_dd\n",
    "        \n",
    "\n",
    "        \n",
    "        # remap the padding index 1527 to 972 -- presumably a padding row\n",
    "        ids1[ids1 == 1527] = 972\n",
    "        ids2[ids2 == 1527] = 972\n",
    "        \n",
    "        \n",
    "        # loop over per-sample score vectors (circRNA side)\n",
    "        # NOTE(review): loop variable 'i' shadows the function parameter 'i'\n",
    "        for i,score in enumerate(scores1):\n",
    "            id = x1[i]    # node id of this sample\n",
    "            ids=ids1[i]     # its sampled neighbour ids\n",
    "            # NOTE(review): s/s1/s2 are views into the (deep-copied) matrices,\n",
    "            # so zeroing s[id] below mutates them across iterations\n",
    "            s = eigen_adj_cd5[id]#(1527,)\n",
    "            s1 = eigen_adj1_cd5[id]\n",
    "            s2 = eigen_adj2_cd5[id]\n",
    "            s[id], s1[id], s2[id] = 0, 0, 0\n",
    "            s = s/(s.sum()+1e-5)\n",
    "            s1 = s1/(s1.sum()+1e-5)\n",
    "            s2 = s2/(s2.sum()+1e-5)\n",
    "            # mixture proposal over strategies; importance-weight each score\n",
    "            phi = p[0]*s + p[1]*s1 + p[2]*s2 + 1e-5\n",
    "            r[0].append(torch.sum(score * s[ids] / phi[ids]))\n",
    "            r[1].append(torch.sum(score * s1[ids] / phi[ids]))\n",
    "            r[2].append(torch.sum(score * s2[ids] / phi[ids]))\n",
    "            \n",
    "            \n",
    "        # same accumulation for the disease side (+834 row offset)\n",
    "        for i,score in enumerate(scores2):\n",
    "            id = x2[i]+834\n",
    "            ids=ids2[i]\n",
    "            s = eigen_adj_cd5[id]#(1527,)\n",
    "            s1 = eigen_adj1_cd5[id]\n",
    "            s2 = eigen_adj2_cd5[id]\n",
    "            s[id], s1[id], s2[id] = 0, 0, 0\n",
    "            s = s/(s.sum()+1e-5)\n",
    "            s1 = s1/(s1.sum()+1e-5)\n",
    "            s2 = s2/(s2.sum()+1e-5)\n",
    "            phi = p[0]*s + p[1]*s1 + p[2]*s2 +  1e-5\n",
    "            r[0].append(torch.sum(score * s[ids] / phi[ids]))\n",
    "            r[1].append(torch.sum(score * s1[ids] / phi[ids]))\n",
    "            r[2].append(torch.sum(score * s2[ids] / phi[ids]))\n",
    "    # mean importance-weighted score per strategy over all samples\n",
    "    reward[0] = torch.mean(torch.cat([i.unsqueeze(0) for i in r[0]])).cpu().numpy()\n",
    "    reward[1] = torch.mean(torch.cat([i.unsqueeze(0) for i in r[1]])).cpu().numpy()\n",
    "    reward[2] = torch.mean(torch.cat([i.unsqueeze(0) for i in r[2]])).cpu().numpy()\n",
    "    return reward\n",
    "\n",
    "# trset=DataLoader(MyDataset(tri[i],cd),args.batch_size,shuffle=True)      #读训练数据，格式（32,x1,x2,label）\n",
    "# teset=DataLoader(MyDataset(tei[i],cd),args.batch_size,shuffle=False)     #读测试数据\n",
    "# data_list_cc=torch.load('./data_circ/dataset/data_cc.pt')\n",
    "# data_list_dd=torch.load('./data_circ/dataset/data_dd.pt')\n",
    "# feature=torch.load('./data_circ/dataset/fea_cd')\n",
    "# feature= torch.stack(feature).to(device)\n",
    "# adj=torch.load('./data_circ/dataset/adj_cd')\n",
    "# adj= torch.stack(adj).to(device)\n",
    "# data_list_cc = torch.stack([torch.stack(inner_list) for inner_list in data_list_cc]).to(device)\n",
    "# data_list_dd = torch.stack([torch.stack(inner_list) for inner_list in data_list_dd]).to(device)\n",
    "# data_list_cc[data_list_cc == 1527] = 972\n",
    "# data_list_dd[data_list_dd == 1527] = 972\n",
    "# adj_ori = adj_cd[:, :972, :972]     #已经归一化了\n",
    "# feature_ori = feature_cd[:, :972, :]\n",
    "# s_ax = torch.load('./data_circ/dataset/s_ax')  \n",
    "# s_a2x = torch.load('./data_circ/dataset/s_a2x') \n",
    "# p=[0.25,0.25,0.25]\n",
    "# r = get_reward(i,args, model, device, trset, p,feature_cd[i],adj_cd[i],feature_ori[i],adj_ori[i],data_list_cc[i],data_list_dd[i],s_ax[i],s_a2x[i])\n",
    "# print('reward:', r)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2ca1db69",
   "metadata": {},
   "source": [
    "# train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "0264d431",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One training epoch: model, device, data loader, optimizer, features\n",
    "def train(args, model, device, loader, optimizer,feature,adj,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new,teset,cros,epoch ,epochs):\n",
    "    \"\"\"Run one epoch of cross-entropy training on (x1, x2, label) batches.\n",
    "\n",
    "    On the final epoch (epoch == epochs) the model weights are saved to\n",
    "    './best_network.pth' and train/test accuracy is printed via tacc().\n",
    "    \"\"\"\n",
    "    isSave=0\n",
    "    cost=nn.CrossEntropyLoss()\n",
    "    running_loss = 0.0\n",
    "    model.train()       # training mode (enables dropout / batch-norm updates)\n",
    "#         iterate over every batch in the loader; tqdm draws a progress bar\n",
    "    for x1,x2,y in tqdm(loader, desc=\"Iteration\"):\n",
    "        x1,x2,y = x1.long().to(device),x2.long().to(device),y.long().to(device)    # move the batch to the compute device\n",
    "        pred = model(x1,x2,feature,adj,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new)         # forward pass\n",
    "        loss = cost(pred, y)     # cross-entropy loss\n",
    "        optimizer.zero_grad()               # clear accumulated gradients\n",
    "        loss.backward()             # backward pass\n",
    "        optimizer.step()            # parameter update\n",
    "        running_loss += loss.item()\n",
    "    print(f\"Loss: {running_loss}\")\n",
    "    if epoch==epochs:\n",
    "        isSave=1\n",
    "        torch.save(model.state_dict(), './best_network.pth')\n",
    "        tacc(args,model,loader,feature,adj,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new,0,isSave,cros)      # training-set accuracy\n",
    "        tacc(args,model,teset,feature,adj,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new,1,isSave,cros)        # test-set accuracy\n",
    "        torch.cuda.empty_cache()\n",
    "\n",
    "def tacc(args,model,tset,feature,adj,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new,string,s,cros):\n",
    "    \"\"\"Print accuracy of the saved best model on `tset`.\n",
    "\n",
    "    string: 0 = train set, 1 = test set (label only). s: when 1, collect all\n",
    "    predictions/labels and, for the test set, dump them to disk.\n",
    "    NOTE(review): this reloads './best_network.pth' into `model`, overwriting\n",
    "    its in-memory weights as a side effect.\n",
    "    \"\"\"\n",
    "    correct=0      # number of correct predictions\n",
    "    total=0        # total number of samples\n",
    "    st={0:'train_acc',1:'test_acc'}\n",
    "    predall,yall=torch.tensor([]).to(device),torch.tensor([]).to(device)     # accumulators for predictions and labels\n",
    "    model.eval()      # evaluation mode: dropout disabled\n",
    "    model.load_state_dict(torch.load('./best_network.pth'))\n",
    "    for x1,x2,y in tset:\n",
    "        x1,x2,y=x1.long().to(device),x2.long().to(device),y.long().to(device)\n",
    "        pred=model(x1,x2,feature,adj,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new).data     # raw prediction scores, shape (batch, 2)\n",
    "        if s==1:\n",
    "            predall=torch.cat([predall,torch.as_tensor(pred)],dim=0)  # torch.as_tensor is a no-op here: pred is already a tensor\n",
    "            yall=torch.cat([yall,torch.as_tensor(y)])\n",
    "        a=torch.max(pred,1)[1]#   row-wise argmax = predicted class\n",
    "        total+=y.size(0)    # accumulate sample count\n",
    "        correct+=(a==y).sum()     # accumulate correct predictions\n",
    "    if string==1 and s==1:\n",
    "        print('yes')\n",
    "        torch.save((predall,yall),'./data_circ/result_case')\n",
    "    print(st[string]+str((correct/total).item()))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "0188fda4",
   "metadata": {},
   "outputs": [],
   "source": [
    "#用于处理自己的数据，输出坐标和cd矩阵，返回坐标和标签\n",
    "class MyDataset(Dataset):\n",
    "    def __init__(self,tri,cd):\n",
    "        self.tri=tri\n",
    "        self.cd=cd\n",
    "    def __getitem__(self,idx):\n",
    "        x,y=self.tri[:,idx]\n",
    "        label=self.cd[x][y]\n",
    "        return x,y,label\n",
    "    def __len__(self):\n",
    "        return self.tri.shape[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "cb5a995f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([834, 25])\n",
      "dataset load successfuly\n",
      "Namespace(dataset_name='circ_dis', num_heads=8, hidden_dim=1527, edge_hidden=64, dropout_rate=0.5, weight_decay=1e-05, checkpoint_path='', epochs=100, peak_lr=0.0003, num_global_node=1, batch_size=32, seed=42, device=0, weight_update_period=20)\n",
      "1527\n"
     ]
    }
   ],
   "source": [
    "# argparse.ArgumentParser creates a command-line argument parser object;\n",
    "# `description` is the short tool description shown in the help text.\n",
    "parser = argparse.ArgumentParser(description='PyTorch implementation of ANS-GAT')\n",
    "# Define the command-line options that customise this script's behaviour.\n",
    "parser.add_argument('--dataset_name', type=str, default='circ_dis')\n",
    "parser.add_argument('--num_heads', type=int, default=8)\n",
    "parser.add_argument('--hidden_dim', type=int, default=1527)\n",
    "parser.add_argument('--edge_hidden', type=int, default=64)\n",
    "parser.add_argument('--dropout_rate', type=float, default=0.5)              # model dropout rate\n",
    "parser.add_argument('--weight_decay', type=float, default=0.00001)                # weight-decay coefficient\n",
    "parser.add_argument('--checkpoint_path', type=str, default='')              # empty string = do not save checkpoints\n",
    "parser.add_argument('--epochs', type=int, default=100)\n",
    "parser.add_argument('--peak_lr', type=float, default=0.0002)                  # peak (maximum) learning rate\n",
    "parser.add_argument('--num_global_node', type=int, default=1)               # number of global nodes\n",
    "parser.add_argument('--batch_size', type=int, default=32)\n",
    "# Any fixed seed works, as long as it is kept consistent across experiments.\n",
    "parser.add_argument('--seed', type=int, default=42)                         # random seed (default 42)\n",
    "parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')\n",
    "                                            # GPU device index\n",
    "parser.add_argument('--weight_update_period', type=int, default=20, help='epochs to update the sampling weight')\n",
    "                                            # period (in epochs) for refreshing the sampling weights\n",
    "args = parser.parse_args([])                                            \n",
    "# Parse an empty argv (notebook style) so every option takes its default.\n",
    "# Use the requested GPU when CUDA is available, otherwise fall back to CPU.\n",
    "device = torch.device(\"cuda:\" + str(args.device)) if torch.cuda.is_available() else torch.device(\"cpu\")\n",
    " # Load the data.\n",
    "    \n",
    "    \n",
    "adj_cd = torch.load('./data_circ/dataset/adj_cd_case.pt')\n",
    "feature_cd = torch.load('./data_circ/dataset/fea_cd_case.pt')\n",
    "feature_cd= feature_cd.to(device)\n",
    "adj_cd= adj_cd.to(device)\n",
    "data_list_cc = torch.load('./data_circ/dataset/data_cc_case.pt')\n",
    "data_list_dd = torch.load('./data_circ/dataset/data_dd_case.pt')\n",
    "data_list_cc = torch.stack([torch.stack(inner_list) for inner_list in data_list_cc]).to(device)\n",
    "data_list_dd = torch.stack([torch.stack(inner_list) for inner_list in data_list_dd]).to(device)\n",
    "data_list_cc = data_list_cc.squeeze(0)\n",
    "data_list_dd = data_list_dd.squeeze(0)\n",
    "print(data_list_cc.shape)\n",
    "# Remap the padding index 1527 to 972 -- presumably a padding row; confirm\n",
    "data_list_cc[data_list_cc == 1527] = 972\n",
    "data_list_dd[data_list_dd == 1527] = 972\n",
    "adj_ori = adj_cd[:972, :972]     # already normalized\n",
    "feature_ori = feature_cd[:972, :]\n",
    "X_new = torch.matmul(adj_ori, feature_ori)\n",
    "\n",
    "\n",
    "\n",
    "cd,_,tri,tei=torch.load('./caseData.pth')     # load (cd matrix, _, train indices, test indices)\n",
    "print('dataset load successfuly')\n",
    "print(args)\n",
    "print(feature_cd.shape[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "106b8207",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'GAT' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[5], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m model \u001b[38;5;241m=\u001b[39m \u001b[43mGAT\u001b[49m(\n\u001b[0;32m      2\u001b[0m     nfeat\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1527\u001b[39m,\n\u001b[0;32m      3\u001b[0m     nhid\u001b[38;5;241m=\u001b[39margs\u001b[38;5;241m.\u001b[39mhidden_dim,\n\u001b[0;32m      4\u001b[0m     dropout\u001b[38;5;241m=\u001b[39margs\u001b[38;5;241m.\u001b[39mdropout_rate,\n\u001b[0;32m      5\u001b[0m     nheads\u001b[38;5;241m=\u001b[39margs\u001b[38;5;241m.\u001b[39mnum_heads,\n\u001b[0;32m      6\u001b[0m     alpha\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.2\u001b[39m,\n\u001b[0;32m      7\u001b[0m )\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m      8\u001b[0m trset\u001b[38;5;241m=\u001b[39mDataLoader(MyDataset(tri,cd),args\u001b[38;5;241m.\u001b[39mbatch_size,shuffle\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)      \u001b[38;5;66;03m#读训练数据，格式（32,x1,x2,label）\u001b[39;00m\n\u001b[0;32m      9\u001b[0m teset\u001b[38;5;241m=\u001b[39mDataLoader(MyDataset(tei,cd),args\u001b[38;5;241m.\u001b[39mbatch_size,shuffle\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mFalse\u001b[39;00m)     \u001b[38;5;66;03m#读测试数据\u001b[39;00m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'GAT' is not defined"
     ]
    }
   ],
   "source": [
    "model = GAT(\n",
    "    nfeat=1527,\n",
    "    nhid=args.hidden_dim,\n",
    "    dropout=args.dropout_rate,\n",
    "    nheads=args.num_heads,\n",
    "    alpha=0.2,\n",
    ").to(device)\n",
    "trset=DataLoader(MyDataset(tri,cd),args.batch_size,shuffle=True)      # training data: batches of (x1, x2, label)\n",
    "teset=DataLoader(MyDataset(tei,cd),args.batch_size,shuffle=False)     # test data\n",
    "# Define the optimizer (Adam; weight decay from the CLI args)\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=args.peak_lr,weight_decay=args.weight_decay)\n",
    "sampling_weight = np.ones(3)\n",
    "## weight_history records how the sampling probabilities evolve\n",
    "weight_history = []\n",
    "# floor probability so no sampling strategy is ever starved\n",
    "p_min = 0.05\n",
    "p = (1 - 3 * p_min) * sampling_weight / sum(sampling_weight) + p_min\n",
    "for epoch in range(1, args.epochs+1):\n",
    "    print(\"====epoch \" + str(epoch))\n",
    "    # train one epoch\n",
    "    train(args, model, device, trset, optimizer, feature_cd,adj_cd,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new,teset,1,epoch,args.epochs)\n",
    "    # (learning-rate scheduler step, currently disabled)\n",
    "#         lr_scheduler.step()\n",
    "#         check whether this epoch is a weight-update epoch\n",
    "    if epoch % args.weight_update_period == 0:\n",
    "        # compute the per-strategy reward on the training set\n",
    "        r = get_reward(1,args, model, device, trset, p,feature_cd,adj_cd,feature_ori,adj_ori,data_list_cc,data_list_dd,X_new)\n",
    "        print('reward:', r)\n",
    "        # multiplicative update: w_k *= sigmoid(r_k)\n",
    "        sampling_weight = sampling_weight * 1 / (1 + np.exp(-r))\n",
    "        # renormalise the weights into a probability distribution with floor p_min\n",
    "        p = (1 - 3 * p_min) * sampling_weight / sum(sampling_weight) + p_min\n",
    "        print('p:', p)\n",
    "        # record the probability history\n",
    "        weight_history.append(p)\n",
    "        # resample neighbour lists under the new distribution p\n",
    "        # NOTE(review): node_sampling is not defined anywhere in this notebook --\n",
    "        # confirm it is provided elsewhere before running this branch\n",
    "        data_list_cc,data_list_dd = node_sampling(p)\n",
    "        data_list_cc = torch.stack([torch.stack(inner_list) for inner_list in data_list_cc]).to(device)\n",
    "        data_list_dd = torch.stack([torch.stack(inner_list) for inner_list in data_list_dd]).to(device)\n",
    "        data_list_cc = data_list_cc.squeeze(0)\n",
    "        data_list_dd = data_list_dd.squeeze(0)\n",
    "        data_list_cc[data_list_cc == 1527] = 972\n",
    "        data_list_dd[data_list_dd == 1527] = 972"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
