{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "varied-rider",
   "metadata": {},
   "source": [
    "# 一、手动设置语义与label相同\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "aggregate-modern",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Processing...\n",
      "Done!\n",
      "\u001b[32m======================\u001b[0m\n",
      "\u001b[32mNumber of nodes: 364\u001b[0m\n",
      "\u001b[32mNumber of edges: 42240\u001b[0m\n",
      "\u001b[32mAverage node degree: 116.04\u001b[0m\n",
      "\u001b[32mNumber of training nodes: 291\u001b[0m\n",
      "\u001b[32mTraining node label rate: 0.80\u001b[0m\n",
      "\u001b[32mContains isolated nodes: False\u001b[0m\n",
      "\u001b[32mContains self-loops: True\u001b[0m\n",
      "\u001b[32mIs undirected: True\u001b[0m\n",
      "\u001b[32mData's dimension: 1248\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "#!/usr/bin/env python\n",
    "# -*- coding:utf-8 -*-\n",
    "# @Time    : 2022/3/20 1:12 下午\n",
    "# @Author  : WangZhixing\n",
    "\n",
    "\n",
    "import argparse\n",
    "import os\n",
    "import shutil\n",
    "import sys\n",
    "import pandas as pd\n",
    "rootPath = \"/Users/wzx/Downloads/module-reverse-by-gnn\"\n",
    "sys.path.append(rootPath)\n",
    "\n",
    "\n",
    "from torch_geometric.transforms import NormalizeFeatures\n",
    "from ProcessData import DependenceGraph\n",
    "\n",
    "from Utils import ConfigFile\n",
    "from Model.Module.GCN import GCN,Linear_GCN\n",
    "import pandas as pd\n",
    "import torch\n",
    "from Metric import Metric\n",
    "from Output.output_mehod.result2rsf_file import result2rsf_file\n",
    "\n",
    "\n",
    "config =\"/Users/wzx/Downloads/module-reverse-by-gnn/config/client/DependenceGraph/transitive.ini\"\n",
    "\n",
    "kwarg = ConfigFile(config).ReadConfig()\n",
    "\n",
    "if os.path.exists(os.path.join(kwarg[\"root\"], \"processed\")):\n",
    "    shutil.rmtree(os.path.join(kwarg[\"root\"], \"processed\"))\n",
    "data = DependenceGraph(kwarg[\"root\"], transform=NormalizeFeatures()).data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "lightweight-clock",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>0</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>9</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>12</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>9</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>9</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>359</th>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>360</th>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>361</th>\n",
       "      <td>6</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>362</th>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>363</th>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>364 rows × 1 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "      0\n",
       "0     9\n",
       "1     1\n",
       "2    12\n",
       "3     9\n",
       "4     9\n",
       "..   ..\n",
       "359   4\n",
       "360   4\n",
       "361   6\n",
       "362   4\n",
       "363   4\n",
       "\n",
       "[364 rows x 1 columns]"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.DataFrame(data.y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "orange-option",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1     107\n",
       "10     67\n",
       "4      57\n",
       "9      56\n",
       "6      32\n",
       "5      11\n",
       "14      8\n",
       "8       7\n",
       "3       6\n",
       "13      4\n",
       "11      4\n",
       "12      3\n",
       "2       2\n",
       "dtype: int64"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.DataFrame(data.y).value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "encouraging-heart",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>0</th>\n",
       "      <th>1</th>\n",
       "      <th>2</th>\n",
       "      <th>3</th>\n",
       "      <th>4</th>\n",
       "      <th>5</th>\n",
       "      <th>6</th>\n",
       "      <th>7</th>\n",
       "      <th>8</th>\n",
       "      <th>9</th>\n",
       "      <th>...</th>\n",
       "      <th>1238</th>\n",
       "      <th>1239</th>\n",
       "      <th>1240</th>\n",
       "      <th>1241</th>\n",
       "      <th>1242</th>\n",
       "      <th>1243</th>\n",
       "      <th>1244</th>\n",
       "      <th>1245</th>\n",
       "      <th>1246</th>\n",
       "      <th>1247</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>359</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>360</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>361</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>362</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>363</th>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>364 rows × 1248 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "     0     1     2     3     4     5     6     7     8     9     ...  1238  \\\n",
       "0     0.0   0.0   0.0   0.0   0.0   0.0   0.0   1.0   0.0   0.0  ...   0.0   \n",
       "1     1.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "2     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "3     0.0   0.0   0.0   0.0   0.0   0.0   0.0   1.0   0.0   0.0  ...   0.0   \n",
       "4     0.0   0.0   0.0   0.0   0.0   0.0   0.0   1.0   0.0   0.0  ...   0.0   \n",
       "..    ...   ...   ...   ...   ...   ...   ...   ...   ...   ...  ...   ...   \n",
       "359   0.0   0.0   0.0   1.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   1.0   \n",
       "360   0.0   0.0   0.0   1.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   1.0   \n",
       "361   0.0   0.0   0.0   0.0   0.0   1.0   0.0   0.0   0.0   0.0  ...   0.0   \n",
       "362   0.0   0.0   0.0   1.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   1.0   \n",
       "363   0.0   0.0   0.0   1.0   0.0   0.0   0.0   0.0   0.0   0.0  ...   1.0   \n",
       "\n",
       "     1239  1240  1241  1242  1243  1244  1245  1246  1247  \n",
       "0     0.0   0.0   0.0   1.0   0.0   0.0   0.0   0.0   0.0  \n",
       "1     0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "2     0.0   0.0   0.0   0.0   0.0   0.0   1.0   0.0   0.0  \n",
       "3     0.0   0.0   0.0   1.0   0.0   0.0   0.0   0.0   0.0  \n",
       "4     0.0   0.0   0.0   1.0   0.0   0.0   0.0   0.0   0.0  \n",
       "..    ...   ...   ...   ...   ...   ...   ...   ...   ...  \n",
       "359   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "360   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "361   0.0   1.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "362   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "363   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0   0.0  \n",
       "\n",
       "[364 rows x 1248 columns]"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pd.DataFrame(data.x.numpy())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "compatible-handle",
   "metadata": {},
   "source": [
    "# 二、kmeans"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "satisfied-tiffany",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Bash项目的指标\n",
      "a2a:100\n",
      "c2c0_8：93\n",
      "c2c0_5：93\n",
      "c2c0_3：93\n",
      "c2c0_1：100\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wzx/opt/anaconda3/envs/torch/lib/python3.6/site-packages/sklearn/cluster/_kmeans.py:1077: ConvergenceWarning: Number of distinct clusters (13) found smaller than n_clusters (15). Possibly due to duplicate points in X.\n",
      "  return self.fit(X, sample_weight=sample_weight).labels_\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(13, 100, 93, 93, 100)"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.cluster import KMeans\n",
    "preds = KMeans(n_clusters=15, random_state=123).fit_predict(data.x)\n",
    "result2rsf_file(kwarg[\"root\"], preds, kwarg[\"outfile_path\"])\n",
    "Metric(kwarg[\"project\"], kwarg[\"outfile_path\"], kwarg[\"ground_path\"])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "institutional-triple",
   "metadata": {},
   "source": [
    "# 三、mlp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "sustained-roads",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch.nn import Linear\n",
    "import torch.nn.functional as F\n",
    "\n",
    "\n",
    "class MLP(torch.nn.Module):\n",
    "    def __init__(self,num_features, hidden_channels,num_classes):\n",
    "        super(MLP, self).__init__()\n",
    "        torch.manual_seed(123)\n",
    "        self.lin1 = Linear(num_features, hidden_channels)\n",
    "        self.lin2 = Linear(hidden_channels, num_classes)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.lin1(x)\n",
    "        x = x.relu()\n",
    "        x = F.dropout(x, p=0.5, training=self.training)\n",
    "        x = self.lin2(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "surprising-elite",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = MLP(\n",
    "        num_features=data.num_features,\n",
    "        hidden_channels=kwarg['model_layer'][0],\n",
    "        num_classes=kwarg['model_layer'][1]\n",
    "    )\n",
    "# 定义损失标准\n",
    "criterion = torch.nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)  # 定义Adam优化器\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "violent-white",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model,data,criterion,optimizer):\n",
    "    model.train()\n",
    "    optimizer.zero_grad()  # 清除梯度\n",
    "    out = model(data.x)  # Perform a single forward pass.执行单次向前传播\n",
    "    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # 根据训练节点计算损失\n",
    "    # Compute the loss solely based on the training nodes.\n",
    "    loss.backward()  # Derive gradients，获取梯度\n",
    "    optimizer.step()  # Update parameters based on gradients.根据梯度更新参数\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "anticipated-characteristic",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(model, data):\n",
    "    model.eval()\n",
    "    out = model(data.x)\n",
    "    pred = out.argmax(dim=1)  # Use the class with highest probability.\n",
    "    # 根据实际的标签进行检查\n",
    "    test_correct = pred[data.test_mask] == data.y[data.test_mask]  # Check against ground-truth labels.\n",
    "    # 得出预测正确的比例\n",
    "    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())  # Derive ratio of correct predictions.\n",
    "    # 返回预测正确的比例\n",
    "    return test_acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "neither-thirty",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Accuracy: 1.0000\n"
     ]
    }
   ],
   "source": [
    "df = pd.DataFrame(columns=[\"Loss\"])\n",
    "df.index.name = \"Epoch\"\n",
    "for epoch in range(1, 5):\n",
    "    loss = train(model, data, criterion, optimizer)\n",
    "test_acc = test(model, data)\n",
    "print(f'Test Accuracy: {test_acc:.4f}')\n",
    "model.eval()\n",
    "out = model(data.x)\n",
    "preds = out.argmax(dim=1).tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "tutorial-drive",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Bash项目的指标\n",
      "a2a:100\n",
      "c2c0_8：93\n",
      "c2c0_5：93\n",
      "c2c0_3：93\n",
      "c2c0_1：100\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(13, 100, 93, 93, 100)"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "result2rsf_file(kwarg[\"root\"], preds, kwarg[\"outfile_path\"])\n",
    "Metric(kwarg[\"project\"], kwarg[\"outfile_path\"], kwarg[\"ground_path\"])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "athletic-hardware",
   "metadata": {},
   "source": [
    "# 四、gcn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "supported-execution",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch_geometric.nn import GCNConv\n",
    "import torch.nn.functional as F\n",
    "\n",
    "# 包含两个模型，\n",
    "# 1、纯GCN2层模型\n",
    "# 参数：\n",
    "#     1：num_features输入层\n",
    "#     2：hidden_channels中间层\n",
    "#     3：num_classes输出层\n",
    "\n",
    "class GCN(torch.nn.Module):\n",
    "    def __init__(self, num_features, hidden_channels, num_classes):\n",
    "        super(GCN, self).__init__()\n",
    "        torch.manual_seed(12345)\n",
    "        self.conv1 = GCNConv(num_features, hidden_channels)\n",
    "        self.conv2 = GCNConv(hidden_channels, num_classes)\n",
    "\n",
    "    def forward(self, x, edge_index, edge_attr):\n",
    "        x = self.conv1(x, edge_index, edge_weight=edge_attr)\n",
    "        x = x.relu()\n",
    "        x = F.dropout(x, p=0.5, training=self.training)\n",
    "        x = self.conv2(x, edge_index)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "applied-virginia",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, data, criterion, optimizer):\n",
    "    model.train()\n",
    "    optimizer.zero_grad()  # 清除梯度\n",
    "    out = model(data.x, data.edge_index, data.edge_attr)  # Perform a single forward pass.执行单次向前传播\n",
    "    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # 根据训练节点计算损失\n",
    "    # Compute the loss solely based on the training nodes.\n",
    "    loss.backward()  # Derive gradients，获取梯度\n",
    "    optimizer.step()  # Update parameters based on gradients.根据梯度更新参数\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "respected-registration",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(model, data):\n",
    "    model.eval()\n",
    "    out = model(data.x, data.edge_index, data.edge_attr)\n",
    "    pred = out.argmax(dim=1)  # Use the class with highest probability.\n",
    "    # 根据实际的标签进行检查\n",
    "    test_correct = pred[data.test_mask] == data.y[data.test_mask]  # Check against ground-truth labels.\n",
    "    # 得出预测正确的比例\n",
    "    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())  # Derive ratio of correct predictions.\n",
    "    # 返回预测正确的比例\n",
    "    return test_acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "canadian-radio",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Accuracy: 0.5890\n"
     ]
    }
   ],
   "source": [
    "model = GCN(num_features=data.num_features,\n",
    "            hidden_channels=kwarg['model_layer'][0],\n",
    "            num_classes=kwarg['model_layer'][1])\n",
    "\n",
    "criterion = torch.nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=kwarg['lr'], weight_decay=5e-4)  # 定义Adam优化器\n",
    "\n",
    "for epoch in range(1,201):\n",
    "    loss = train(model, data, criterion, optimizer)\n",
    "test_acc = test(model, data)\n",
    "print(f'Test Accuracy: {test_acc:.4f}')\n",
    "model.eval()\n",
    "out = model(data.x, data.edge_index, data.edge_attr)\n",
    "preds = out.argmax(dim=1).tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "invalid-shame",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Bash项目的指标\n",
      "a2a:90\n",
      "c2c0_8：14\n",
      "c2c0_5：21\n",
      "c2c0_3：29\n",
      "c2c0_1：43\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(5, 90, 21, 29, 43)"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "result2rsf_file(kwarg[\"root\"], preds, kwarg[\"outfile_path\"])\n",
    "Metric(kwarg[\"project\"], kwarg[\"outfile_path\"], kwarg[\"ground_path\"])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "revised-rebecca",
   "metadata": {},
   "source": [
    "# 五、gcn更改的模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "thirty-failure",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Linear_GCN(torch.nn.Module):\n",
    "    def __init__(self, num_features, hidden_channels1,hidden_channels2, num_classes):\n",
    "        super(Linear_GCN, self).__init__()\n",
    "        torch.manual_seed(12345)\n",
    "        self.pre = torch.nn.Linear(num_features,hidden_channels1)\n",
    "        self.conv1 = GCNConv(hidden_channels1, hidden_channels2)\n",
    "        self.conv2 = GCNConv(hidden_channels2, num_classes)\n",
    "\n",
    "\n",
    "    def forward(self, x, edge_index, edge_attr):\n",
    "        x = self.pre(x)\n",
    "        x = self.conv1(x, edge_index, edge_weight=edge_attr)\n",
    "        x = x.relu()\n",
    "        x = F.dropout(x, p=0.5, training=self.training)\n",
    "        x = self.conv2(x, edge_index)\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "lightweight-stupid",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Accuracy: 0.2121\n"
     ]
    }
   ],
   "source": [
    "model = Linear_GCN(\n",
    "    num_features=data.num_features,\n",
    "    hidden_channels1=256,\n",
    "    hidden_channels2=kwarg['model_layer'][0],\n",
    "    num_classes=kwarg['model_layer'][1]\n",
    ")\n",
    "\n",
    "criterion = torch.nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=kwarg['lr'], weight_decay=5e-4)  # 定义Adam优化器\n",
    "for epoch in range(1, 201):\n",
    "    loss = train(model, data, criterion, optimizer)\n",
    "test_acc = test(model, data)\n",
    "print(f'Test Accuracy: {test_acc:.4f}')\n",
    "model.eval()\n",
    "out = model(data.x, data.edge_index, data.edge_attr)\n",
    "preds = out.argmax(dim=1).tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "dominant-logic",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Linear_GCN(\n",
       "  (pre): Linear(in_features=1152, out_features=256, bias=True)\n",
       "  (conv1): GCNConv(256, 128)\n",
       "  (conv2): GCNConv(128, 14)\n",
       ")"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "threaded-century",
   "metadata": {},
   "source": [
    "# GAT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "limited-group",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn.functional as F\n",
    "\n",
    "from torch_geometric.nn import GATConv\n",
    "\n",
    "class GAT(torch.nn.Module):\n",
    "    def __init__(self, num_features, hidden_channels, num_classes, n_heads=1):\n",
    "        super(GAT, self).__init__()\n",
    "        torch.manual_seed(12345)\n",
    "        self.conv1 = GATConv(num_features, hidden_channels, heads=n_heads)\n",
    "        self.conv2 = GATConv(n_heads * hidden_channels, num_classes)\n",
    "\n",
    "    def forward(self, x, edge_index):\n",
    "        x = self.conv1(x, edge_index)\n",
    "        x = x.relu()\n",
    "        #激活函数，舍弃所有0值\n",
    "        x = F.dropout(x, p=0.5, training=self.training)\n",
    "        x = self.conv2(x, edge_index)\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "classical-neighbor",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, data, criterion, optimizer):\n",
    "    model.train()\n",
    "    optimizer.zero_grad()  # 清除梯度\n",
    "    out = model(data.x, data.edge_index)  # Perform a single forward pass.执行单次向前传播\n",
    "    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # 根据训练节点计算损失\n",
    "    # Compute the loss solely based on the training nodes.\n",
    "    loss.backward()  # Derive gradients，获取梯度\n",
    "    optimizer.step()  # Update parameters based on gradients.根据梯度更新参数\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "crude-cabin",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(model, data):\n",
    "    model.eval()\n",
    "    out = model(data.x, data.edge_index)\n",
    "    pred = out.argmax(dim=1)  # Use the class with highest probability.\n",
    "    # 根据实际的标签进行检查\n",
    "    test_correct = pred[data.test_mask] == data.y[data.test_mask]  # Check against ground-truth labels.\n",
    "    # 得出预测正确的比例\n",
    "    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())  # Derive ratio of correct predictions.\n",
    "    # 返回预测正确的比例\n",
    "    return test_acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "bacterial-summary",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Accuracy: 0.4792\n"
     ]
    }
   ],
   "source": [
    "# Build and train the GAT model, then report test accuracy and collect predictions.\n",
    "# Fix: removed stray `df.index.name = \"Epoch\"` — `df` is not defined until a later\n",
    "# cell, so this line raised NameError on Restart-&-Run-All and was unused here.\n",
    "model = GAT(num_features=data.num_features,\n",
    "            hidden_channels=kwarg['model_layer'][0],\n",
    "            num_classes=kwarg['model_layer'][1])\n",
    "\n",
    "criterion = torch.nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=kwarg['lr'], weight_decay=5e-4)  # Define the Adam optimizer\n",
    "for epoch in range(1, 201):\n",
    "    loss = train(model, data, criterion, optimizer)\n",
    "test_acc = test(model, data)\n",
    "print(f'Test Accuracy: {test_acc:.4f}')\n",
    "model.eval()\n",
    "out = model(data.x, data.edge_index)\n",
    "preds = out.argmax(dim=1).tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "posted-insulation",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Bash项目的指标\n",
      "a2a:68\n",
      "c2c0_8：0\n",
      "c2c0_5：0\n",
      "c2c0_3：7\n",
      "c2c0_1：21\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(3, 68, 0, 7, 21)"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Write GAT predictions to an RSF file and score them against the ground truth\n",
    "result2rsf_file(kwarg[\"root\"], preds, kwarg[\"outfile_path\"])\n",
    "Metric(kwarg[\"project\"], kwarg[\"outfile_path\"], kwarg[\"ground_path\"])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eligible-shannon",
   "metadata": {},
   "source": [
    "# one-hot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "north-boulder",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Processing...\n",
      "Done!\n",
      "\u001b[32m======================\u001b[0m\n",
      "\u001b[32mNumber of nodes: 364\u001b[0m\n",
      "\u001b[32mNumber of edges: 42240\u001b[0m\n",
      "\u001b[32mAverage node degree: 116.04\u001b[0m\n",
      "\u001b[32mNumber of training nodes: 291\u001b[0m\n",
      "\u001b[32mTraining node label rate: 0.80\u001b[0m\n",
      "\u001b[32mContains isolated nodes: False\u001b[0m\n",
      "\u001b[32mContains self-loops: True\u001b[0m\n",
      "\u001b[32mIs undirected: True\u001b[0m\n",
      "\u001b[32mData's dimension: 364\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "#!/usr/bin/env python\n",
    "# -*- coding:utf-8 -*-\n",
    "# @Time    : 2022/3/20 1:12 PM\n",
    "# @Author  : WangZhixing\n",
    "\n",
    "\n",
    "import argparse\n",
    "import os\n",
    "import shutil\n",
    "import sys\n",
    "import pandas as pd\n",
    "# NOTE(review): hardcoded absolute local path — consider a configurable DATA_DIR\n",
    "rootPath = \"/Users/wzx/Downloads/module-reverse-by-gnn\"\n",
    "sys.path.append(rootPath)\n",
    "\n",
    "\n",
    "from torch_geometric.transforms import NormalizeFeatures\n",
    "from ProcessData import DependenceGraph\n",
    "\n",
    "from Utils import ConfigFile\n",
    "from Model.Module.GCN import GCN,Linear_GCN\n",
    "import torch\n",
    "from Metric import Metric\n",
    "from Output.output_mehod.result2rsf_file import result2rsf_file\n",
    "\n",
    "\n",
    "config =\"/Users/wzx/Downloads/module-reverse-by-gnn/config/client/DependenceGraph/transitive.ini\"\n",
    "\n",
    "kwarg = ConfigFile(config).ReadConfig()\n",
    "\n",
    "# Remove any stale processed dataset so it is rebuilt from scratch on load\n",
    "if os.path.exists(os.path.join(kwarg[\"root\"], \"processed\")):\n",
    "    shutil.rmtree(os.path.join(kwarg[\"root\"], \"processed\"))\n",
    "data = DependenceGraph(kwarg[\"root\"], transform=NormalizeFeatures()).data"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "nutritional-distributor",
   "metadata": {},
   "source": [
    "# kmeans"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "suitable-brunei",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Bash项目的指标\n",
      "a2a:83\n",
      "c2c0_8：0\n",
      "c2c0_5：0\n",
      "c2c0_3：0\n",
      "c2c0_1：36\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(15, 83, 0, 0, 36)"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Unsupervised baseline: cluster node features with KMeans and score the clustering\n",
    "from sklearn.cluster import KMeans\n",
    "preds = KMeans(n_clusters=15, random_state=123).fit_predict(data.x)\n",
    "result2rsf_file(kwarg[\"root\"], preds, kwarg[\"outfile_path\"])\n",
    "Metric(kwarg[\"project\"], kwarg[\"outfile_path\"], kwarg[\"ground_path\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "cardiac-frame",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch.nn import Linear\n",
    "import torch.nn.functional as F\n",
    "\n",
    "\n",
    "class MLP(torch.nn.Module):\n",
    "    \"\"\"Two-layer perceptron baseline: Linear -> ReLU -> Dropout -> Linear.\n",
    "\n",
    "    Operates on node features alone, ignoring the graph structure.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, num_features, hidden_channels, num_classes):\n",
    "        super().__init__()\n",
    "        torch.manual_seed(123)  # fixed seed so weight initialization is reproducible\n",
    "        self.lin1 = Linear(num_features, hidden_channels)\n",
    "        self.lin2 = Linear(hidden_channels, num_classes)\n",
    "\n",
    "    def forward(self, x):\n",
    "        hidden = F.relu(self.lin1(x))\n",
    "        hidden = F.dropout(hidden, p=0.5, training=self.training)\n",
    "        return self.lin2(hidden)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "strong-tackle",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = MLP(\n",
    "        num_features=data.num_features,\n",
    "        hidden_channels=kwarg['model_layer'][0],\n",
    "        num_classes=kwarg['model_layer'][1]\n",
    "    )\n",
    "# Define the loss criterion\n",
    "criterion = torch.nn.CrossEntropyLoss()\n",
    "# NOTE(review): lr is hardcoded to 0.01 here while the GNN cells use kwarg['lr'] — confirm intentional\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)  # Define the Adam optimizer\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "pursuant-rolling",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model,data,criterion,optimizer):\n",
    "    \"\"\"One training step for the MLP (node features only, no graph structure).\"\"\"\n",
    "    model.train()\n",
    "    optimizer.zero_grad()  # Clear accumulated gradients\n",
    "    out = model(data.x)  # Perform a single forward pass\n",
    "    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # Loss on training nodes only\n",
    "    # Compute the loss solely based on the training nodes.\n",
    "    loss.backward()  # Derive gradients\n",
    "    optimizer.step()  # Update parameters based on gradients\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "otherwise-french",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(model, data):\n",
    "    \"\"\"Evaluate MLP accuracy on the masked test nodes.\"\"\"\n",
    "    model.eval()\n",
    "    out = model(data.x)\n",
    "    pred = out.argmax(dim=1)  # Use the class with highest probability.\n",
    "    # Check predictions against the ground-truth labels\n",
    "    test_correct = pred[data.test_mask] == data.y[data.test_mask]  # Check against ground-truth labels.\n",
    "    # Derive the ratio of correct predictions\n",
    "    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())  # Derive ratio of correct predictions.\n",
    "    # Return the fraction of correctly predicted test nodes\n",
    "    return test_acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "wrapped-prompt",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Accuracy: 0.1096\n"
     ]
    }
   ],
   "source": [
    "# Train the MLP baseline and report test accuracy\n",
    "df = pd.DataFrame(columns=[\"Loss\"])\n",
    "df.index.name = \"Epoch\"\n",
    "# NOTE(review): df is created but never filled in this cell, and only 4 epochs run — confirm intentional\n",
    "for epoch in range(1, 5):\n",
    "    loss = train(model, data, criterion, optimizer)\n",
    "test_acc = test(model, data)\n",
    "print(f'Test Accuracy: {test_acc:.4f}')\n",
    "model.eval()\n",
    "out = model(data.x)\n",
    "preds = out.argmax(dim=1).tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "curious-shadow",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Bash项目的指标\n",
      "a2a:82\n",
      "c2c0_8：0\n",
      "c2c0_5：0\n",
      "c2c0_3：7\n",
      "c2c0_1：36\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(2, 82, 0, 7, 36)"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Write MLP predictions to an RSF file and score them against the ground truth\n",
    "result2rsf_file(kwarg[\"root\"], preds, kwarg[\"outfile_path\"])\n",
    "Metric(kwarg[\"project\"], kwarg[\"outfile_path\"], kwarg[\"ground_path\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "isolated-greene",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch_geometric.nn import GCNConv\n",
    "import torch.nn.functional as F\n",
    "\n",
    "# Two-layer GCN that incorporates edge weights (data.edge_attr).\n",
    "# Parameters:\n",
    "#     1: num_features    - input layer size\n",
    "#     2: hidden_channels - hidden layer size\n",
    "#     3: num_classes     - output layer size\n",
    "\n",
    "class GCN(torch.nn.Module):\n",
    "    def __init__(self, num_features, hidden_channels, num_classes):\n",
    "        super(GCN, self).__init__()\n",
    "        torch.manual_seed(12345)  # fixed seed so weight initialization is reproducible\n",
    "        self.conv1 = GCNConv(num_features, hidden_channels)\n",
    "        self.conv2 = GCNConv(hidden_channels, num_classes)\n",
    "\n",
    "    def forward(self, x, edge_index, edge_attr):\n",
    "        x = self.conv1(x, edge_index, edge_weight=edge_attr)\n",
    "        x = x.relu()\n",
    "        x = F.dropout(x, p=0.5, training=self.training)\n",
    "        # Fix: also apply the edge weights in the second layer (was omitted,\n",
    "        # so the two layers aggregated over inconsistently weighted graphs).\n",
    "        x = self.conv2(x, edge_index, edge_weight=edge_attr)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "detailed-upset",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, data, criterion, optimizer):\n",
    "    \"\"\"One training step for the edge-weighted GCN.\"\"\"\n",
    "    model.train()\n",
    "    optimizer.zero_grad()  # Clear accumulated gradients\n",
    "    out = model(data.x, data.edge_index, data.edge_attr)  # Perform a single forward pass\n",
    "    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # Loss on training nodes only\n",
    "    # Compute the loss solely based on the training nodes.\n",
    "    loss.backward()  # Derive gradients\n",
    "    optimizer.step()  # Update parameters based on gradients\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "recognized-static",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(model, data):\n",
    "    \"\"\"Evaluate edge-weighted GCN accuracy on the masked test nodes.\"\"\"\n",
    "    model.eval()\n",
    "    out = model(data.x, data.edge_index, data.edge_attr)\n",
    "    pred = out.argmax(dim=1)  # Use the class with highest probability.\n",
    "    # Check predictions against the ground-truth labels\n",
    "    test_correct = pred[data.test_mask] == data.y[data.test_mask]  # Check against ground-truth labels.\n",
    "    # Derive the ratio of correct predictions\n",
    "    test_acc = int(test_correct.sum()) / int(data.test_mask.sum())  # Derive ratio of correct predictions.\n",
    "    # Return the fraction of correctly predicted test nodes\n",
    "    return test_acc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "outdoor-withdrawal",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build and train the edge-weighted GCN, then report test accuracy and collect predictions\n",
    "model = GCN(num_features=data.num_features,\n",
    "            hidden_channels=kwarg['model_layer'][0],\n",
    "            num_classes=kwarg['model_layer'][1])\n",
    "\n",
    "criterion = torch.nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=kwarg['lr'], weight_decay=5e-4)  # Define the Adam optimizer\n",
    "for epoch in range(1,201):\n",
    "    loss = train(model, data, criterion, optimizer)\n",
    "test_acc = test(model, data)\n",
    "print(f'Test Accuracy: {test_acc:.4f}')\n",
    "model.eval()\n",
    "out = model(data.x, data.edge_index, data.edge_attr)\n",
    "preds = out.argmax(dim=1).tolist()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "desperate-chile",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Write edge-weighted GCN predictions to an RSF file and score them against the ground truth\n",
    "result2rsf_file(kwarg[\"root\"], preds, kwarg[\"outfile_path\"])\n",
    "Metric(kwarg[\"project\"], kwarg[\"outfile_path\"], kwarg[\"ground_path\"])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
