{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "import torch\n",
     "import numpy as np\n",
     "from sklearn import metrics\n",
     "import torch.nn as nn\n",
     "from scipy import sparse\n",
     "from torch.utils.data import DataLoader,Dataset\n",
     "import torch.nn.functional as F\n",
     "import torch.nn.init as init\n",
     "from scipy.linalg import expm\n",
     "import math\n",
     "# Free any cached GPU memory left over from a previous run.\n",
     "torch.cuda.empty_cache()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class EarlyStopping:\n",
    "    \"\"\"Early stops the training if validation loss doesn't improve after a given patience.\"\"\"\n",
    "    def __init__(self, patience, verbose=False, delta=0, save_path='checkpoint.pt'):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            patience (int): How long to wait after last time validation loss improved.\n",
    "                            Default: 7\n",
    "            verbose (bool): If True, prints a message for each validation loss improvement.\n",
    "                            Default: False\n",
    "            delta (float): Minimum change in the monitored quantity to qualify as an improvement.\n",
    "                            Default: 0\n",
    "        \"\"\"\n",
    "        self.patience = patience\n",
    "        self.verbose = verbose\n",
    "        self.counter = 0\n",
    "        self.best_score = None\n",
    "        self.early_stop = False\n",
    "        self.val_loss_min = np.Inf\n",
    "        self.delta = delta\n",
    "        self.save_path = save_path\n",
    "\n",
    "    def __call__(self, val_loss, model):\n",
    "\n",
    "        score = -val_loss\n",
    "\n",
    "        if self.best_score is None:\n",
    "            self.best_score = score\n",
    "            self.save_checkpoint(val_loss, model)\n",
    "        elif score < self.best_score - self.delta:\n",
    "            self.counter += 1\n",
    "            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')\n",
    "            if self.counter >= self.patience:\n",
    "                self.early_stop = True\n",
    "        else:\n",
    "            self.best_score = score\n",
    "            self.save_checkpoint(val_loss, model)\n",
    "            self.counter = 0\n",
    "\n",
    "    def save_checkpoint(self, val_loss, model):\n",
    "        \"\"\"Saves model when validation loss decrease.\"\"\"\n",
    "        if self.verbose:\n",
    "            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}).  Saving model ...')\n",
    "        torch.save(model.state_dict(), self.save_path)\n",
    "        self.val_loss_min = val_loss\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class GCN(nn.Module):\n",
    "    def __init__(self) :\n",
    "        super().__init__()\n",
    "    def forward(self,adj,x):\n",
    "        return adj@x\n",
    "    \n",
     "class HyperGraph(nn.Module):\n",
     "    \"\"\"One hypergraph convolution layer: nodes -> hyperedges -> nodes.\n",
     "\n",
     "    Projects node features, aggregates them into hyperedge embeddings,\n",
     "    rescales each hyperedge with a learned scalar gate, then propagates\n",
     "    the hyperedge embeddings back to the nodes.\n",
     "    \"\"\"\n",
     "    def __init__(self,in_dim,hidden_dim,out_dim):\n",
     "        super().__init__()\n",
     "\n",
     "        self.weight1 = nn.Linear(in_dim,hidden_dim)\n",
     "        self.dropout = nn.Dropout(0.3)\n",
     "        self.relu = nn.LeakyReLU(0.2)\n",
     "        # NOTE(review): BatchNorm1d is sized by out_dim, so callers must pass\n",
     "        # hidden_dim == out_dim (HyperGraphModel does).\n",
     "        self.batch_norm1 = nn.BatchNorm1d(out_dim)\n",
     "\n",
     "        # Per-hyperedge scalar gate.\n",
     "        self.gate = nn.Linear(hidden_dim,1)\n",
     "        self.sigmoid = nn.Sigmoid()\n",
     "        self.tanh_f = nn.Tanh()\n",
     "\n",
     "        nn.init.xavier_normal_(self.weight1.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.gate.weight,gain=nn.init.calculate_gain('relu'))\n",
     "\n",
     "    def forward(self,x,hyper_adj,hyper_degree,node_degree):\n",
     "        # NOTE(review): hyper_degree is accepted but never used below -- confirm\n",
     "        # whether hyperedge-degree normalization was intended here.\n",
     "        x = self.weight1(x)\n",
     "        x = self.dropout(x)\n",
     "        hyper_edge_emb = hyper_adj.T@node_degree@x # [num hyperedges, hidden_dim]\n",
     "        # Use the gate to rescale each hyperedge embedding.\n",
     "        hyper_edge_weight = self.tanh_f(self.sigmoid(self.gate(hyper_edge_emb)))\n",
     "        hyper_edge_weight = torch.diag(torch.squeeze(hyper_edge_weight))\n",
     "        hyper_edge_emb = hyper_edge_weight@hyper_edge_emb\n",
     "        # Propagate the hyperedge features back to the nodes.\n",
     "        hyper_emb = node_degree@hyper_adj@hyper_edge_emb # [num nodes, out_dim]\n",
     "        hyper_emb = self.batch_norm1(hyper_emb)\n",
     "        return hyper_emb\n",
     "\n",
     "class HyperGraphModel(nn.Module):\n",
     "    \"\"\"Two stacked hypergraph convolutions over a learned (dynamic) incidence matrix.\"\"\"\n",
     "    def __init__(self):\n",
     "        super().__init__()\n",
     "        self.gcn = GCN()  # NOTE(review): constructed but never used in forward\n",
     "        # Learned mixing matrix that turns node features into a soft incidence matrix.\n",
     "        self.hyper_weight = nn.Parameter(torch.ones(1527,1527))\n",
     "        self.hgcn1 = HyperGraph(1527,600,600)\n",
     "        self.hgcn2 = HyperGraph(600,300,300)\n",
     "        self.dropout = nn.Dropout(0.3)\n",
     "        self.batch_norm = nn.BatchNorm1d(1527)  # NOTE(review): unused in forward\n",
     "        \n",
     "        self.relu = nn.LeakyReLU()\n",
     "        # NOTE(review): re-assigns self.dropout, shadowing the identical one above.\n",
     "        self.dropout = nn.Dropout(0.3)\n",
     "    def forward(self,adj,x,degree_matrix):\n",
     "        # Build the dynamic hypergraph incidence matrix from the node features.\n",
     "        hyper_adj = x@self.hyper_weight\n",
     "        hyper_degree = torch.inverse(torch.diag(torch.sum(hyper_adj,dim=0)))\n",
     "        node_degree = torch.sqrt(torch.inverse(torch.diag(torch.squeeze(degree_matrix))))\n",
     "        # Hypergraph convolutions.\n",
     "        hyper_emb = self.hgcn1(x,hyper_adj,hyper_degree,node_degree) #1527*600\n",
     "        hyper_emb = self.relu(hyper_emb)\n",
     "        hyper_emb = self.hgcn2(hyper_emb,hyper_adj,hyper_degree,node_degree) #1527*300\n",
     "        # hyper_emb = self.relu(hyper_emb)\n",
     "        return hyper_emb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "class TopoRep(nn.Module):\n",
     "    \"\"\"Topology representation: fuse 1- and 2-step random-walk matrices,\n",
     "    then refine with single-head self-attention (residual + LayerNorm).\n",
     "    \"\"\"\n",
     "    def __init__(self):\n",
     "        super().__init__()\n",
     "        # 1x1 conv mixing the two random-walk 'channels' into one matrix.\n",
     "        self.rw_fuse = nn.Conv2d(in_channels=2,out_channels=1,kernel_size=1)\n",
     "        # Learnable diagonal scaling for the residual adjacency term.\n",
     "        self.beita = nn.Parameter(torch.diag(torch.ones(1527)))\n",
     "\n",
     "        self.q_linear = nn.Linear(1527,300)\n",
     "        self.k_linear = nn.Linear(1527,300)\n",
     "        self.v_linear = nn.Linear(1527,300)\n",
     "        self.scale = 300 ** -0.5  # 1/sqrt(d_k) attention scaling\n",
     "        self.norm1 = nn.LayerNorm(300)\n",
     "        self.dropout = nn.Dropout(0.3)\n",
     "\n",
     "        self.align = nn.Linear(1527,300)\n",
     "    def forward(self,adj_norm):\n",
     "        rw_one = adj_norm[None,:,:]           # 1-step walk (1,1527,1527)\n",
     "        rw_two = adj_norm@adj_norm[None,:,:]  # 2-step walk (1,1527,1527)\n",
     "        rw_matrix = torch.cat([rw_one,rw_two],dim=0) # stack as conv channels\n",
     "        multi_step_topo = self.rw_fuse(rw_matrix).reshape(-1,1527) #1527,1527\n",
     "\n",
     "        residual = self.beita@adj_norm\n",
     "        multi_step_topo = multi_step_topo+residual #1527*1527\n",
     "        \n",
     "        # Self-attention over the fused topology matrix.\n",
     "        Q = self.q_linear(multi_step_topo)\n",
     "        K = self.k_linear(multi_step_topo)\n",
     "        V = self.v_linear(multi_step_topo)\n",
     "        # Attention score matrix.\n",
     "        attn = (Q @ K.T)*self.scale\n",
     "        attn = F.softmax(attn,dim=-1) #1527*1527\n",
     "        attn = self.dropout(attn)\n",
     "        topo_feature = torch.matmul(attn,V) #1527*300\n",
     "\n",
     "        multi_step_topo = self.align(multi_step_topo) #1527,1527->1527,300\n",
     "        topo_feature = self.norm1(topo_feature+multi_step_topo) #1527*300\n",
     "        return topo_feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "class MetaPath(nn.Module):\n",
     "    \"\"\"Meta-path based semantic encoder for circRNAs (834) and diseases (138).\n",
     "\n",
     "    For each meta-path (cc, ccc, cdc for circRNAs; dd, dcd, ddd for diseases)\n",
     "    this embeds the pairwise path counts, aggregates them into per-node\n",
     "    semantic features, runs a two-layer GCN over the normalized meta-path\n",
     "    adjacency, and fuses the three views per node type into 300-dim outputs.\n",
     "    \"\"\"\n",
     "    def __init__(self,device):\n",
     "        super().__init__()\n",
     "        # Per-entry embeddings of the scalar path counts (1 -> 50 dims).\n",
     "        self.cc_emb = nn.Linear(1,50)\n",
     "        self.ccc_emb = nn.Linear(1,50)\n",
     "        self.cdc_emb = nn.Linear(1,50)\n",
     "        self.dd_emb = nn.Linear(1,50)\n",
     "        self.dcd_emb = nn.Linear(1,50)\n",
     "        self.ddd_emb = nn.Linear(1,50)\n",
     "\n",
     "        # 1x1 convs that collapse the neighbor axis (rows act as channels).\n",
     "        self.cc_fuse = nn.Conv2d(in_channels=834,out_channels=1,kernel_size=1)\n",
     "        self.ccc_fuse = nn.Conv2d(in_channels=834,out_channels=1,kernel_size=1)\n",
     "        self.cdc_fuse = nn.Conv2d(in_channels=834,out_channels=1,kernel_size=1)\n",
     "        self.dd_fuse = nn.Conv2d(in_channels=138,out_channels=1,kernel_size=1)\n",
     "        self.dcd_fuse = nn.Conv2d(in_channels=138,out_channels=1,kernel_size=1)\n",
     "        self.ddd_fuse = nn.Conv2d(in_channels=138,out_channels=1,kernel_size=1)\n",
     "\n",
     "\n",
     "        # NOTE(review): the *_meta linears below are never used in forward.\n",
     "        self.cc_meta = nn.Linear(650,300)\n",
     "        self.ccc_meta = nn.Linear(650,300)\n",
     "        self.cdc_meta = nn.Linear(650,300)\n",
     "        self.dd_meta = nn.Linear(650,300)\n",
     "        self.ddd_meta = nn.Linear(650,300)\n",
     "        self.dcd_meta = nn.Linear(650,300)\n",
     "\n",
     "\n",
     "        self.relu = nn.LeakyReLU()\n",
     "        self.relu1 = nn.ReLU()\n",
     "        self.norm1 = nn.BatchNorm1d(300)  # NOTE(review): unused; norm2 is used for both outputs\n",
     "        self.norm2 = nn.BatchNorm1d(300)\n",
     "        self.device = device\n",
     "\n",
     "        # Two-layer GCN weights per meta-path (input 1527 node fea + 50 semantic = 1577).\n",
     "        self.cc_gcn1 = nn.Linear(1577,900)\n",
     "        self.ccc_gcn1 = nn.Linear(1577,900)\n",
     "        self.cdc_gcn1 = nn.Linear(1577,900)\n",
     "        self.dd_gcn1 = nn.Linear(1577,900)\n",
     "        self.ddd_gcn1 = nn.Linear(1577,900)\n",
     "        self.dcd_gcn1 = nn.Linear(1577,900)\n",
     "        \n",
     "        self.cc_gcn2 = nn.Linear(900,600)\n",
     "        self.ccc_gcn2 = nn.Linear(900,600)\n",
     "        self.cdc_gcn2 = nn.Linear(900,600)\n",
     "        self.dd_gcn2 = nn.Linear(900,600)\n",
     "        self.ddd_gcn2 = nn.Linear(900,600)\n",
     "        self.dcd_gcn2 = nn.Linear(900,600)\n",
     "\n",
     "        # Fuse the three meta-path views (3 x 600) into a 300-dim output.\n",
     "        self.circ_fuse = nn.Linear(600*3,300)\n",
     "        self.dise_fuse = nn.Linear(600*3,300)\n",
     "        self.dropout = nn.Dropout(0.3)\n",
     "        nn.init.xavier_normal_(self.cc_emb.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.ccc_emb.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.cdc_emb.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.dd_emb.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.dcd_emb.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.ddd_emb.weight,gain=nn.init.calculate_gain('relu'))\n",
     "\n",
     "        nn.init.xavier_normal_(self.cc_fuse.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.ccc_fuse.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.cdc_fuse.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.dd_fuse.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.dcd_fuse.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.ddd_fuse.weight,gain=nn.init.calculate_gain('relu'))\n",
     "\n",
     "        nn.init.xavier_normal_(self.cc_gcn1.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.ccc_gcn1.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.cdc_gcn1.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.dd_gcn1.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.ddd_gcn1.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.dcd_gcn1.weight,gain=nn.init.calculate_gain('relu'))\n",
     "    def forward(self,meta_paths,node_fea):\n",
     "        \"\"\"Return (circ_sema, dise_sema): 834x300 and 138x300 semantic features.\"\"\"\n",
     "        # Unpack the meta-path count matrices.\n",
     "        c_c = meta_paths['cc']\n",
     "        c_c_c = meta_paths['ccc']\n",
     "        c_d_c = meta_paths['cdc']\n",
     "        d_d = meta_paths['dd']\n",
     "        d_c_d = meta_paths['dcd']\n",
     "        d_d_d = meta_paths['ddd']\n",
     "\n",
     "        cc_d = torch.sum(c_c,dim=1)\n",
     "        ccc_d = torch.sum(c_c_c,dim=1)\n",
     "        # Add self-loops to cdc before computing its degrees.\n",
     "        I = torch.eye(c_d_c.shape[0]).to(self.device)\n",
     "        c_d_c = c_d_c + I\n",
     "        cdc_d = torch.sum(c_d_c,dim=1)\n",
     "        # Symmetric normalization D^-1/2 A D^-1/2 per circRNA meta-path.\n",
     "        cc_norm = torch.sqrt(torch.inverse(torch.diag(cc_d)))@c_c@torch.sqrt(torch.inverse(torch.diag(cc_d)))\n",
     "        ccc_norm = torch.sqrt(torch.inverse(torch.diag(ccc_d)))@c_c_c@torch.sqrt(torch.inverse(torch.diag(ccc_d)))   \n",
     "        cdc_norm = torch.sqrt(torch.inverse(torch.diag(cdc_d)))@c_d_c@torch.sqrt(torch.inverse(torch.diag(cdc_d)))\n",
     "\n",
     "        # Same normalization for the disease meta-paths.\n",
     "        dd_d = torch.sum(d_d,dim=1)\n",
     "        dd_norm = torch.sqrt(torch.inverse(torch.diag(dd_d)))@d_d@torch.sqrt(torch.inverse(torch.diag(dd_d)))\n",
     "        ddd_d = torch.sum(d_d_d,dim=1)\n",
     "        ddd_norm = torch.sqrt(torch.inverse(torch.diag(ddd_d)))@d_d_d@torch.sqrt(torch.inverse(torch.diag(ddd_d)))   \n",
     "        I = torch.eye(d_c_d.shape[0]).to(self.device)\n",
     "        d_c_d = d_c_d + I\n",
     "        dcd_d = torch.sum(d_c_d,dim=1)\n",
     "        dcd_norm = torch.sqrt(torch.inverse(torch.diag(dcd_d)))@d_c_d@torch.sqrt(torch.inverse(torch.diag(dcd_d)))\n",
     "        # Meta-path composite semantics: per-pair scalar count -> 50-dim embedding.\n",
     "        cc_sema = c_c[:,:,None]\n",
     "        ccc_sema = c_c_c[:,:,None]\n",
     "        cdc_sema = c_d_c[:,:,None]\n",
     "        \n",
     "        cc_fea = self.cc_emb(cc_sema)\n",
     "        ccc_fea = self.ccc_emb(ccc_sema)\n",
     "        cdc_fea = self.cdc_emb(cdc_sema)\n",
     "\n",
     "        cc_fea = cc_fea.reshape(834,834,50)\n",
     "        ccc_fea = ccc_fea.reshape(834,834,50)\n",
     "        cdc_fea = cdc_fea.reshape(834,834,50)\n",
     "\n",
     "        #aggregation\n",
     "        cc_fea = self.cc_fuse(cc_fea)  #834,50\n",
     "        cc_fea = cc_fea.view(-1,50)\n",
     "        ccc_fea = self.ccc_fuse(ccc_fea) \n",
     "        ccc_fea = ccc_fea.view(-1,50)\n",
     "        cdc_fea = self.cdc_fuse(cdc_fea)\n",
     "        cdc_fea = cdc_fea.view(-1,50)\n",
     "\n",
     "\n",
     "        #disease\n",
     "        dd_sema = d_d[:,:,None]\n",
     "        dcd_sema = d_c_d[:,:,None]\n",
     "        ddd_sema = d_d_d[:,:,None]\n",
     "\n",
     "        dd_fea = self.dd_emb(dd_sema)\n",
     "        dcd_fea = self.dcd_emb(dcd_sema)\n",
     "        ddd_fea = self.ddd_emb(ddd_sema)\n",
     "        dd_fea = dd_fea.reshape(138,138,50)\n",
     "        ddd_fea = ddd_fea.reshape(138,138,50)\n",
     "        dcd_fea = dcd_fea.reshape(138,138,50)\n",
     "        #aggregation\n",
     "        dd_fea = self.dd_fuse(dd_fea)  #138,50\n",
     "        dd_fea = dd_fea.view(-1,50)\n",
     "        dcd_fea = self.dcd_fuse(dcd_fea) \n",
     "        dcd_fea = dcd_fea.view(-1,50)\n",
     "        ddd_fea = self.ddd_fuse(ddd_fea)\n",
     "        ddd_fea = ddd_fea.view(-1,50)\n",
     "        #GCN: concatenate raw node features with the 50-dim semantic features.\n",
     "        circ_fea = node_fea[0:834]\n",
     "        dise_fea = node_fea[834:834+138]\n",
     "        cc_fea = torch.cat([circ_fea,cc_fea],dim=1)\n",
     "        ccc_fea = torch.cat([circ_fea,ccc_fea],dim=1)\n",
     "        cdc_fea = torch.cat([circ_fea,cdc_fea],dim=1)\n",
     "        dd_fea = torch.cat([dise_fea,dd_fea],dim=1)\n",
     "        ddd_fea = torch.cat([dise_fea,ddd_fea],dim=1)\n",
     "        dcd_fea = torch.cat([dise_fea,dcd_fea],dim=1)\n",
     "        #circRNA\n",
     "\n",
     "        cc_node = cc_norm@cc_fea         \n",
     "        cc_node = self.cc_gcn1(cc_node)\n",
     "        cc_node = self.relu(cc_node)\n",
     "        cc_node = self.dropout(cc_node)\n",
     "        cc_node = self.cc_gcn2(cc_node)\n",
     "\n",
     "        ccc_node = ccc_norm@ccc_fea\n",
     "        ccc_node = self.ccc_gcn1(ccc_node)\n",
     "        ccc_node = self.relu(ccc_node)\n",
     "        ccc_node = self.dropout(ccc_node)\n",
     "        ccc_node = self.ccc_gcn2(ccc_node)\n",
     "\n",
     "        cdc_node = cdc_norm@cdc_fea\n",
     "        cdc_node = self.cdc_gcn1(cdc_node)\n",
     "        cdc_node = self.relu(cdc_node)\n",
     "        cdc_node = self.dropout(cdc_node)\n",
     "        cdc_node = self.cdc_gcn2(cdc_node)\n",
     "        #disease\n",
     "\n",
     "        dd_node = dd_norm@dd_fea  #138,650\n",
     "        ddd_node = ddd_norm@ddd_fea\n",
     "        dcd_node = dcd_norm@dcd_fea\n",
     "\n",
     "        dd_node = self.dd_gcn1(dd_node)\n",
     "        dd_node = self.relu(dd_node)\n",
     "        dd_node = self.dropout(dd_node)\n",
     "        dd_node = self.dd_gcn2(dd_node)\n",
     "\n",
     "        dcd_node = self.dcd_gcn1(dcd_node)\n",
     "        dcd_node = self.relu(dcd_node)\n",
     "        dcd_node = self.dropout(dcd_node)\n",
     "        dcd_node = self.dcd_gcn2(dcd_node)\n",
     "\n",
     "        ddd_node = self.ddd_gcn1(ddd_node)\n",
     "        ddd_node = self.relu(ddd_node)\n",
     "        ddd_node = self.dropout(ddd_node)\n",
     "        ddd_node = self.ddd_gcn2(ddd_node)\n",
     "\n",
     "        # Fuse the three meta-path views per node type.\n",
     "        circ_sema = torch.cat([cc_node,ccc_node,cdc_node],dim=1)\n",
     "        dise_sema = torch.cat([dd_node,ddd_node,dcd_node],dim=1)\n",
     "\n",
     "        circ_sema = self.circ_fuse(circ_sema) #834*300\n",
     "        # circ_sema = self.relu1(circ_sema)\n",
     "        circ_sema = self.norm2(circ_sema)\n",
     "        dise_sema = self.dise_fuse(dise_sema) #138*300\n",
     "        # dise_sema = self.relu1(dise_sema)\n",
     "        dise_sema = self.norm2(dise_sema)\n",
     "        return circ_sema,dise_sema\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "class RepModel(nn.Module):\n",
     "    \"\"\"Produce three complementary node representations (topology, meta-path,\n",
     "    hypergraph) plus the raw node features, restricted to the first 972 nodes\n",
     "    (834 circRNAs + 138 diseases).\n",
     "    \"\"\"\n",
     "    def __init__(self,device):\n",
     "        super().__init__()\n",
     "        self.longDistance= TopoRep()\n",
     "\n",
     "        self.meta_path = MetaPath(device)\n",
     "        self.hyper_model = HyperGraphModel()\n",
     "        \n",
     "        # NOTE(review): self.par and self.align are defined but never used in forward.\n",
     "        self.par = nn.Parameter(torch.diag(torch.ones(972)))\n",
     "        self.align = nn.Linear(1527,300)\n",
     "        self.device = device\n",
     "        self.tan_h = nn.Tanh()\n",
     "        self.relu = nn.LeakyReLU()\n",
     "    def forward(self,adj_norm,node_fea,rowD,meta_paths):\n",
     "        # Hypergraph-based features.\n",
     "        hyper_emb = self.hyper_model(adj_norm,node_fea,rowD.reshape(1527,-1)) #1527*300\n",
     "        hyper_emb = hyper_emb[:972,:]\n",
     "\n",
     "        # Topological features.\n",
     "        topo_emb = self.longDistance(adj_norm) #1527*300\n",
     "        topo_emb = topo_emb[:972,:]\n",
     "        # Meta-path features.\n",
     "        circ_emb,d_emb = self.meta_path(meta_paths,node_fea)  \n",
     "        meta_emb = torch.cat([circ_emb,d_emb],dim=0) #972*300\n",
     "        meta_emb = self.tan_h(meta_emb)\n",
     "        meta_emb = self.relu(meta_emb)\n",
     "\n",
     "        node_rep = node_fea[:972,:]\n",
     "        return topo_emb,meta_emb,hyper_emb,node_rep"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class GateAdjust(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.proj_s = nn.Linear(900,300)\n",
    "        self.relu = nn.LeakyReLU()\n",
    "        \n",
    "        self.topo_w = nn.Parameter(torch.ones(972,300))\n",
    "        self.hyper_w = nn.Parameter(torch.ones(972,300))\n",
    "        self.meta_w = nn.Parameter(torch.ones(972,300))\n",
    "\n",
    "        self.tan_h = nn.Tanh()\n",
    "        nn.init.xavier_normal_(self.topo_w,gain=nn.init.calculate_gain('relu'))\n",
    "        nn.init.xavier_normal_(self.hyper_w,gain=nn.init.calculate_gain('relu'))\n",
    "        nn.init.xavier_normal_(self.meta_w,gain=nn.init.calculate_gain('relu'))\n",
    "\n",
    "    def forward(self,semantic,topo,hyper_semantic):\n",
    "        semantic = self.relu(self.tan_h(self.meta_w*semantic))\n",
    "        topo = self.relu(self.tan_h(self.topo_w*topo))\n",
    "        hyper_semantic = self.relu(self.tan_h(self.hyper_w*hyper_semantic))\n",
    "        \n",
    "        fused = torch.cat([topo,hyper_semantic,semantic],dim=1)\n",
    "        fused = self.proj_s(fused)\n",
    "        fused = self.relu(fused)\n",
    "        return fused  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "class my_model(nn.Module):\n",
     "    \"\"\"End-to-end classifier: builds fused node representations, then scores\n",
     "    (circRNA, disease) index pairs with a small CNN + MLP head (2 logits).\n",
     "    \"\"\"\n",
     "    def __init__(self,device):\n",
     "        super().__init__()\n",
     "\n",
     "        self.mp_model = RepModel(device)\n",
     "        self.gate_adjust = GateAdjust()\n",
     "\n",
     "        self.c1 = nn.Conv2d(in_channels=1,out_channels=32,kernel_size=(2,13),stride=1,padding=0) #8*32*1*3392\n",
     "        self.p1 = nn.MaxPool2d(kernel_size=(1,5)) #363\n",
     "        \n",
     "        self.c2 = nn.Conv2d(in_channels=32,out_channels=64,kernel_size=(1,4),stride=1,padding=0) #8*64*1*840\n",
     "        self.p2 = nn.MaxPool2d(kernel_size=(1,10)) #36\n",
     "        \n",
     "        self.l1 = nn.Linear(64*36,1200)\n",
     "        self.d1 = nn.Dropout(0.5)\n",
     "        self.l2 = nn.Linear(1200,700)\n",
     "        self.l3=nn.Linear(700,200)\n",
     "        self.l4=nn.Linear(200,2)\n",
     "        self.LR = nn.LeakyReLU()\n",
     "        \n",
     "        # NOTE(review): defined but never used in forward.\n",
     "        self.regularizer = nn.MSELoss()\n",
     "\n",
     "        nn.init.xavier_normal_(self.c1.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.c2.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.l1.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.l2.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.l3.weight,gain=nn.init.calculate_gain('relu'))\n",
     "        nn.init.xavier_normal_(self.l4.weight)\n",
     "    def forward(self,adj_norm,node_fea,x,y,rowD,meta_paths):\n",
     "        \"\"\"Score each (x, y) pair; x are circRNA indices, y are disease indices.\"\"\"\n",
     "        topo_emb,semantic_emb,hyper_emb,node_rep = self.mp_model(adj_norm,node_fea,rowD,meta_paths) #1527\n",
     "        fuse_fea = self.gate_adjust(semantic_emb,topo_emb,hyper_emb) #300\n",
     "\n",
     "        represent = torch.cat([node_rep,fuse_fea],dim=1)\n",
     "\n",
     "        x2 = y+834  # disease rows start after the 834 circRNAs\n",
     "        circ_fea = represent[x][:,None,None,:]\n",
     "        dise_fea = represent[x2][:,None,None,:]\n",
     "\n",
     "        # Stack the pair along the height axis so the (2,13) kernel spans both rows.\n",
     "        fea = torch.cat([circ_fea,dise_fea],dim=2)\n",
     "        fea = fea.to(torch.float32)\n",
     "        x = self.c1(fea) #16*32*1*3404\n",
     "        x = self.LR(x) \n",
     "        x = self.p1(x)\n",
     "\n",
     "        x = self.c2(x) \n",
     "        x = self.LR(x)\n",
     "        x = self.p2(x) \n",
     "\n",
     "\n",
     "        # Flatten and classify.\n",
     "        x = x.reshape(x.shape[0],-1) \n",
     "        x = self.l1(x)\n",
     "        x = self.LR(x)   \n",
     "        x = self.d1(x)\n",
     "        \n",
     "        x = self.l2(x) \n",
     "        x = self.LR(x)\n",
     "        x = self.d1(x)\n",
     "        \n",
     "        x = self.l3(x) \n",
     "        x = self.LR(x)\n",
     "        x = self.d1(x)\n",
     "\n",
     "        x = self.l4(x) \n",
     "        \n",
     "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model,adj_norm,node_fea,train_dataset,test_dataset,fold_num,epoch,device,degree,meta_paths):\n",
    "    cros_loss = nn.CrossEntropyLoss().to(device)\n",
    "    optimizer = torch.optim.Adam(model.parameters(),lr=0.001)\n",
    "    early_stopping= EarlyStopping(patience=6, verbose= True, save_path=rf'output\\pt\\dict{fold_num}.pth')\n",
    "    for e in range(epoch):\n",
    "        model.train()\n",
    "        correct = 0\n",
    "        LOSS = 0\n",
    "        for x,y,label in train_dataset:\n",
    "            model.train()\n",
    "            x = x.to(torch.long).to(device)\n",
    "            y = y.to(torch.long).to(device)\n",
    "            label = label.to(torch.long).to(device)\n",
    "            output = model(adj_norm,node_fea,x,y,degree,meta_paths)\n",
    "            loss = cros_loss(output,label)\n",
    "            LOSS += loss.item()\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            max_index = torch.argmax(output,dim=1)\n",
    "            eq_e = max_index==label\n",
    "            eq_num = eq_e.sum()\n",
    "            correct+=eq_num\n",
    "        early_stopping(LOSS, model)\n",
    "        correct_percent = correct/len(train_dataset.dataset)\n",
    "        print(f'第{e}个eopch的正确率为{correct_percent}')\n",
    "        if early_stopping.early_stop:\n",
    "            print(f'early_stopping!')\n",
    "            break\n",
    "        if e + 1 == epoch:\n",
    "            torch.save(model.state_dict(), rf'output\\dict{fold_num}.pth')\n",
    "    torch.cuda.empty_cache()\n",
    "    # torch.save(model.state_dict(),RF'F:\\origin_model_5k\\cnn_model\\epoch20\\cnn_model{cros}.pth')\n",
    "    test_correct = 0\n",
    "    output_all = torch.tensor([]).to(device)\n",
    "    label_all = torch.tensor([]).to(device)\n",
    "    model.load_state_dict(torch.load(rf'output\\dict{fold_num}.pth'))\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        for x,y,label in test_dataset:\n",
    "            x = x.to(torch.long).to(device)\n",
    "            y = y.to(torch.long).to(device)\n",
    "            label = label.to(torch.long).to(device)\n",
    "            t_output = model(adj_norm,node_fea,x,y,degree,meta_paths)#8*2\n",
    "            output_all = torch.cat([output_all,t_output],dim=0) #cat每一个batch的output\n",
    "            label_all = torch.cat([label_all,label],dim=0)\n",
    "\n",
    "                # 输出准确率用\n",
    "            max_index = torch.argmax(t_output,dim=1)\n",
    "            eq_e = max_index==label\n",
    "                \n",
    "            eq_num = eq_e.sum()\n",
    "            test_correct+=eq_num\n",
    "        correct_percent = test_correct/len(test_dataset.dataset)\n",
    "        torch.save(output_all,RF'output\\output{fold_num}')\n",
    "        torch.save(label_all,RF'output\\label{fold_num}')\n",
    "        # print(f'测试集的正确率为:{correct_percent}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MyDataset(Dataset):\n",
    "    def __init__(self,t_dataset,c_d) :\n",
    "        super().__init__()\n",
    "        self.t_dataset = t_dataset\n",
    "        self.c_d = c_d\n",
    "    def __getitem__(self, index) :\n",
    "        x,y = self.t_dataset[:,index]\n",
    "        label = self.c_d[x][y]\n",
    "        return x,y,label\n",
    "    def __len__(self):\n",
    "        return self.t_dataset.shape[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Five-fold cross-validation driver: load data, build per-fold inputs, train.\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "c_d = np.load(rf'data\\circRNA_disease.npy')  # circRNA-disease association matrix\n",
     "c_d = torch.tensor(c_d).to(torch.long)\n",
     "train_data = torch.load(rf'data\\train_dataset.pth')\n",
     "test_data = torch.load(rf'data\\test_data.pth')\n",
     "adj_matrix_list = torch.load(rf'data\\cover_feature_matrix.pth')\n",
     "# Pre-computed meta-path matrices, one entry per fold.\n",
     "cc_list = torch.load(rf'data\\meta_path\\cc.pth')\n",
     "ccc_list = torch.load(rf'data\\meta_path\\ccc.pth')\n",
     "cdc_list = torch.load(rf'data\\meta_path\\cdc.pth')\n",
     "dd_list = torch.load(rf'data\\meta_path\\dd.pth')\n",
     "dcd_list = torch.load(rf'data\\meta_path\\dcd.pth')\n",
     "ddd_list = torch.load(rf'data\\meta_path\\ddd.pth')\n",
     "torch.cuda.empty_cache()\n",
     "for i in range(5):\n",
     "    # Move this fold's meta-path matrices to the device.\n",
     "    cc = cc_list[i].to(torch.float32).to(device)\n",
     "    ccc = ccc_list[i].to(torch.float32).to(device)\n",
     "    cdc = cdc_list[i].to(torch.float32).to(device)\n",
     "    dd = dd_list[i].to(torch.float32).to(device)\n",
     "    ddd = ddd_list[i].to(torch.float32).to(device)\n",
     "    dcd = dcd_list[i].to(torch.float32).to(device)\n",
     "    node_fea = adj_matrix_list[i].to(torch.float32).to(device)\n",
     "    meta_paths = {}\n",
     "    meta_paths['cc'] = cc\n",
     "    meta_paths['ccc'] = ccc\n",
     "    meta_paths['cdc'] = cdc\n",
     "    meta_paths['dd'] = dd\n",
     "    meta_paths['ddd'] = ddd\n",
     "    meta_paths['dcd'] = dcd\n",
     "    adj_matrix = adj_matrix_list[i]\n",
     "    adj_matrix = adj_matrix.to(torch.float32).to(device)\n",
     "    # Symmetric normalization using the averaged row/column degrees.\n",
     "    rowD = torch.sum(adj_matrix,dim=1)\n",
     "    colD = torch.sum(adj_matrix,dim=0)\n",
     "    avgD = (rowD+colD)/2.0 #1527\n",
     "    adj_norm = torch.sqrt(torch.inverse(torch.diag(avgD)))@adj_matrix@torch.sqrt(torch.inverse(torch.diag(avgD)))\n",
     "    adj_norm = adj_norm.to(torch.float32)\n",
     "\n",
     "    print(f'cross:{i}')\n",
     "    \n",
     "    model = my_model(device).to(device)\n",
     "    train_dataset = DataLoader(dataset=MyDataset(t_dataset=train_data[i].to(device),c_d=c_d),batch_size=16,shuffle=True)\n",
     "    test_dataset = DataLoader(dataset=MyDataset(t_dataset=test_data[i].to(device),c_d=c_d),batch_size=16,shuffle=False)\n",
     "    epoch = 70\n",
     "    # NOTE(review): the raw adj_matrix (not adj_norm) is passed as node_fea here -- confirm intended.\n",
     "    train(model,adj_norm,adj_matrix,train_dataset,test_dataset,i,epoch,device,rowD,meta_paths)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python3.8version",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
