{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import numpy as np\n",
    "from sklearn import metrics\n",
    "import torch.nn as nn\n",
    "from scipy import sparse\n",
    "from torch.utils.data import DataLoader,Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data():\n",
    "    \"\"\"Load the raw association/similarity matrices from ``data/``.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    tuple of torch.Tensor\n",
    "        (dise_sim, c_d, c_m, m_d):\n",
    "        dise_sim -- 138*138 disease-disease similarity, float64\n",
    "        c_d      -- 834*138 circRNA-disease associations, int64\n",
    "        c_m      -- 834*555 circRNA-miRNA associations, int64\n",
    "        m_d      -- 555*138 miRNA-disease associations, int64\n",
    "    \"\"\"\n",
    "    # Forward slashes are portable: np.load accepts them on Windows too,\n",
    "    # unlike the original non-raw back-slash paths whose '\\d' / '\\c' are\n",
    "    # invalid escape sequences.\n",
    "    dise_sim = np.load('data/disease_disease.npy')  # 138*138\n",
    "    c_d = np.load('data/circRNA_disease.npy')       # 834*138\n",
    "    c_m = np.load('data/circRNA_miRNA.npy')         # 834*555\n",
    "    m_d = np.load('data/disease_miRNA.npy').T       # 555*138\n",
    "    return (torch.tensor(dise_sim, dtype=float),\n",
    "            torch.tensor(c_d, dtype=torch.long),\n",
    "            torch.tensor(c_m, dtype=torch.long),\n",
    "            torch.tensor(m_d, dtype=torch.long))\n",
    "\n",
    "import itertools\n",
    "def calculate_sim(cd,dd):\n",
    "    s1=cd.shape[0]\n",
    "    ll=torch.eye(s1)\n",
    "    m2=dd*cd[:,None,:]\n",
    "    m1=cd[:,:,None]\n",
    "    for x,y in itertools.permutations(torch.linspace(0,s1-1,s1,dtype=torch.long),2):\n",
    "        x,y=x.item(),y.item()\n",
    "        m=m1[x,:,:]*m2[y,:,:]\n",
    "        if cd[x].sum()+cd[y].sum()==0:\n",
    "            ll[x,y]=0\n",
    "        else:\n",
    "            ll[x,y]=(m.max(dim=0,keepdim=True)[0].sum()+m.max(dim=1,keepdim=True)[0].sum())/(cd[x].sum()+cd[y].sum())\n",
    "    return ll"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def split_dataset(n, c_d, dise_sim):\n",
    "    \"\"\"Build n-fold cross-validation splits of the circRNA-disease pairs.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    n : int\n",
    "        Number of folds.\n",
    "    c_d : torch.Tensor\n",
    "        Binary circRNA-disease association matrix.\n",
    "    dise_sim : torch.Tensor\n",
    "        Disease-disease similarity, forwarded to ``calculate_sim``.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    Four lists of length n:\n",
    "        train_dataset[i] -- 2 x (#train) tensor of (circ, disease) index pairs\n",
    "        test_dataset[i]  -- 2 x (#test) tensor of index pairs\n",
    "        cd_cover[i]      -- copy of c_d with fold i's positive test pairs zeroed\n",
    "        circ_sims[i]     -- circRNA similarity recomputed from the masked matrix\n",
    "    \"\"\"\n",
    "    # Positive samples: coordinates of every known association.\n",
    "    cd_axix = torch.where(c_d == 1)\n",
    "    circ_x = cd_axix[0]\n",
    "    dise_y = cd_axix[1]\n",
    "    positive_sample = torch.cat([circ_x[:, None], dise_y[:, None]], dim=1)\n",
    "    # int(...) because torch.randperm expects a Python int, not a 0-dim tensor.\n",
    "    rand_index1 = torch.randperm(int(c_d.sum()))\n",
    "    # Shuffle the positive pairs.\n",
    "    ps = torch.index_select(positive_sample, dim=0, index=rand_index1)\n",
    "\n",
    "    # Negative samples: coordinates of every non-association, shuffled.\n",
    "    cd_axix_neg = torch.where(c_d == 0)\n",
    "    circ_x_neg = cd_axix_neg[0]\n",
    "    dise_y_neg = cd_axix_neg[1]\n",
    "    negative_sample = torch.cat([circ_x_neg[:, None], dise_y_neg[:, None]], dim=1)\n",
    "    rand_index2 = torch.randperm(negative_sample.shape[0])\n",
    "    ns = torch.index_select(input=negative_sample, dim=0, index=rand_index2)\n",
    "\n",
    "    # n-fold cross validation: n-1 folds train, 1 fold (plus the remainder\n",
    "    # pairs beyond n full folds) test.\n",
    "    p_len = ps.shape[0]\n",
    "    one_sample_len = int(p_len / n)  # fold size; remainder handled below\n",
    "    train_dataset, test_dataset, cd_cover, circ_sims = [], [], [], []\n",
    "    for i in range(n):\n",
    "        # Training set: every fold except fold i (remainder pairs excluded).\n",
    "        train_p = torch.cat([ps[:i*one_sample_len, :], ps[(i+1)*one_sample_len:n*one_sample_len, :]], dim=0).T\n",
    "        train_n = torch.cat([ns[:i*one_sample_len, :], ns[(i+1)*one_sample_len:n*one_sample_len, :]], dim=0).T\n",
    "        train_data = torch.cat([train_p, train_n], dim=1)\n",
    "        train_dataset.append(train_data)\n",
    "        # Test set: fold i plus all leftover pairs beyond n full folds\n",
    "        # (for negatives that is every remaining non-association).\n",
    "        test_p = torch.cat((ps[i*one_sample_len:(i+1)*one_sample_len, :], ps[n*one_sample_len:, :]), dim=0).T\n",
    "        test_n = torch.cat((ns[i*one_sample_len:(i+1)*one_sample_len, :], ns[n*one_sample_len:, :]), dim=0).T\n",
    "        test_data = torch.cat([test_p, test_n], dim=1)\n",
    "        test_dataset.append(test_data)\n",
    "        # Mask fold i's positive test pairs so they are hidden from training.\n",
    "        cover_one = c_d.clone()\n",
    "        cover_one[test_p[0, :], test_p[1, :]] = 0\n",
    "        cd_cover.append(cover_one)\n",
    "        # Recompute circRNA similarity from the masked association matrix.\n",
    "        circ_sims.append(calculate_sim(cover_one, dise_sim))\n",
    "    return train_dataset, test_dataset, cd_cover, circ_sims\n",
    "dise_sim, c_d, c_m, m_d = load_data()\n",
    "train_dataset, test_dataset, cd_cover, circ_sims = split_dataset(n=5, c_d=c_d, dise_sim=dise_sim)\n",
    "# miRNA similarity from the full (unmasked) miRNA-disease matrix.\n",
    "mi_sim = calculate_sim(m_d, dise_sim)\n",
    "# Alias kept: the code below refers to the per-fold list as circ_sim.\n",
    "circ_sim = circ_sims\n",
    "def make_cover_matrix(n,circ_sim,cd_cover,c_m,dise_sim,m_d,mi_sim):\n",
    "    #获取数据\n",
    "\n",
    "    #五倍交叉，构造五次\n",
    "    cover_feature_matrix = []\n",
    "    for i in range(n):\n",
    "        circ_row = torch.cat([circ_sim[i],cd_cover[i],c_m],dim=1)\n",
    "        dise_row = torch.cat([cd_cover[i].T,dise_sim,m_d.T],dim=1)\n",
    "        mi_row = torch.cat([c_m.T,m_d,mi_sim],dim=1)\n",
    "        cover_matrix = torch.cat([circ_row,dise_row,mi_row],dim=0)\n",
    "        cover_feature_matrix.append(cover_matrix)\n",
    "    return cover_feature_matrix\n",
    "\n",
    "cover_feature_matrix = make_cover_matrix(5, circ_sim, cd_cover, c_m, dise_sim, m_d, mi_sim)\n",
    "# Persist everything the downstream cells need.\n",
    "# NOTE(review): raw back-slash paths are Windows-only; they must stay in\n",
    "# sync with the loading cell below.\n",
    "torch.save(c_d, R'data\\circ_disease.pth')\n",
    "torch.save(cd_cover, R'data\\cd_cover.pth')\n",
    "torch.save(train_dataset, R'data\\train_dataset.pth')\n",
    "torch.save(test_dataset, R'data\\test_data.pth')\n",
    "torch.save(cover_feature_matrix, R'data\\cover_feature_matrix.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build per-fold meta-path matrices from the saved block adjacency matrices.\n",
    "N_CIRC, N_DISE = 834, 138  # circRNA / disease block sizes in the stacked matrix\n",
    "adj_matrix_list = torch.load(r'data\\cover_feature_matrix.pth')\n",
    "cd, ccc, cdc, dcd, ddd, cc, dd = [], [], [], [], [], [], []\n",
    "for i in range(5):\n",
    "    adj_matrix = adj_matrix_list[i]\n",
    "    # Slice the circRNA-circRNA, circRNA-disease and disease-disease blocks.\n",
    "    circ_sim = adj_matrix[0:N_CIRC, 0:N_CIRC]\n",
    "    c_d = adj_matrix[0:N_CIRC, N_CIRC:N_CIRC + N_DISE]\n",
    "    di_sim = adj_matrix[N_CIRC:N_CIRC + N_DISE, N_CIRC:N_CIRC + N_DISE]\n",
    "\n",
    "    # Meta-path products, e.g. c_d_c follows circRNA -> disease -> circRNA.\n",
    "    c_c_c = torch.matmul(circ_sim, circ_sim)\n",
    "    c_d_c = torch.matmul(c_d, c_d.T)\n",
    "    d_c_d = torch.matmul(c_d.T, c_d)\n",
    "    d_d_d = torch.matmul(di_sim, di_sim)\n",
    "\n",
    "    cd.append(c_d)\n",
    "    ccc.append(c_c_c)\n",
    "    cdc.append(c_d_c)\n",
    "    dcd.append(d_c_d)\n",
    "    ddd.append(d_d_d)\n",
    "    cc.append(circ_sim)\n",
    "    dd.append(di_sim)\n",
    "torch.save(cd, r'data\\meta_path\\cd.pth')\n",
    "torch.save(cc, r'data\\meta_path\\cc.pth')\n",
    "torch.save(dd, r'data\\meta_path\\dd.pth')\n",
    "torch.save(ccc, r'data\\meta_path\\ccc.pth')\n",
    "torch.save(cdc, r'data\\meta_path\\cdc.pth')\n",
    "torch.save(dcd, r'data\\meta_path\\dcd.pth')\n",
    "torch.save(ddd, r'data\\meta_path\\ddd.pth')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "python3.7version",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
