{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "from scipy import interp\n",
    "from sklearn import metrics\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "import time\n",
    "import random\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import math\n",
    "import mxnet as mx\n",
    "from mxnet import ndarray as nd, gluon, autograd\n",
    "from mxnet.gluon import loss as gloss\n",
    "import dgl\n",
    "from sklearn.model_selection import KFold\n",
    "from sklearn import metrics\n",
    "\n",
    "from utils import build_graph, load_data \n",
    "from model import GNNMDA, GraphEncoder, BilinearDecoder\n",
    "\n",
    "from sklearn.metrics import confusion_matrix\n",
    "from sklearn.metrics import roc_auc_score, auc\n",
    "from sklearn.metrics import precision_recall_fscore_support\n",
    "from sklearn.metrics import precision_recall_curve\n",
    "from sklearn.metrics import classification_report\n",
    "from collections import Counter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def sample(directory, random_seed):\n",
    "    \"\"\"Build a balanced gene-peco pair set: all positives plus an\n",
    "    equal-sized random draw of negatives.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    directory : str\n",
    "        Folder containing 'all_gpe_pairs.csv' (must have a 'label' column).\n",
    "    random_seed : int\n",
    "        Seed for the negative-sample draw, for reproducibility.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    (pandas.DataFrame, numpy.ndarray)\n",
    "        The balanced frame (index reset to a RangeIndex) and its .values array.\n",
    "    \"\"\"\n",
    "    all_associations = pd.read_csv(directory + '/all_gpe_pairs.csv')\n",
    "    known_associations = all_associations.loc[all_associations['label'] == 1]\n",
    "    unknown_associations = all_associations.loc[all_associations['label'] == 0]\n",
    "    # Draw exactly as many negatives as there are positives -> 1:1 class balance.\n",
    "    random_negative = unknown_associations.sample(n=known_associations.shape[0], random_state=random_seed, axis=0)\n",
    "\n",
    "    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported API.\n",
    "    sample_df = pd.concat([known_associations, random_negative])\n",
    "    sample_df.reset_index(drop=True, inplace=True)\n",
    "\n",
    "    return sample_df, sample_df.values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_task_Tg_Tpe_train_test_idx(item, ids, dtp):\n",
    "    \"\"\"5-fold cold-start split over entity ids.\n",
    "\n",
    "    Each fold holds out one fifth of `ids`; every pair row in `dtp` whose\n",
    "    `item` column falls in the held-out ids becomes a test pair, the rest\n",
    "    become training pairs.\n",
    "\n",
    "    Returns (train_index_all, test_index_all, train_id_all, test_id_all),\n",
    "    each a list with one entry per fold.\n",
    "    \"\"\"\n",
    "    fold_size = int(len(ids) / 5)\n",
    "\n",
    "    train_index_all, test_index_all = [], []\n",
    "    train_id_all, test_id_all = [], []\n",
    "\n",
    "    for k in range(5):\n",
    "        print('-------Fold ', k)\n",
    "        # The last fold absorbs the remainder when len(ids) % 5 != 0.\n",
    "        stop = None if k == 4 else (k + 1) * fold_size\n",
    "        held_out_ids = ids[k * fold_size : stop]\n",
    "        kept_ids = list(set(ids) ^ set(held_out_ids))\n",
    "        print('# {}: Train = {} | Test = {}'.format(item, len(kept_ids), len(held_out_ids)))\n",
    "\n",
    "        test_idx = dtp[dtp[item].isin(held_out_ids)].index.tolist()\n",
    "        train_idx = dtp[dtp[item].isin(kept_ids)].index.tolist()\n",
    "        random.shuffle(test_idx)\n",
    "        random.shuffle(train_idx)\n",
    "        print('# Pairs: Train = {} | Test = {}'.format(len(train_idx), len(test_idx)))\n",
    "        # Every pair must land on exactly one side of the split.\n",
    "        assert len(train_idx) + len(test_idx) == len(dtp)\n",
    "\n",
    "        train_index_all.append(train_idx)\n",
    "        test_index_all.append(test_idx)\n",
    "        train_id_all.append(kept_ids)\n",
    "        test_id_all.append(held_out_ids)\n",
    "\n",
    "    return train_index_all, test_index_all, train_id_all, test_id_all"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def Train(directory, epochs, aggregator, embedding_size, layers, dropout, slope, lr, wd, random_seed, ctx, isbalance, task):\n",
    "    \"\"\"Run 5-fold cross-validated training of the GNNMDA model.\n",
    "\n",
    "    task: 'Tp'  -> random KFold split over pairs,\n",
    "          'Tpe' -> cold-start split over peco ids,\n",
    "          'Tg'  -> cold-start split over gene ids.\n",
    "\n",
    "    Returns (ys_train, performances_train, ys_test, performances_test)\n",
    "    from the final fold/epoch.\n",
    "    \"\"\"\n",
    "    random.seed(random_seed)\n",
    "    np.random.seed(random_seed)\n",
    "    mx.random.seed(random_seed)\n",
    "\n",
    "    g, peco_ids_invmap, gene_ids_invmap = build_graph(directory, random_seed, ctx)\n",
    "    print(g)\n",
    "    if isbalance:\n",
    "        dtp, samples = sample(directory, random_seed)\n",
    "    else:\n",
    "        # The original code fell through with `dtp`/`samples` undefined\n",
    "        # (NameError); fail fast with a clear message instead.\n",
    "        raise ValueError('isbalance=False is not supported: no sampling strategy defined')\n",
    "    print(samples.shape)\n",
    "    IPE, IG = load_data(directory)\n",
    "\n",
    "    print('## vertices:', g.number_of_nodes())\n",
    "    print('## edges:', g.number_of_edges())\n",
    "    print('## peco nodes:', nd.sum(g.ndata['type'] == 1).asnumpy())\n",
    "    print('## gene nodes:', nd.sum(g.ndata['type'] == 0).asnumpy())\n",
    "\n",
    "    samples_df = pd.DataFrame(samples)\n",
    "    # Map raw ids to graph vertex ids; gene vertices are offset by IG.shape[0].\n",
    "    # NOTE(review): assumes column 0 holds the gene id and column 1 the peco id\n",
    "    # -- confirm against all_gpe_pairs.csv.\n",
    "    sample_peco_vertices = [peco_ids_invmap[id_] for id_ in samples[:, 1]]\n",
    "    sample_gene_vertices = [gene_ids_invmap[id_] + IG.shape[0] for id_ in samples[:, 0]]\n",
    "\n",
    "    if task == 'Tp':\n",
    "        # Random split over pairs.\n",
    "        kf = KFold(n_splits=5, shuffle=True, random_state=random_seed)\n",
    "        train_index = []\n",
    "        test_index = []\n",
    "        for train_idx, test_idx in kf.split(samples[:, 2]):\n",
    "            train_index.append(train_idx)\n",
    "            test_index.append(test_idx)\n",
    "    else:\n",
    "        # Cold-start split over entity ids.  The original cell mixed the\n",
    "        # spellings 'peco__idx'/'peco_idx' and 'gene'/'gene_idx'; one spelling\n",
    "        # per entity is used here -- TODO confirm against the CSV header.\n",
    "        gene_ids = list(set(dtp['gene_idx']))\n",
    "        peco_ids = list(set(dtp['peco_idx']))\n",
    "        random.shuffle(gene_ids)\n",
    "        random.shuffle(peco_ids)\n",
    "        print('# gene = {} | peco = {}'.format(len(gene_ids), len(peco_ids)))\n",
    "\n",
    "        gene_test_num = int(len(gene_ids) / 5)\n",
    "        peco_test_num = int(len(peco_ids) / 5)\n",
    "        print('# Test: gene = {} | peco = {}'.format(gene_test_num, peco_test_num))\n",
    "\n",
    "        if task == 'Tpe':\n",
    "            item = 'peco_idx'\n",
    "            ids = peco_ids\n",
    "        elif task == 'Tg':\n",
    "            item = 'gene_idx'\n",
    "            ids = gene_ids\n",
    "        else:\n",
    "            raise ValueError('unknown task: {}'.format(task))\n",
    "\n",
    "        # The generator returns four lists; the original unpacked only two,\n",
    "        # which raised ValueError.  The per-fold id lists are unused here.\n",
    "        train_index, test_index, _, _ = generate_task_Tg_Tpe_train_test_idx(item, ids, dtp)\n",
    "\n",
    "    #####################################################################################\n",
    "    for i in range(len(train_index)):\n",
    "        print('------------------------------------------------------------------------------------------------------')\n",
    "        print('Training for Fold ', i + 1)\n",
    "\n",
    "        # Flag each pair as train/test for this fold.  samples_df carries a\n",
    "        # RangeIndex, so label-based .loc matches the positional indices\n",
    "        # produced above and avoids chained-assignment warnings.\n",
    "        samples_df['train'] = 0\n",
    "        samples_df['test'] = 0\n",
    "        samples_df.loc[train_index[i], 'train'] = 1\n",
    "        samples_df.loc[test_index[i], 'test'] = 1\n",
    "\n",
    "        train_tensor = nd.from_numpy(samples_df['train'].values.astype('int32')).copyto(ctx)\n",
    "        test_tensor = nd.from_numpy(samples_df['test'].values.astype('int32')).copyto(ctx)\n",
    "\n",
    "        edge_data = {'train': train_tensor,\n",
    "                     'test': test_tensor}\n",
    "\n",
    "        # Mark both edge directions.\n",
    "        g.edges[sample_peco_vertices, sample_gene_vertices].data.update(edge_data)\n",
    "        g.edges[sample_gene_vertices, sample_peco_vertices].data.update(edge_data)\n",
    "\n",
    "        train_eid = g.filter_edges(lambda edges: edges.data['train']).astype('int64')\n",
    "        g_train = g.edge_subgraph(train_eid, preserve_nodes=True)\n",
    "        g_train.copy_from_parent()\n",
    "\n",
    "        # Training edge set.\n",
    "        rating_train = g_train.edata['rating']\n",
    "        src_train, dst_train = g_train.all_edges()\n",
    "        # Testing edge set.\n",
    "        test_eid = g.filter_edges(lambda edges: edges.data['test']).astype('int64')\n",
    "        src_test, dst_test = g.find_edges(test_eid)\n",
    "        rating_test = g.edges[test_eid].data['rating']\n",
    "        src_train = src_train.copyto(ctx)\n",
    "        src_test = src_test.copyto(ctx)\n",
    "        dst_train = dst_train.copyto(ctx)\n",
    "        dst_test = dst_test.copyto(ctx)\n",
    "        print('## Training edges:', len(train_eid))\n",
    "        print('## Testing edges:', len(test_eid))\n",
    "\n",
    "        # Fresh model per fold: encoder over the training subgraph, bilinear decoder.\n",
    "        model = GNNMDA(GraphEncoder(embedding_size=embedding_size, n_layers=layers, G=g_train, aggregator=aggregator,\n",
    "                                    dropout=dropout, slope=slope, ctx=ctx),\n",
    "                       BilinearDecoder(feature_size=embedding_size))\n",
    "\n",
    "        model.collect_params().initialize(init=mx.init.Xavier(magnitude=math.sqrt(2.0)), ctx=ctx)\n",
    "        cross_entropy = gloss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=True)\n",
    "        trainer = gluon.Trainer(model.collect_params(), 'adam', {'learning_rate': lr, 'wd': wd})\n",
    "\n",
    "        for epoch in range(epochs):\n",
    "            start = time.time()\n",
    "            # 10 optimizer steps per reported 'epoch'.\n",
    "            for _ in range(10):\n",
    "                with mx.autograd.record():\n",
    "                    score_train = model(g_train, src_train, dst_train)\n",
    "                    loss_train = cross_entropy(score_train, rating_train).mean()\n",
    "                    loss_train.backward()\n",
    "                trainer.step(1)\n",
    "\n",
    "            results_train = [0 if j < 0.5 else 1 for j in np.squeeze(score_train.asnumpy())]\n",
    "\n",
    "            # Validate on held-out edges using embeddings from the full graph.\n",
    "            h_val = model.encoder(g)\n",
    "            score_val = model.decoder(h_val[src_test], h_val[dst_test])\n",
    "            loss_val = cross_entropy(score_val, rating_test).mean()\n",
    "            results_val = [0 if j < 0.5 else 1 for j in np.squeeze(score_val.asnumpy())]\n",
    "\n",
    "            end = time.time()\n",
    "\n",
    "            print('Epoch:', epoch + 1,\n",
    "                  'Train Loss: %.4f' % loss_train.asscalar(),\n",
    "                  'Val Loss: %.4f' % loss_val.asscalar(),\n",
    "                  'Time: %.2f' % (end - start))\n",
    "\n",
    "            print('***************Train: ')\n",
    "            ys_train, performances_train = performances(rating_train.asnumpy(),\n",
    "                                                        results_train,\n",
    "                                                        score_train.asnumpy())\n",
    "\n",
    "            print('***************Test: ')\n",
    "            ys_val, performances_val = performances(rating_test.asnumpy(),\n",
    "                                                    results_val,\n",
    "                                                    score_val.asnumpy())\n",
    "\n",
    "        # Final evaluation for this fold.\n",
    "        h_test = model.encoder(g)\n",
    "        score_test = model.decoder(h_test[src_test], h_test[dst_test])\n",
    "        results_test = [0 if j < 0.5 else 1 for j in np.squeeze(score_test.asnumpy())]\n",
    "\n",
    "        print('***************Fold:', i + 1)\n",
    "        ys_test, performances_test = performances(rating_test.asnumpy(),\n",
    "                                                  results_test,\n",
    "                                                  score_test.asnumpy())\n",
    "\n",
    "    print('## Training Finished !')\n",
    "    print('----------------------------------------------------------------------------------------------------------')\n",
    "\n",
    "    # NOTE: only the final fold's metrics are returned.\n",
    "    return ys_train, performances_train, ys_test, performances_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def performances(y_true, y_pred, y_prob):\n",
    "    \"\"\"Print and return binary-classification metrics.\n",
    "\n",
    "    Returns ((y_true, y_pred, y_prob),\n",
    "             (accuracy, precision, recall, f1, roc_auc, aupr)).\n",
    "    \"\"\"\n",
    "    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])\n",
    "    tn, fp, fn, tp = cm.ravel().tolist()\n",
    "\n",
    "    accuracy = (tp + tn) / (tn + fp + fn + tp)\n",
    "\n",
    "    # Guard the undefined cases instead of dividing by zero.\n",
    "    if tp + fn == 0:\n",
    "        print('tp + fn = 0')\n",
    "        recall = 0\n",
    "    else:\n",
    "        recall = tp / (tp + fn)\n",
    "\n",
    "    if tp + fp == 0:\n",
    "        print('tp + fp = 0')\n",
    "        precision = 0\n",
    "    else:\n",
    "        precision = tp / (tp + fp)\n",
    "\n",
    "    f1 = 2 * precision * recall / (precision + recall) if precision + recall != 0 else 0\n",
    "\n",
    "    roc_auc = roc_auc_score(y_true, y_prob)\n",
    "    prec, reca, _ = precision_recall_curve(y_true, y_prob)\n",
    "    aupr = auc(reca, prec)\n",
    "\n",
    "    pred_counts = Counter(y_pred)\n",
    "    true_counts = Counter(y_true)\n",
    "    print('tn = {}, fp = {}, fn = {}, tp = {}'.format(tn, fp, fn, tp))\n",
    "    print('y_pred: 0 = {} | 1 = {}'.format(pred_counts[0], pred_counts[1]))\n",
    "    print('y_true: 0 = {} | 1 = {}'.format(true_counts[0], true_counts[1]))\n",
    "    print('acc={:.4f}|precision={:.4f}|recall={:.4f}|f1={:.4f}|auc={:.4f}|aupr={:.4f}'.format(accuracy, precision, recall, f1, roc_auc, aupr))\n",
    "    return (y_true, y_pred, y_prob), (accuracy, precision, recall, f1, roc_auc, aupr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run(isbalance, task):\n",
    "    \"\"\"Launch Train with the fixed hyper-parameter set used in this study.\n",
    "\n",
    "    Returns (ys_train, performances_train, ys_test, performances_test).\n",
    "    \"\"\"\n",
    "    hyperparams = dict(\n",
    "        directory=r'../../data',\n",
    "        epochs=100,\n",
    "        aggregator='GraphSAGE',\n",
    "        embedding_size=256,\n",
    "        layers=2,\n",
    "        dropout=0.7,\n",
    "        slope=0.2,  # LeakyReLU negative slope\n",
    "        lr=0.001,\n",
    "        wd=1e-3,\n",
    "        random_seed=1234,\n",
    "        ctx=mx.cpu(),\n",
    "    )\n",
    "    return Train(isbalance=isbalance, task=task, **hyperparams)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Run"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Task Tg: 5-fold cold-start split over gene ids (see Train / generate_task_Tg_Tpe_train_test_idx).\n",
    "ys_train, performances_train, ys_test, performances_test = run(isbalance = True, task = 'Tg')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Task Tpe: 5-fold cold-start split over peco ids (see Train / generate_task_Tg_Tpe_train_test_idx).\n",
    "ys_train, performances_train, ys_test, performances_test = run(isbalance = True, task = 'Tpe')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Task Tp: random 5-fold KFold split over gene-peco pairs (see Train).\n",
    "ys_train, performances_train, ys_test, performances_test = run(isbalance = True, task = 'Tp')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
