{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "from sklearn.model_selection import KFold\n",
    "from sklearn import metrics\n",
    "from sklearn.metrics import precision_recall_curve\n",
    "from sklearn import preprocessing\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "import time\n",
    "import random\n",
    "import matplotlib.pyplot as plt\n",
    "from scipy import interp\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from collections import Counter\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import confusion_matrix\n",
    "from sklearn.metrics import roc_auc_score, auc\n",
    "from sklearn.metrics import precision_recall_fscore_support\n",
    "from sklearn.metrics import precision_recall_curve\n",
    "from sklearn.metrics import classification_report\n",
    "from sklearn.metrics import roc_curve\n",
    "from sklearn.metrics import precision_recall_curve"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data(directory):\n",
    "    GSSM = np.loadtxt(directory + '\\GSSM.txt',dtype=np.float32)\n",
    "    PESSM = np.loadtxt(directory + '\\PSSM.txt',dtype=np.float32,delimiter='\\t')\n",
    "\n",
    "    IPE = pd.DataFrame(PESSM).reset_index()\n",
    "    IG = pd.DataFrame(GSSM).reset_index()\n",
    "    IPE.rename(columns = {'index':'id'}, inplace = True)\n",
    "    IG.rename(columns = {'index':'id'}, inplace = True)\n",
    "    IPE['id'] = IPE['id']\n",
    "    IG['id'] = IG['id']\n",
    "    \n",
    "    return IPE, IG\n",
    "\n",
    "def sample(directory, random_seed):\n",
    "    all_associations = pd.read_csv(directory + '/all_gpe_pairs.csv')\n",
    "    known_associations = all_associations.loc[all_associations['label'] == 1]\n",
    "    peco_ids = list(set(known_associations['peco_idx']))\n",
    "    unknown_associations = all_associations.loc[all_associations['label'] == 0]\n",
    "    sample_df = known_associations\n",
    "    for peco_id in peco_ids:\n",
    "        random_negative = unknown_associations.loc[all_associations['peco_idx'] == peco_id].sample(n=known_associations.loc[all_associations['peco_idx'] == peco_id].shape[0], random_state=random_seed, axis=0, replace=True)\n",
    "\n",
    "        sample_df = pd.concat([sample_df,random_negative], axis=0)\n",
    "\n",
    "    sample_df.reset_index(drop=True, inplace=True)\n",
    "\n",
    "    return sample_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def performances(y_true, y_pred, y_prob):\n",
    "\n",
    "    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels = [0, 1]).ravel().tolist()\n",
    "\n",
    "    pos_acc = tp / sum(y_true)\n",
    "    neg_acc = tn / (len(y_pred) - sum(y_pred)) # [y_true=0 & y_pred=0] / y_pred=0\n",
    "    accuracy = (tp+tn)/(tn+fp+fn+tp)\n",
    "    \n",
    "    recall = tp / (tp+fn)\n",
    "    precision = tp / (tp+fp)\n",
    "    f1 = 2*precision*recall / (precision+recall)\n",
    "    \n",
    "    roc_auc = roc_auc_score(y_true, y_prob)\n",
    "    prec, reca, _ = precision_recall_curve(y_true, y_prob)\n",
    "    aupr = auc(reca, prec)\n",
    "    \n",
    "    print('tn = {}, fp = {}, fn = {}, tp = {}'.format(tn, fp, fn, tp))\n",
    "    print('y_pred: 0 = {} | 1 = {}'.format(Counter(y_pred)[0], Counter(y_pred)[1]))\n",
    "    print('y_true: 0 = {} | 1 = {}'.format(Counter(y_true)[0], Counter(y_true)[1]))\n",
    "    print('acc={:.4f}|precision={:.4f}|recall={:.4f}|f1={:.4f}|auc={:.4f}|aupr={:.4f}|pos_acc={:.4f}|neg_acc={:.4f}'.format(accuracy, precision, recall, f1, roc_auc, aupr, pos_acc, neg_acc))\n",
    "    return (y_true, y_pred, y_prob), (accuracy, precision, recall, f1, roc_auc, aupr, pos_acc, neg_acc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def obtain_data(directory, isbalance):\n",
    "\n",
    "    IPE, IG = load_data(directory)\n",
    "\n",
    "    if isbalance:\n",
    "        dtp = sample(directory, random_seed = 1234)\n",
    "    else:\n",
    "        dtp = pd.read_csv(directory + '/all_gene_peco_pairs.csv')\n",
    "\n",
    "    gene_ids = list(set(dtp['gene_idx']))\n",
    "    peco_ids = list(set(dtp['peco_idx']))\n",
    "    random.shuffle(gene_ids)\n",
    "    random.shuffle(peco_ids)\n",
    "    print('# gene = {} | peco = {}'.format(len(gene_ids), len(peco_ids)))\n",
    "\n",
    "    gene_test_num = int(len(gene_ids) / 5)\n",
    "    peco_test_num = int(len(peco_ids) / 5)\n",
    "    print('# Test: gene = {} | peco = {}'.format(gene_test_num, peco_test_num))    \n",
    "    \n",
    "    samples = pd.merge(pd.merge(dtp, IPE, left_on = 'peco_idx', right_on = 'id'), IG, left_on = 'gene_idx', right_on = 'id')\n",
    "    samples.drop(labels = ['id_x', 'id_y'], axis = 1, inplace = True)\n",
    "    \n",
    "    return IPE, IG, dtp, gene_ids, peco_ids, gene_test_num, peco_test_num, samples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_task_Tp_train_test_idx(samples):\n",
    "    kf = KFold(n_splits = 5, shuffle = True, random_state = 1234)\n",
    "\n",
    "    train_index_all, test_index_all, n = [], [], 0\n",
    "    train_id_all, test_id_all = [], []\n",
    "    fold = 0\n",
    "    for train_idx, test_idx in tqdm(kf.split(samples.iloc[:, 3:])):\n",
    "        print('-------Fold ', fold)\n",
    "        train_index_all.append(train_idx) \n",
    "        test_index_all.append(test_idx)\n",
    "\n",
    "        train_id_all.append(np.array(dtp.iloc[train_idx][['gene_idx', 'peco_idx']]))\n",
    "        test_id_all.append(np.array(dtp.iloc[test_idx][['gene_idx', 'peco_idx']]))\n",
    "\n",
    "        print('# Pairs: Train = {} | Test = {}'.format(len(train_idx), len(test_idx)))\n",
    "        fold += 1\n",
    "    return train_index_all, test_index_all, train_id_all, test_id_all"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_task_Tg_Tpe_train_test_idx(item, ids, dtp):\n",
    "    \n",
    "    test_num = int(len(ids) / 5)\n",
    "    \n",
    "    train_index_all, test_index_all = [], []\n",
    "    train_id_all, test_id_all = [], []\n",
    "    \n",
    "    for fold in range(5):\n",
    "        print('-------Fold ', fold)\n",
    "        if fold != 4:\n",
    "            test_ids = ids[fold * test_num : (fold + 1) * test_num]\n",
    "        else:\n",
    "            test_ids = ids[fold * test_num :]\n",
    "\n",
    "        train_ids = list(set(ids) ^ set(test_ids))\n",
    "        print('# {}: Train = {} | Test = {}'.format(item, len(train_ids), len(test_ids)))\n",
    "\n",
    "        test_idx = dtp[dtp[item].isin(test_ids)].index.tolist()\n",
    "        train_idx = dtp[dtp[item].isin(train_ids)].index.tolist()\n",
    "        random.shuffle(test_idx)\n",
    "        random.shuffle(train_idx)\n",
    "        print('# Pairs: Train = {} | Test = {}'.format(len(train_idx), len(test_idx)))\n",
    "        assert len(train_idx) + len(test_idx) == len(dtp)\n",
    "\n",
    "        train_index_all.append(train_idx) \n",
    "        test_index_all.append(test_idx)\n",
    "        \n",
    "        train_id_all.append(train_ids)\n",
    "        test_id_all.append(test_ids)\n",
    "        \n",
    "    return train_index_all, test_index_all, train_id_all, test_id_all"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# RF"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_rf(train_index_all, test_index_all, samples):\n",
    "    \n",
    "    fold = 0\n",
    "    for train_idx, test_idx in zip(train_index_all, test_index_all):\n",
    "        print('-----------------------Fold = ', str(fold))\n",
    "\n",
    "        X = samples.iloc[:, 3:]\n",
    "        y = samples['label']\n",
    "\n",
    "        scaler = preprocessing.MinMaxScaler().fit(X.iloc[train_idx,:])\n",
    "        X = scaler.transform(X)\n",
    "\n",
    "        x_train, y_train = X[train_idx], y[train_idx]\n",
    "        x_test, y_test = X[test_idx], y[test_idx]\n",
    "\n",
    "        clf = RandomForestClassifier(random_state = 19961231)\n",
    "        clf.fit(x_train, y_train)\n",
    "\n",
    "        y_train_prob = clf.predict_proba(x_train)\n",
    "        y_test_prob = clf.predict_proba(x_test)\n",
    "\n",
    "        y_train_pred = clf.predict(x_train)\n",
    "        y_test_pred = clf.predict(x_test)\n",
    "\n",
    "        print('Train:')\n",
    "        ys_train, metrics_train = performances(y_train, y_train_pred, y_train_prob[:, 1])\n",
    "        print('Test:')\n",
    "        ys_test, metrics_test = performances(y_test, y_test_pred, y_test_prob[:, 1])\n",
    "\n",
    "        fold += 1\n",
    "        \n",
    "    return ys_train, metrics_train, ys_test, metrics_test"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# DNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import *\n",
    "from tensorflow.keras.optimizers import *\n",
    "from tensorflow.keras.losses import binary_crossentropy\n",
    "from tensorflow.keras.metrics import *\n",
    "from tensorflow.keras import callbacks\n",
    "from tensorflow.keras.callbacks import EarlyStopping\n",
    "from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, confusion_matrix\n",
    "import numpy as np\n",
    "import sklearn.metrics as metrics\n",
    "from collections import Counter\n",
    "from sklearn import preprocessing\n",
    "from tensorflow.keras import backend as K"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def precision(y_true, y_pred):\n",
    "    # Calculates the precision\n",
    "    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n",
    "    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n",
    "    precision = true_positives / (predicted_positives + K.epsilon())\n",
    "    return precision\n",
    "\n",
    "\n",
    "def recall(y_true, y_pred):\n",
    "    # Calculates the recall\n",
    "    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n",
    "    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n",
    "    recall = true_positives / (possible_positives + K.epsilon())\n",
    "    return recall\n",
    "\n",
    "def fbeta_score(y_true, y_pred, beta=1):\n",
    "    # Calculates the F score, the weighted harmonic mean of precision and recall.\n",
    "    if beta < 0:\n",
    "        raise ValueError('The lowest choosable beta is zero (only precision).')\n",
    "        \n",
    "    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0.0:\n",
    "        return 0.0\n",
    "\n",
    "    p = precision(y_true, y_pred)\n",
    "    r = recall(y_true, y_pred)\n",
    "    bb = beta ** 2\n",
    "    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())\n",
    "    return fbeta_score\n",
    "\n",
    "def f1(y_true, y_pred):\n",
    "    return fbeta_score(y_true, y_pred, beta=1)\n",
    "\n",
    "def transfer(y_pred):\n",
    "    return [[0,1][x>0.5] for x in y_pred.reshape(-1)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def Model(x):\n",
    "    model = Sequential()\n",
    "    model.add(Dense(1024, activation='elu', input_shape=(x.shape[1],)))\n",
    "    model.add(Dense(512, activation='elu'))\n",
    "    model.add(Dense(256, activation='relu'))\n",
    "    model.add(Dense(64, activation='relu'))\n",
    "    model.add(Dense(1, activation='sigmoid'))\n",
    "    #optimizer = Adam(lr = 0.0001)\n",
    "    model.compile(optimizer = 'Adam', loss = binary_crossentropy, metrics=[binary_accuracy, f1, recall, precision])\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_dnn(train_index_all, test_index_all, samples):\n",
    "    \n",
    "    fold = 0\n",
    "    for train_idx, test_idx in zip(train_index_all, test_index_all):\n",
    "        print('----------------------- Fold = ', str(fold))\n",
    "        train_idx, test_idx = train_index_all[0], test_index_all[0]\n",
    "        X = samples.iloc[:, 3:]\n",
    "        y = samples['label']\n",
    "\n",
    "        scaler = preprocessing.MinMaxScaler().fit(X.iloc[train_idx,:])\n",
    "        X = scaler.transform(X)\n",
    "\n",
    "        x_train, y_train = X[train_idx], y[train_idx]\n",
    "        x_test, y_test = X[test_idx], y[test_idx]\n",
    "\n",
    "        model = Model(x_train)\n",
    "        early_stopping = EarlyStopping(monitor='val_loss', patience = 50)\n",
    "        model.fit(x_train, y_train, epochs = 500, batch_size = 250, validation_data=(x_test, y_test), callbacks=[early_stopping],verbose = 1)\n",
    "\n",
    "        y_train_pred, y_test_pred = model.predict(x_train, verbose = 0), model.predict(x_test, verbose = 0)\n",
    "        y_train_prob, y_test_prob = model.predict_proba(x_train), model.predict_proba(x_test)\n",
    "\n",
    "        if len(Counter(y_train_pred.reshape(-1))) > 2: \n",
    "            y_train_pred = transfer(y_train_pred)\n",
    "        else:\n",
    "            print(Counter(y_train_pred.reshape(-1)))\n",
    "        if len(Counter(y_test_pred.reshape(-1))) > 2: \n",
    "            y_test_pred = transfer(y_test_pred)\n",
    "        else:\n",
    "            print(Counter(y_test_pred.reshape(-1)))\n",
    "\n",
    "        performances_train = performances(y_train, y_train_pred, y_train_prob)\n",
    "        performances_test = performances(y_test, y_test_pred, y_test_prob)\n",
    "\n",
    "        fold += 1\n",
    "    \n",
    "    return y_train, y_test, y_train_pred, y_test_pred, y_train_prob, y_test_prob, performances_train, performances_test"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Run"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## RF"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "5it [00:00, 277.78it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# gene = 11177 | peco = 24\n",
      "# Test: gene = 2235 | peco = 4\n",
      "========== isbalance = True | task = Tp\n",
      "-------Fold  0\n",
      "# Pairs: Train = 37692 | Test = 9424\n",
      "-------Fold  1\n",
      "# Pairs: Train = 37693 | Test = 9423\n",
      "-------Fold  2\n",
      "# Pairs: Train = 37693 | Test = 9423\n",
      "-------Fold  3\n",
      "# Pairs: Train = 37693 | Test = 9423\n",
      "-------Fold  4\n",
      "# Pairs: Train = 37693 | Test = 9423\n",
      "-----------------------Fold =  0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train:\n",
      "tn = 18871, fp = 0, fn = 0, tp = 18821\n",
      "y_pred: 0 = 18871 | 1 = 18821\n",
      "y_true: 0 = 18871 | 1 = 18821\n",
      "acc=1.0000|precision=1.0000|recall=1.0000|f1=1.0000|auc=1.0000|aupr=1.0000|pos_acc=1.0000|neg_acc=1.0000\n",
      "Test:\n",
      "tn = 3649, fp = 1038, fn = 973, tp = 3764\n",
      "y_pred: 0 = 4622 | 1 = 4802\n",
      "y_true: 0 = 4687 | 1 = 4737\n",
      "acc=0.7866|precision=0.7838|recall=0.7946|f1=0.7892|auc=0.9012|aupr=0.9047|pos_acc=0.7946|neg_acc=0.7895\n",
      "-----------------------Fold =  1\n",
      "Train:\n",
      "tn = 18800, fp = 0, fn = 0, tp = 18893\n",
      "y_pred: 0 = 18800 | 1 = 18893\n",
      "y_true: 0 = 18800 | 1 = 18893\n",
      "acc=1.0000|precision=1.0000|recall=1.0000|f1=1.0000|auc=1.0000|aupr=1.0000|pos_acc=1.0000|neg_acc=1.0000\n",
      "Test:\n",
      "tn = 3627, fp = 1131, fn = 943, tp = 3722\n",
      "y_pred: 0 = 4570 | 1 = 4853\n",
      "y_true: 0 = 4758 | 1 = 4665\n",
      "acc=0.7799|precision=0.7669|recall=0.7979|f1=0.7821|auc=0.8960|aupr=0.8978|pos_acc=0.7979|neg_acc=0.7937\n",
      "-----------------------Fold =  2\n",
      "Train:\n",
      "tn = 18884, fp = 0, fn = 0, tp = 18809\n",
      "y_pred: 0 = 18884 | 1 = 18809\n",
      "y_true: 0 = 18884 | 1 = 18809\n",
      "acc=1.0000|precision=1.0000|recall=1.0000|f1=1.0000|auc=1.0000|aupr=1.0000|pos_acc=1.0000|neg_acc=1.0000\n",
      "Test:\n",
      "tn = 3589, fp = 1085, fn = 924, tp = 3825\n",
      "y_pred: 0 = 4513 | 1 = 4910\n",
      "y_true: 0 = 4674 | 1 = 4749\n",
      "acc=0.7868|precision=0.7790|recall=0.8054|f1=0.7920|auc=0.8981|aupr=0.9030|pos_acc=0.8054|neg_acc=0.7953\n",
      "-----------------------Fold =  3\n",
      "Train:\n",
      "tn = 18878, fp = 0, fn = 0, tp = 18815\n",
      "y_pred: 0 = 18878 | 1 = 18815\n",
      "y_true: 0 = 18878 | 1 = 18815\n",
      "acc=1.0000|precision=1.0000|recall=1.0000|f1=1.0000|auc=1.0000|aupr=1.0000|pos_acc=1.0000|neg_acc=1.0000\n",
      "Test:\n",
      "tn = 3661, fp = 1019, fn = 915, tp = 3828\n",
      "y_pred: 0 = 4576 | 1 = 4847\n",
      "y_true: 0 = 4680 | 1 = 4743\n",
      "acc=0.7948|precision=0.7898|recall=0.8071|f1=0.7983|auc=0.9045|aupr=0.9062|pos_acc=0.8071|neg_acc=0.8000\n",
      "-----------------------Fold =  4\n",
      "Train:\n",
      "tn = 18799, fp = 0, fn = 0, tp = 18894\n",
      "y_pred: 0 = 18799 | 1 = 18894\n",
      "y_true: 0 = 18799 | 1 = 18894\n",
      "acc=1.0000|precision=1.0000|recall=1.0000|f1=1.0000|auc=1.0000|aupr=1.0000|pos_acc=1.0000|neg_acc=1.0000\n",
      "Test:\n",
      "tn = 3654, fp = 1105, fn = 950, tp = 3714\n",
      "y_pred: 0 = 4604 | 1 = 4819\n",
      "y_true: 0 = 4759 | 1 = 4664\n",
      "acc=0.7819|precision=0.7707|recall=0.7963|f1=0.7833|auc=0.8987|aupr=0.9000|pos_acc=0.7963|neg_acc=0.7937\n"
     ]
    }
   ],
   "source": [
    "directory = '../../data'\n",
    "for isbalance in [True]:\n",
    "    \n",
    "    IPE, IG, dtp, gene_ids, peco_ids, gene_test_num, peco_test_num, samples = obtain_data(directory, \n",
    "                                                                                                 isbalance)\n",
    "    for task in ['Tp', 'Tg', 'Tpe']:   \n",
    "        print('========== isbalance = {} | task = {}'.format(isbalance, task))\n",
    "        \n",
    "        if task == 'Tp':\n",
    "            train_index_all, test_index_all, train_id_all, test_id_all = generate_task_Tp_train_test_idx(samples)\n",
    "            \n",
    "        elif task == 'Tg':\n",
    "            item = 'gene_idx'\n",
    "            ids = gene_ids\n",
    "            train_index_all, test_index_all, train_id_all, test_id_all = generate_task_Tg_Tpe_train_test_idx(item, ids, dtp)\n",
    "\n",
    "        elif task == 'Tpe':\n",
    "            item = 'peco_idx'\n",
    "            ids = peco_ids\n",
    "            train_index_all, test_index_all, train_id_all, test_id_all = generate_task_Tg_Tpe_train_test_idx(item, ids, dtp)\n",
    "\n",
    "        ys_train, metrics_train, ys_test, metrics_test = run_rf(train_index_all, test_index_all, samples)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## DNN "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "5it [00:00, 192.31it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "# gene = 11177 | peco = 24\n",
      "# Test: gene = 2235 | peco = 4\n",
      "========== isbalance = True | task = Tp\n",
      "-------Fold  0\n",
      "# Pairs: Train = 37692 | Test = 9424\n",
      "-------Fold  1\n",
      "# Pairs: Train = 37693 | Test = 9423\n",
      "-------Fold  2\n",
      "# Pairs: Train = 37693 | Test = 9423\n",
      "-------Fold  3\n",
      "# Pairs: Train = 37693 | Test = 9423\n",
      "-------Fold  4\n",
      "# Pairs: Train = 37693 | Test = 9423\n",
      "Epoch 1/500\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "151/151 [==============================] - 2s 12ms/step - loss: 0.6978 - binary_accuracy: 0.5086 - f1: 0.3737 - recall: 0.4573 - precision: 0.4631 - val_loss: 0.6942 - val_binary_accuracy: 0.5184 - val_f1: 0.5531 - val_recall: 0.7451 - val_precision: 0.4890\n",
      "Epoch 2/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.6932 - binary_accuracy: 0.5160 - f1: 0.5023 - recall: 0.5542 - precision: 0.5236 - val_loss: 0.6929 - val_binary_accuracy: 0.5064 - val_f1: 0.4286 - val_recall: 0.3593 - val_precision: 0.5912\n",
      "Epoch 3/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6934 - binary_accuracy: 0.5149 - f1: 0.4678 - recall: 0.4850 - precision: 0.5215 - val_loss: 0.6929 - val_binary_accuracy: 0.5131 - val_f1: 0.2000 - val_recall: 0.1358 - val_precision: 0.5378\n",
      "Epoch 4/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6928 - binary_accuracy: 0.5173 - f1: 0.4159 - recall: 0.4287 - precision: 0.5195 - val_loss: 0.6927 - val_binary_accuracy: 0.5198 - val_f1: 0.3061 - val_recall: 0.2295 - val_precision: 0.5726\n",
      "Epoch 5/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6927 - binary_accuracy: 0.5144 - f1: 0.3916 - recall: 0.3959 - precision: 0.5165 - val_loss: 0.6970 - val_binary_accuracy: 0.5128 - val_f1: 0.5909 - val_recall: 0.8273 - val_precision: 0.5078\n",
      "Epoch 6/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6925 - binary_accuracy: 0.5200 - f1: 0.4775 - recall: 0.4816 - precision: 0.5378 - val_loss: 0.6923 - val_binary_accuracy: 0.5134 - val_f1: 0.2306 - val_recall: 0.1649 - val_precision: 0.5233\n",
      "Epoch 7/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6918 - binary_accuracy: 0.5187 - f1: 0.4224 - recall: 0.4054 - precision: 0.5328 - val_loss: 0.6920 - val_binary_accuracy: 0.5125 - val_f1: 0.3926 - val_recall: 0.3429 - val_precision: 0.5541\n",
      "Epoch 8/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6921 - binary_accuracy: 0.5169 - f1: 0.4272 - recall: 0.4226 - precision: 0.5257 - val_loss: 0.6910 - val_binary_accuracy: 0.5319 - val_f1: 0.4150 - val_recall: 0.3936 - val_precision: 0.5251\n",
      "Epoch 9/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6913 - binary_accuracy: 0.5240 - f1: 0.4427 - recall: 0.4108 - precision: 0.5419 - val_loss: 0.6925 - val_binary_accuracy: 0.5208 - val_f1: 0.4141 - val_recall: 0.4085 - val_precision: 0.5015\n",
      "Epoch 10/500\n",
      "151/151 [==============================] - 2s 10ms/step - loss: 0.6917 - binary_accuracy: 0.5233 - f1: 0.4941 - recall: 0.5208 - precision: 0.5346 - val_loss: 0.6909 - val_binary_accuracy: 0.5311 - val_f1: 0.4090 - val_recall: 0.3841 - val_precision: 0.5280\n",
      "Epoch 11/500\n",
      "151/151 [==============================] - 2s 10ms/step - loss: 0.6920 - binary_accuracy: 0.5174 - f1: 0.4639 - recall: 0.4883 - precision: 0.5305 - val_loss: 0.6909 - val_binary_accuracy: 0.5264 - val_f1: 0.4767 - val_recall: 0.5314 - val_precision: 0.4948\n",
      "Epoch 12/500\n",
      "151/151 [==============================] - 2s 10ms/step - loss: 0.6919 - binary_accuracy: 0.5181 - f1: 0.4761 - recall: 0.4808 - precision: 0.5306 - val_loss: 0.6940 - val_binary_accuracy: 0.5034 - val_f1: 0.0636 - val_recall: 0.0345 - val_precision: 0.6349\n",
      "Epoch 13/500\n",
      "151/151 [==============================] - 2s 10ms/step - loss: 0.6920 - binary_accuracy: 0.5192 - f1: 0.3711 - recall: 0.3304 - precision: 0.5522 - val_loss: 0.6904 - val_binary_accuracy: 0.5219 - val_f1: 0.3845 - val_recall: 0.3520 - val_precision: 0.5080\n",
      "Epoch 14/500\n",
      "151/151 [==============================] - 2s 10ms/step - loss: 0.6909 - binary_accuracy: 0.5278 - f1: 0.4350 - recall: 0.3811 - precision: 0.5418 - val_loss: 0.6916 - val_binary_accuracy: 0.5135 - val_f1: 0.5467 - val_recall: 0.6359 - val_precision: 0.5290\n",
      "Epoch 15/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6910 - binary_accuracy: 0.5296 - f1: 0.4271 - recall: 0.3832 - precision: 0.5388 - val_loss: 0.6908 - val_binary_accuracy: 0.5307 - val_f1: 0.5224 - val_recall: 0.6353 - val_precision: 0.5002\n",
      "Epoch 16/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6904 - binary_accuracy: 0.5313 - f1: 0.4506 - recall: 0.4239 - precision: 0.5461 - val_loss: 0.6906 - val_binary_accuracy: 0.5281 - val_f1: 0.4206 - val_recall: 0.4046 - val_precision: 0.5279\n",
      "Epoch 17/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6905 - binary_accuracy: 0.5301 - f1: 0.4897 - recall: 0.5060 - precision: 0.5420 - val_loss: 0.6907 - val_binary_accuracy: 0.5272 - val_f1: 0.3848 - val_recall: 0.3500 - val_precision: 0.5289\n",
      "Epoch 18/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6902 - binary_accuracy: 0.5306 - f1: 0.4572 - recall: 0.4150 - precision: 0.5455 - val_loss: 0.6899 - val_binary_accuracy: 0.5236 - val_f1: 0.3943 - val_recall: 0.3532 - val_precision: 0.5290\n",
      "Epoch 19/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6895 - binary_accuracy: 0.5295 - f1: 0.4406 - recall: 0.3986 - precision: 0.5508 - val_loss: 0.6916 - val_binary_accuracy: 0.5125 - val_f1: 0.3855 - val_recall: 0.3274 - val_precision: 0.5591ry_accuracy: 0.5296 - f1: 0.4407 - recall: 0.3987 - precision: 0.551\n",
      "Epoch 20/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6897 - binary_accuracy: 0.5257 - f1: 0.4451 - recall: 0.4205 - precision: 0.5408 - val_loss: 0.6886 - val_binary_accuracy: 0.5233 - val_f1: 0.4434 - val_recall: 0.4503 - val_precision: 0.5062\n",
      "Epoch 21/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.6883 - binary_accuracy: 0.5294 - f1: 0.4581 - recall: 0.4151 - precision: 0.5459 - val_loss: 0.6911 - val_binary_accuracy: 0.5157 - val_f1: 0.2859 - val_recall: 0.2231 - val_precision: 0.5386\n",
      "Epoch 22/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6886 - binary_accuracy: 0.5296 - f1: 0.4418 - recall: 0.3912 - precision: 0.5515 - val_loss: 0.6900 - val_binary_accuracy: 0.5349 - val_f1: 0.4504 - val_recall: 0.4569 - val_precision: 0.5290\n",
      "Epoch 23/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6900 - binary_accuracy: 0.5272 - f1: 0.4541 - recall: 0.4121 - precision: 0.5442 - val_loss: 0.6870 - val_binary_accuracy: 0.5328 - val_f1: 0.3441 - val_recall: 0.2822 - val_precision: 0.5398\n",
      "Epoch 24/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6892 - binary_accuracy: 0.5305 - f1: 0.4435 - recall: 0.3966 - precision: 0.5476 - val_loss: 0.6856 - val_binary_accuracy: 0.5318 - val_f1: 0.3472 - val_recall: 0.2811 - val_precision: 0.5493\n",
      "Epoch 25/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6871 - binary_accuracy: 0.5358 - f1: 0.4043 - recall: 0.3408 - precision: 0.5660 - val_loss: 0.6901 - val_binary_accuracy: 0.5154 - val_f1: 0.5330 - val_recall: 0.6392 - val_precision: 0.5150\n",
      "Epoch 26/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6902 - binary_accuracy: 0.5315 - f1: 0.4205 - recall: 0.3666 - precision: 0.5622 - val_loss: 0.6876 - val_binary_accuracy: 0.5333 - val_f1: 0.3861 - val_recall: 0.3400 - val_precision: 0.5404\n",
      "Epoch 27/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6897 - binary_accuracy: 0.5343 - f1: 0.4392 - recall: 0.3916 - precision: 0.5521 - val_loss: 0.6908 - val_binary_accuracy: 0.5173 - val_f1: 0.5583 - val_recall: 0.6976 - val_precision: 0.5139\n",
      "Epoch 28/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6867 - binary_accuracy: 0.5376 - f1: 0.4865 - recall: 0.4543 - precision: 0.5508 - val_loss: 0.6888 - val_binary_accuracy: 0.5281 - val_f1: 0.3988 - val_recall: 0.3520 - val_precision: 0.5475\n",
      "Epoch 29/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6895 - binary_accuracy: 0.5269 - f1: 0.4088 - recall: 0.3582 - precision: 0.5531 - val_loss: 0.6900 - val_binary_accuracy: 0.5264 - val_f1: 0.3854 - val_recall: 0.3426 - val_precision: 0.5318\n",
      "Epoch 30/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6879 - binary_accuracy: 0.5339 - f1: 0.4547 - recall: 0.4051 - precision: 0.5489 - val_loss: 0.6885 - val_binary_accuracy: 0.5319 - val_f1: 0.3539 - val_recall: 0.3027 - val_precision: 0.5279\n",
      "Epoch 31/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6870 - binary_accuracy: 0.5345 - f1: 0.4405 - recall: 0.3935 - precision: 0.5561 - val_loss: 0.6882 - val_binary_accuracy: 0.5271 - val_f1: 0.3061 - val_recall: 0.2331 - val_precision: 0.5306\n",
      "Epoch 32/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6853 - binary_accuracy: 0.5437 - f1: 0.4313 - recall: 0.3555 - precision: 0.5727 - val_loss: 0.6838 - val_binary_accuracy: 0.5348 - val_f1: 0.3619 - val_recall: 0.3023 - val_precision: 0.5472\n",
      "Epoch 33/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6805 - binary_accuracy: 0.5493 - f1: 0.4705 - recall: 0.4101 - precision: 0.5751 - val_loss: 0.6841 - val_binary_accuracy: 0.5342 - val_f1: 0.4398 - val_recall: 0.4283 - val_precision: 0.5254\n",
      "Epoch 34/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6789 - binary_accuracy: 0.5531 - f1: 0.4581 - recall: 0.3893 - precision: 0.5858 - val_loss: 0.6791 - val_binary_accuracy: 0.5417 - val_f1: 0.3688 - val_recall: 0.3093 - val_precision: 0.5420\n",
      "Epoch 35/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6799 - binary_accuracy: 0.5463 - f1: 0.4458 - recall: 0.3742 - precision: 0.5784 - val_loss: 0.6864 - val_binary_accuracy: 0.5305 - val_f1: 0.3257 - val_recall: 0.2575 - val_precision: 0.5328\n",
      "Epoch 36/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6818 - binary_accuracy: 0.5463 - f1: 0.4188 - recall: 0.3390 - precision: 0.5742 - val_loss: 0.6785 - val_binary_accuracy: 0.5456 - val_f1: 0.3572 - val_recall: 0.2921 - val_precision: 0.5596\n",
      "Epoch 37/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6742 - binary_accuracy: 0.5579 - f1: 0.4881 - recall: 0.4305 - precision: 0.5877 - val_loss: 0.6762 - val_binary_accuracy: 0.5493 - val_f1: 0.3397 - val_recall: 0.2671 - val_precision: 0.5786\n",
      "Epoch 38/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6748 - binary_accuracy: 0.5593 - f1: 0.4539 - recall: 0.3832 - precision: 0.5873 - val_loss: 0.6727 - val_binary_accuracy: 0.5521 - val_f1: 0.3275 - val_recall: 0.2462 - val_precision: 0.6110\n",
      "Epoch 39/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6680 - binary_accuracy: 0.5574 - f1: 0.4364 - recall: 0.3521 - precision: 0.6038 - val_loss: 0.6678 - val_binary_accuracy: 0.5577 - val_f1: 0.4386 - val_recall: 0.4057 - val_precision: 0.5696\n",
      "Epoch 40/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6628 - binary_accuracy: 0.5697 - f1: 0.4925 - recall: 0.4247 - precision: 0.6010 - val_loss: 0.6643 - val_binary_accuracy: 0.5557 - val_f1: 0.4755 - val_recall: 0.4847 - val_precision: 0.5258\n",
      "Epoch 41/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6574 - binary_accuracy: 0.5720 - f1: 0.5280 - recall: 0.4871 - precision: 0.5893 - val_loss: 0.6709 - val_binary_accuracy: 0.5656 - val_f1: 0.4135 - val_recall: 0.3654 - val_precision: 0.5745\n",
      "Epoch 42/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6616 - binary_accuracy: 0.5656 - f1: 0.5099 - recall: 0.4589 - precision: 0.5938 - val_loss: 0.6520 - val_binary_accuracy: 0.5713 - val_f1: 0.4149 - val_recall: 0.3526 - val_precision: 0.5879\n",
      "Epoch 43/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6452 - binary_accuracy: 0.5852 - f1: 0.5065 - recall: 0.4336 - precision: 0.6222 - val_loss: 0.6516 - val_binary_accuracy: 0.5679 - val_f1: 0.4665 - val_recall: 0.4502 - val_precision: 0.5543\n",
      "Epoch 44/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6419 - binary_accuracy: 0.5881 - f1: 0.5290 - recall: 0.4690 - precision: 0.6202 - val_loss: 0.6384 - val_binary_accuracy: 0.5789 - val_f1: 0.4272 - val_recall: 0.3642 - val_precision: 0.5914\n",
      "Epoch 45/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6270 - binary_accuracy: 0.6017 - f1: 0.5185 - recall: 0.4351 - precision: 0.6536 - val_loss: 0.6438 - val_binary_accuracy: 0.5735 - val_f1: 0.5050 - val_recall: 0.4951 - val_precision: 0.5762\n",
      "Epoch 46/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6211 - binary_accuracy: 0.6074 - f1: 0.5373 - recall: 0.4641 - precision: 0.6554 - val_loss: 0.6281 - val_binary_accuracy: 0.5896 - val_f1: 0.3911 - val_recall: 0.2975 - val_precision: 0.6278\n",
      "Epoch 47/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6130 - binary_accuracy: 0.6076 - f1: 0.5482 - recall: 0.4803 - precision: 0.6642 - val_loss: 0.6263 - val_binary_accuracy: 0.5947 - val_f1: 0.5338 - val_recall: 0.5633 - val_precision: 0.5629\n",
      "Epoch 48/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6074 - binary_accuracy: 0.6073 - f1: 0.5493 - recall: 0.4836 - precision: 0.6507 - val_loss: 0.6251 - val_binary_accuracy: 0.5904 - val_f1: 0.4189 - val_recall: 0.3327 - val_precision: 0.6207\n",
      "Epoch 49/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.6006 - binary_accuracy: 0.6165 - f1: 0.5434 - recall: 0.4629 - precision: 0.6726 - val_loss: 0.6077 - val_binary_accuracy: 0.6113 - val_f1: 0.4660 - val_recall: 0.3905 - val_precision: 0.6320\n",
      "Epoch 50/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5894 - binary_accuracy: 0.6273 - f1: 0.5580 - recall: 0.4787 - precision: 0.6831 - val_loss: 0.6140 - val_binary_accuracy: 0.6077 - val_f1: 0.4887 - val_recall: 0.4475 - val_precision: 0.5965\n",
      "Epoch 51/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5841 - binary_accuracy: 0.6344 - f1: 0.5702 - recall: 0.4938 - precision: 0.6880 - val_loss: 0.6028 - val_binary_accuracy: 0.6099 - val_f1: 0.4743 - val_recall: 0.4162 - val_precision: 0.6143\n",
      "Epoch 52/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5749 - binary_accuracy: 0.6379 - f1: 0.5844 - recall: 0.5134 - precision: 0.6885 - val_loss: 0.6070 - val_binary_accuracy: 0.6131 - val_f1: 0.4796 - val_recall: 0.4222 - val_precision: 0.6206\n",
      "Epoch 53/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5730 - binary_accuracy: 0.6346 - f1: 0.5720 - recall: 0.4931 - precision: 0.6858 - val_loss: 0.5940 - val_binary_accuracy: 0.6112 - val_f1: 0.5179 - val_recall: 0.4970 - val_precision: 0.5884\n",
      "Epoch 54/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5659 - binary_accuracy: 0.6429 - f1: 0.5949 - recall: 0.5317 - precision: 0.6846 - val_loss: 0.6030 - val_binary_accuracy: 0.6159 - val_f1: 0.5491 - val_recall: 0.5701 - val_precision: 0.5808\n",
      "Epoch 55/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5636 - binary_accuracy: 0.6420 - f1: 0.6159 - recall: 0.5783 - precision: 0.6712 - val_loss: 0.5940 - val_binary_accuracy: 0.6230 - val_f1: 0.5069 - val_recall: 0.4610 - val_precision: 0.6117\n",
      "Epoch 56/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5530 - binary_accuracy: 0.6570 - f1: 0.6038 - recall: 0.5245 - precision: 0.7184 - val_loss: 0.5817 - val_binary_accuracy: 0.6277 - val_f1: 0.5128 - val_recall: 0.4684 - val_precision: 0.6159\n",
      "Epoch 57/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.5503 - binary_accuracy: 0.6573 - f1: 0.6214 - recall: 0.5656 - precision: 0.7028 - val_loss: 0.5971 - val_binary_accuracy: 0.6174 - val_f1: 0.5095 - val_recall: 0.4735 - val_precision: 0.6079\n",
      "Epoch 58/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5449 - binary_accuracy: 0.6597 - f1: 0.6246 - recall: 0.5676 - precision: 0.7006 - val_loss: 0.5842 - val_binary_accuracy: 0.6354 - val_f1: 0.5264 - val_recall: 0.4866 - val_precision: 0.6251\n",
      "Epoch 59/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5436 - binary_accuracy: 0.6632 - f1: 0.6241 - recall: 0.5671 - precision: 0.6989 - val_loss: 0.6010 - val_binary_accuracy: 0.6165 - val_f1: 0.4816 - val_recall: 0.4153 - val_precision: 0.6290\n",
      "Epoch 60/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5385 - binary_accuracy: 0.6672 - f1: 0.6269 - recall: 0.5669 - precision: 0.7104 - val_loss: 0.5835 - val_binary_accuracy: 0.6277 - val_f1: 0.4992 - val_recall: 0.4426 - val_precision: 0.6225\n",
      "Epoch 61/500\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "151/151 [==============================] - 2s 13ms/step - loss: 0.5275 - binary_accuracy: 0.6739 - f1: 0.6368 - recall: 0.5737 - precision: 0.7203 - val_loss: 0.5843 - val_binary_accuracy: 0.6435 - val_f1: 0.5238 - val_recall: 0.4718 - val_precision: 0.6374\n",
      "Epoch 62/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.5253 - binary_accuracy: 0.6770 - f1: 0.6345 - recall: 0.5682 - precision: 0.7263 - val_loss: 0.5752 - val_binary_accuracy: 0.6419 - val_f1: 0.5446 - val_recall: 0.5203 - val_precision: 0.6103\n",
      "Epoch 63/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.5168 - binary_accuracy: 0.6835 - f1: 0.6489 - recall: 0.5913 - precision: 0.7262 - val_loss: 0.5841 - val_binary_accuracy: 0.6366 - val_f1: 0.5330 - val_recall: 0.5050 - val_precision: 0.6037\n",
      "Epoch 64/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.5194 - binary_accuracy: 0.6802 - f1: 0.6448 - recall: 0.5832 - precision: 0.7263 - val_loss: 0.5802 - val_binary_accuracy: 0.6322 - val_f1: 0.5058 - val_recall: 0.4483 - val_precision: 0.6315\n",
      "Epoch 65/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.5123 - binary_accuracy: 0.6868 - f1: 0.6488 - recall: 0.5838 - precision: 0.7349 - val_loss: 0.5798 - val_binary_accuracy: 0.6413 - val_f1: 0.5329 - val_recall: 0.4926 - val_precision: 0.6239\n",
      "Epoch 66/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5240 - binary_accuracy: 0.6773 - f1: 0.6467 - recall: 0.5945 - precision: 0.7161 - val_loss: 0.5723 - val_binary_accuracy: 0.6580 - val_f1: 0.5478 - val_recall: 0.5005 - val_precision: 0.6444\n",
      "Epoch 67/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4998 - binary_accuracy: 0.6976 - f1: 0.6630 - recall: 0.6008 - precision: 0.7440 - val_loss: 0.5883 - val_binary_accuracy: 0.6470 - val_f1: 0.5687 - val_recall: 0.5733 - val_precision: 0.6068\n",
      "Epoch 68/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5051 - binary_accuracy: 0.6950 - f1: 0.6660 - recall: 0.6107 - precision: 0.7369 - val_loss: 0.5865 - val_binary_accuracy: 0.6404 - val_f1: 0.5444 - val_recall: 0.5202 - val_precision: 0.6149\n",
      "Epoch 69/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5003 - binary_accuracy: 0.6951 - f1: 0.6671 - recall: 0.6177 - precision: 0.7307 - val_loss: 0.5761 - val_binary_accuracy: 0.6493 - val_f1: 0.5138 - val_recall: 0.4462 - val_precision: 0.6435\n",
      "Epoch 70/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5005 - binary_accuracy: 0.6972 - f1: 0.6595 - recall: 0.5922 - precision: 0.7500 - val_loss: 0.5796 - val_binary_accuracy: 0.6556 - val_f1: 0.5424 - val_recall: 0.4949 - val_precision: 0.6411\n",
      "Epoch 71/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.5054 - binary_accuracy: 0.6943 - f1: 0.6710 - recall: 0.6224 - precision: 0.7354 - val_loss: 0.5918 - val_binary_accuracy: 0.6427 - val_f1: 0.5212 - val_recall: 0.4632 - val_precision: 0.6435\n",
      "Epoch 72/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4954 - binary_accuracy: 0.6970 - f1: 0.6654 - recall: 0.6078 - precision: 0.7410 - val_loss: 0.5898 - val_binary_accuracy: 0.6540 - val_f1: 0.5241 - val_recall: 0.4580 - val_precision: 0.6480\n",
      "Epoch 73/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4865 - binary_accuracy: 0.6999 - f1: 0.6700 - recall: 0.6104 - precision: 0.7501 - val_loss: 0.5809 - val_binary_accuracy: 0.6469 - val_f1: 0.5578 - val_recall: 0.5462 - val_precision: 0.6159\n",
      "Epoch 74/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4886 - binary_accuracy: 0.7035 - f1: 0.6688 - recall: 0.6048 - precision: 0.7554 - val_loss: 0.5826 - val_binary_accuracy: 0.6545 - val_f1: 0.5620 - val_recall: 0.5456 - val_precision: 0.6189\n",
      "Epoch 75/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4814 - binary_accuracy: 0.7099 - f1: 0.6805 - recall: 0.6234 - precision: 0.7551 - val_loss: 0.5829 - val_binary_accuracy: 0.6611 - val_f1: 0.5230 - val_recall: 0.4477 - val_precision: 0.6565\n",
      "Epoch 76/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4864 - binary_accuracy: 0.7018 - f1: 0.6714 - recall: 0.6124 - precision: 0.7486 - val_loss: 0.5779 - val_binary_accuracy: 0.6628 - val_f1: 0.5412 - val_recall: 0.4827 - val_precision: 0.6517\n",
      "Epoch 77/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4764 - binary_accuracy: 0.7132 - f1: 0.6810 - recall: 0.6168 - precision: 0.7673 - val_loss: 0.5867 - val_binary_accuracy: 0.6547 - val_f1: 0.5286 - val_recall: 0.4697 - val_precision: 0.6356\n",
      "Epoch 78/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4715 - binary_accuracy: 0.7189 - f1: 0.6894 - recall: 0.6291 - precision: 0.7692 - val_loss: 0.5864 - val_binary_accuracy: 0.6682 - val_f1: 0.5680 - val_recall: 0.5351 - val_precision: 0.6297\n",
      "Epoch 79/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4664 - binary_accuracy: 0.7219 - f1: 0.6937 - recall: 0.6339 - precision: 0.7705 - val_loss: 0.5965 - val_binary_accuracy: 0.6562 - val_f1: 0.6075 - val_recall: 0.6627 - val_precision: 0.5963\n",
      "Epoch 80/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4718 - binary_accuracy: 0.7153 - f1: 0.6988 - recall: 0.6660 - precision: 0.7442 - val_loss: 0.6141 - val_binary_accuracy: 0.6507 - val_f1: 0.5680 - val_recall: 0.5673 - val_precision: 0.6060\n",
      "Epoch 81/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4921 - binary_accuracy: 0.7028 - f1: 0.6876 - recall: 0.6574 - precision: 0.7264 - val_loss: 0.5779 - val_binary_accuracy: 0.6634 - val_f1: 0.5724 - val_recall: 0.5601 - val_precision: 0.6229\n",
      "Epoch 82/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4601 - binary_accuracy: 0.7220 - f1: 0.6963 - recall: 0.6417 - precision: 0.7653 - val_loss: 0.5932 - val_binary_accuracy: 0.6666 - val_f1: 0.5609 - val_recall: 0.5233 - val_precision: 0.6419\n",
      "Epoch 83/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4729 - binary_accuracy: 0.7176 - f1: 0.6954 - recall: 0.6457 - precision: 0.7598 - val_loss: 0.5972 - val_binary_accuracy: 0.6601 - val_f1: 0.5500 - val_recall: 0.5091 - val_precision: 0.6351\n",
      "Epoch 84/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4574 - binary_accuracy: 0.7276 - f1: 0.7059 - recall: 0.6523 - precision: 0.7731 - val_loss: 0.5846 - val_binary_accuracy: 0.6704 - val_f1: 0.5727 - val_recall: 0.5442 - val_precision: 0.6376\n",
      "Epoch 85/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4447 - binary_accuracy: 0.7352 - f1: 0.7120 - recall: 0.6585 - precision: 0.7791 - val_loss: 0.5975 - val_binary_accuracy: 0.6661 - val_f1: 0.5742 - val_recall: 0.5507 - val_precision: 0.6333\n",
      "Epoch 86/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4481 - binary_accuracy: 0.7324 - f1: 0.7063 - recall: 0.6478 - precision: 0.7813 - val_loss: 0.5872 - val_binary_accuracy: 0.6725 - val_f1: 0.5841 - val_recall: 0.5659 - val_precision: 0.6332\n",
      "Epoch 87/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4514 - binary_accuracy: 0.7385 - f1: 0.7111 - recall: 0.6564 - precision: 0.7829 - val_loss: 0.6770 - val_binary_accuracy: 0.6338 - val_f1: 0.5269 - val_recall: 0.4938 - val_precision: 0.6150\n",
      "Epoch 88/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4793 - binary_accuracy: 0.7138 - f1: 0.6909 - recall: 0.6419 - precision: 0.7550 - val_loss: 0.5997 - val_binary_accuracy: 0.6680 - val_f1: 0.5617 - val_recall: 0.5171 - val_precision: 0.6434\n",
      "Epoch 89/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4471 - binary_accuracy: 0.7317 - f1: 0.7030 - recall: 0.6383 - precision: 0.7868 - val_loss: 0.6201 - val_binary_accuracy: 0.6650 - val_f1: 0.5909 - val_recall: 0.6013 - val_precision: 0.6211\n",
      "Epoch 90/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4495 - binary_accuracy: 0.7359 - f1: 0.7134 - recall: 0.6626 - precision: 0.7768 - val_loss: 0.6266 - val_binary_accuracy: 0.6579 - val_f1: 0.5648 - val_recall: 0.5452 - val_precision: 0.6239\n",
      "Epoch 91/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4405 - binary_accuracy: 0.7373 - f1: 0.7155 - recall: 0.6662 - precision: 0.7775 - val_loss: 0.6056 - val_binary_accuracy: 0.6646 - val_f1: 0.5868 - val_recall: 0.5763 - val_precision: 0.6287\n",
      "Epoch 92/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4327 - binary_accuracy: 0.7439 - f1: 0.7243 - recall: 0.6813 - precision: 0.7770 - val_loss: 0.5910 - val_binary_accuracy: 0.6720 - val_f1: 0.5848 - val_recall: 0.5618 - val_precision: 0.6370\n",
      "Epoch 93/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.4287 - binary_accuracy: 0.7461 - f1: 0.7254 - recall: 0.6774 - precision: 0.7856 - val_loss: 0.5999 - val_binary_accuracy: 0.6759 - val_f1: 0.5655 - val_recall: 0.5178 - val_precision: 0.6471\n",
      "Epoch 94/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4313 - binary_accuracy: 0.7459 - f1: 0.7229 - recall: 0.6689 - precision: 0.7910 - val_loss: 0.6155 - val_binary_accuracy: 0.6728 - val_f1: 0.5688 - val_recall: 0.5282 - val_precision: 0.6489\n",
      "Epoch 95/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4304 - binary_accuracy: 0.7476 - f1: 0.7286 - recall: 0.6836 - precision: 0.7841 - val_loss: 0.6581 - val_binary_accuracy: 0.6712 - val_f1: 0.5647 - val_recall: 0.5302 - val_precision: 0.6426\n",
      "Epoch 96/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4344 - binary_accuracy: 0.7407 - f1: 0.7240 - recall: 0.6842 - precision: 0.7727 - val_loss: 0.6112 - val_binary_accuracy: 0.6790 - val_f1: 0.5862 - val_recall: 0.5676 - val_precision: 0.6344\n",
      "Epoch 97/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4215 - binary_accuracy: 0.7520 - f1: 0.7311 - recall: 0.6783 - precision: 0.7961 - val_loss: 0.6279 - val_binary_accuracy: 0.6590 - val_f1: 0.5505 - val_recall: 0.5109 - val_precision: 0.6352\n",
      "Epoch 98/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4273 - binary_accuracy: 0.7458 - f1: 0.7284 - recall: 0.6807 - precision: 0.7862 - val_loss: 0.6172 - val_binary_accuracy: 0.6750 - val_f1: 0.5951 - val_recall: 0.5737 - val_precision: 0.6459\n",
      "Epoch 99/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4194 - binary_accuracy: 0.7507 - f1: 0.7307 - recall: 0.6793 - precision: 0.7946 - val_loss: 0.6413 - val_binary_accuracy: 0.6735 - val_f1: 0.6071 - val_recall: 0.5922 - val_precision: 0.6436\n",
      "Epoch 100/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4185 - binary_accuracy: 0.7548 - f1: 0.7390 - recall: 0.7005 - precision: 0.7855 - val_loss: 0.6199 - val_binary_accuracy: 0.6776 - val_f1: 0.5651 - val_recall: 0.5108 - val_precision: 0.6592\n",
      "Epoch 101/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.4300 - binary_accuracy: 0.7466 - f1: 0.7264 - recall: 0.6789 - precision: 0.7855 - val_loss: 0.6313 - val_binary_accuracy: 0.6805 - val_f1: 0.5869 - val_recall: 0.5574 - val_precision: 0.6500\n",
      "Epoch 102/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.4330 - binary_accuracy: 0.7454 - f1: 0.7242 - recall: 0.6706 - precision: 0.7894 - val_loss: 0.6225 - val_binary_accuracy: 0.6788 - val_f1: 0.6033 - val_recall: 0.5922 - val_precision: 0.6398\n",
      "Epoch 103/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.4249 - binary_accuracy: 0.7491 - f1: 0.7323 - recall: 0.6888 - precision: 0.7854 - val_loss: 0.6415 - val_binary_accuracy: 0.6729 - val_f1: 0.5691 - val_recall: 0.5347 - val_precision: 0.6397\n",
      "Epoch 104/500\n",
      "151/151 [==============================] - 2s 12ms/step - loss: 0.4157 - binary_accuracy: 0.7593 - f1: 0.7400 - recall: 0.6879 - precision: 0.8044 - val_loss: 0.6130 - val_binary_accuracy: 0.6858 - val_f1: 0.5980 - val_recall: 0.5571 - val_precision: 0.6624\n",
      "Epoch 105/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4028 - binary_accuracy: 0.7648 - f1: 0.7480 - recall: 0.7000 - precision: 0.8078 - val_loss: 0.6212 - val_binary_accuracy: 0.6859 - val_f1: 0.5759 - val_recall: 0.5306 - val_precision: 0.6565\n",
      "Epoch 106/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4051 - binary_accuracy: 0.7642 - f1: 0.7478 - recall: 0.7024 - precision: 0.8026 - val_loss: 0.6784 - val_binary_accuracy: 0.6716 - val_f1: 0.5548 - val_recall: 0.5099 - val_precision: 0.6411\n",
      "Epoch 107/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4199 - binary_accuracy: 0.7531 - f1: 0.7330 - recall: 0.6805 - precision: 0.7994 - val_loss: 0.6234 - val_binary_accuracy: 0.6828 - val_f1: 0.5882 - val_recall: 0.5523 - val_precision: 0.6560\n",
      "Epoch 108/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4066 - binary_accuracy: 0.7630 - f1: 0.7464 - recall: 0.7037 - precision: 0.7979 - val_loss: 0.6193 - val_binary_accuracy: 0.6905 - val_f1: 0.5998 - val_recall: 0.5707 - val_precision: 0.6555\n",
      "Epoch 109/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.3912 - binary_accuracy: 0.7713 - f1: 0.7567 - recall: 0.7122 - precision: 0.8098 - val_loss: 0.6477 - val_binary_accuracy: 0.6905 - val_f1: 0.5816 - val_recall: 0.5280 - val_precision: 0.6736\n",
      "Epoch 110/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.3975 - binary_accuracy: 0.7682 - f1: 0.7480 - recall: 0.6928 - precision: 0.8181 - val_loss: 0.6199 - val_binary_accuracy: 0.6949 - val_f1: 0.6198 - val_recall: 0.5867 - val_precision: 0.6743\n",
      "Epoch 111/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.3839 - binary_accuracy: 0.7773 - f1: 0.7614 - recall: 0.7142 - precision: 0.8186 - val_loss: 0.6703 - val_binary_accuracy: 0.6742 - val_f1: 0.5467 - val_recall: 0.4816 - val_precision: 0.6585\n",
      "Epoch 112/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4132 - binary_accuracy: 0.7613 - f1: 0.7443 - recall: 0.6983 - precision: 0.8028 - val_loss: 0.6813 - val_binary_accuracy: 0.6826 - val_f1: 0.6301 - val_recall: 0.6793 - val_precision: 0.6140\n",
      "Epoch 113/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4254 - binary_accuracy: 0.7592 - f1: 0.7439 - recall: 0.7070 - precision: 0.7918 - val_loss: 0.6848 - val_binary_accuracy: 0.6729 - val_f1: 0.5926 - val_recall: 0.5950 - val_precision: 0.6226\n",
      "Epoch 114/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4147 - binary_accuracy: 0.7580 - f1: 0.7440 - recall: 0.7082 - precision: 0.7886 - val_loss: 0.6746 - val_binary_accuracy: 0.6740 - val_f1: 0.5699 - val_recall: 0.5368 - val_precision: 0.6408\n",
      "Epoch 115/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.4238 - binary_accuracy: 0.7585 - f1: 0.7417 - recall: 0.6968 - precision: 0.7962 - val_loss: 0.6704 - val_binary_accuracy: 0.6847 - val_f1: 0.5784 - val_recall: 0.5182 - val_precision: 0.6803\n",
      "Epoch 116/500\n",
      "151/151 [==============================] - 2s 11ms/step - loss: 0.3844 - binary_accuracy: 0.7779 - f1: 0.7615 - recall: 0.7137 - precision: 0.8220 - val_loss: 0.6407 - val_binary_accuracy: 0.6978 - val_f1: 0.6257 - val_recall: 0.6062 - val_precision: 0.6649\n",
      "tn = 15768, fp = 3103, fn = 4954, tp = 13867\n",
      "y_pred: 0 = 20722 | 1 = 16970\n",
      "y_true: 0 = 18871 | 1 = 18821\n",
      "acc=0.7862|precision=0.8171|recall=0.7368|f1=0.7749|auc=0.8961|aupr=0.9068|pos_acc=0.7368|neg_acc=0.7609\n",
      "tn = 3579, fp = 1108, fn = 1740, tp = 2997\n",
      "y_pred: 0 = 5319 | 1 = 4105\n",
      "y_true: 0 = 4687 | 1 = 4737\n",
      "acc=0.6978|precision=0.7301|recall=0.6327|f1=0.6779|auc=0.7974|aupr=0.8128|pos_acc=0.6327|neg_acc=0.6729\n"
     ]
    }
   ],
   "source": [
    "# Driver loop: build the sample set once, then, for each task setting,\n",
    "# derive a train/test split and train/evaluate the DNN on it.\n",
    "for isbalance in [True]:\n",
    "    \n",
    "    # obtain_data is defined earlier in the notebook; presumably it loads the\n",
    "    # similarity matrices and (balanced, per isbalance) association samples\n",
    "    # -- confirm against its definition above.\n",
    "    IPE, IG, dtp, gene_ids, peco_ids, gene_test_num, peco_test_num, samples = obtain_data(directory, \n",
    "                                                                                                 isbalance)\n",
    "    # Three split strategies: 'Tp' splits on sample pairs, 'Tg' splits keyed\n",
    "    # on gene ids ('gene_idx'), 'Tpe' splits keyed on peco ids ('peco_idx').\n",
    "    for task in ['Tp', 'Tg', 'Tpe']:  \n",
    "        print('========== isbalance = {} | task = {}'.format(isbalance, task))\n",
    "        \n",
    "        if task == 'Tp':\n",
    "            # Split computed directly from the sample pairs.\n",
    "            train_index_all, test_index_all, train_id_all, test_id_all = generate_task_Tp_train_test_idx(samples)\n",
    "            \n",
    "        elif task == 'Tg':\n",
    "            # Split keyed on the 'gene_idx' column using the gene id list.\n",
    "            item = 'gene_idx'\n",
    "            ids = gene_ids\n",
    "            train_index_all, test_index_all, train_id_all, test_id_all = generate_task_Tg_Tpe_train_test_idx(item, ids, dtp)\n",
    "\n",
    "        elif task == 'Tpe':\n",
    "            # Split keyed on the 'peco_idx' column using the peco id list.\n",
    "            item = 'peco_idx'\n",
    "            ids = peco_ids\n",
    "            train_index_all, test_index_all, train_id_all, test_id_all = generate_task_Tg_Tpe_train_test_idx(item, ids, dtp)\n",
    "\n",
    "        # Train the model on this split; returns labels, hard predictions,\n",
    "        # probability scores, and performance summaries for train and test.\n",
    "        # NOTE(review): results are overwritten each task iteration -- only the\n",
    "        # printed output (see cell outputs) preserves per-task metrics.\n",
    "        y_train, y_test, y_train_pred, y_test_pred, y_train_prob, y_test_prob, performances_train, performances_test = run_dnn(train_index_all, test_index_all, samples)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
