{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "dCpvgG0vwXAZ"
   },
   "source": [
    "# Predicting Diabetes patient risk to develop Heart Failure with Med-BERT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "hsZvic2YxnTz"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 21:21:25.325698 140179506145024 file_utils.py:39] PyTorch version 1.5.0+cu101 available.\n"
     ]
    }
   ],
   "source": [
    "### Required Packages\n",
    "from termcolor import colored\n",
    "import math\n",
    "from sklearn.model_selection import train_test_split\n",
    "import pandas as pd\n",
    "import random\n",
    "import numpy as np\n",
    "from datetime import datetime\n",
    "import pickle as pkl\n",
    "import os\n",
    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"   \n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\"\n",
    "import torch\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "import torch.nn.functional as F\n",
    "from torch import optim\n",
    "import tqdm\n",
    "import time\n",
    "import transformers\n",
    "from sklearn.metrics import roc_auc_score  \n",
    "from sklearn.metrics import roc_curve \n",
    "import matplotlib\n",
    "import matplotlib.pyplot as plt\n",
    "from matplotlib.pyplot import cm\n",
    "%matplotlib inline\n",
    "use_cuda = torch.cuda.is_available()\n",
    "import transformers\n",
    "from transformers import BertForSequenceClassification\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "pmFYvkylMwXn"
   },
   "source": [
    "#### Load Data from pickled list\n",
    "\n",
    "The pickled list is a list of lists where each sublist represent a patient record that looks like \n",
    "[pt_id,label, seq_list , segment_list ]\n",
    "where\n",
    "    Label: 1: pt developed HF (case) , 0 control\n",
    "    seq_list: list of all medical codes in all visits\n",
    "    segment list: the visit number mapping to each code in the sequence list\n",
    " "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_f=pkl.load( open('../Diab_HF/preprocessed_data/lr_dhf.combined_BertFT.train', 'rb'), encoding='bytes')\n",
    "valid_f=pkl.load( open('../Diab_HF/preprocessed_data/lr_dhf.combined_BertFT.valid', 'rb'), encoding='bytes')\n",
    "test_f=pkl.load( open('../Diab_HF/preprocessed_data/lr_dhf.combined_BertFT.test', 'rb'), encoding='bytes')\n",
    "test_f2=pkl.load( open('../Diab_HF/preprocessed_data/lr_dhf_s5k.combined_BertFT.test', 'rb'), encoding='bytes')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "### Below are key functions for  Data prepartion ,formating input data into features, and model defintion \n",
    "\n",
    "class PaddingInputExample(object):\n",
    "  \"\"\"Fake example so the num input examples is a multiple of the batch size.\n",
    "\n",
    "  When running eval/predict on the TPU, we need to pad the number of examples\n",
    "  to be a multiple of the batch size, because the TPU requires a fixed batch\n",
    "  size. The alternative is to drop the last batch, which is bad because it means\n",
    "  the entire output data won't be generated.\n",
    "\n",
    "  We use this class instead of `None` because treating `None` as padding\n",
    "  battches could cause silent errors.\n",
    "  \"\"\"\n",
    "\n",
    "class InputFeatures(object):\n",
    "  \"\"\"A single set of features of data.\"\"\"\n",
    "\n",
    "  def __init__(self,\n",
    "               input_ids,\n",
    "               input_mask,\n",
    "               segment_ids,\n",
    "               label_id,\n",
    "               is_real_example=True):\n",
    "    self.input_ids = input_ids\n",
    "    self.input_mask = input_mask\n",
    "    self.segment_ids = segment_ids\n",
    "    self.label_id = label_id\n",
    "    self.is_real_example = is_real_example\n",
    "    \n",
    "\n",
    "    \n",
    "def convert_EHRexamples_to_features(examples,max_seq_length):\n",
    "    \"\"\"Convert a set of `InputExample`s to a list of `InputFeatures`.\"\"\"\n",
    "\n",
    "    features = []\n",
    "    for (ex_index, example) in enumerate(examples):\n",
    "        feature = convert_singleEHR_example(ex_index, example, max_seq_length)\n",
    "        features.append(feature)\n",
    "    return features\n",
    "\n",
    "### This is the EHR version\n",
    "\n",
    "def convert_singleEHR_example(ex_index, example, max_seq_length):\n",
    "    if isinstance(example, PaddingInputExample):\n",
    "        return InputFeatures(\n",
    "        input_ids=[0] * max_seq_length,\n",
    "        input_mask=[0] * max_seq_length,\n",
    "        segment_ids=[0] * max_seq_length,\n",
    "        label_id=0,\n",
    "        is_real_example=False)\n",
    "    \n",
    "    input_ids=example[2]\n",
    "    segment_ids=example[3]\n",
    "    label_id=example[1]\n",
    "    \n",
    "\n",
    "  # The mask has 1 for real tokens and 0 for padding tokens. Only real\n",
    "  # tokens are attended to.\n",
    "    input_mask = [1] * len(input_ids)\n",
    "\n",
    "   \n",
    "  # LR 5/13 Left Truncate longer sequence \n",
    "    while len(input_ids) > max_seq_length:\n",
    "        input_ids= input_ids[-max_seq_length:] \n",
    "        input_mask= input_mask[-max_seq_length:]\n",
    "        segment_ids= segment_ids[-max_seq_length:]\n",
    " \n",
    "    \n",
    "    \n",
    "  # Zero-pad up to the sequence length.\n",
    "    while len(input_ids) < max_seq_length:\n",
    "        input_ids.append(0)\n",
    "        input_mask.append(0)\n",
    "        segment_ids.append(0)\n",
    "\n",
    "    assert len(input_ids) == max_seq_length\n",
    "    assert len(input_mask) == max_seq_length\n",
    "    assert len(segment_ids) == max_seq_length\n",
    "\n",
    "  \n",
    "    feature =[input_ids,input_mask,segment_ids,label_id,True]\n",
    "    return feature\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BERTdataEHR(Dataset):\n",
    "    def __init__(self, Features):\n",
    "           \n",
    "        self.data= Features\n",
    "  \n",
    "                                     \n",
    "    def __getitem__(self, idx, seeDescription = False):\n",
    "\n",
    "        sample = self.data[idx]\n",
    "   \n",
    "        return sample\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)     \n",
    "\n",
    "         \n",
    "#customized parts for EHRdataloader\n",
    "def my_collate(batch):\n",
    "        all_input_ids = []\n",
    "        all_input_mask = []\n",
    "        all_segment_ids = []\n",
    "        all_label_ids = []\n",
    "\n",
    "        for feature in batch:\n",
    "            all_input_ids.append(feature[0])\n",
    "            all_input_mask.append(feature[1])\n",
    "            all_segment_ids.append(feature[2])\n",
    "            all_label_ids.append(feature[3])\n",
    "        return [all_input_ids, all_input_mask,all_segment_ids,all_label_ids]\n",
    "            \n",
    "\n",
    "class BERTdataEHRloader(DataLoader):\n",
    "    def __init__(self, dataset, batch_size=128, shuffle=False, sampler=None, batch_sampler=None,\n",
    "                 num_workers=0, collate_fn=my_collate, pin_memory=False, drop_last=False,\n",
    "                 timeout=0, worker_init_fn=None):\n",
    "        DataLoader.__init__(self, dataset, batch_size=batch_size, shuffle=False, sampler=None, batch_sampler=None,\n",
    "                 num_workers=0, collate_fn=my_collate, pin_memory=False, drop_last=False,\n",
    "                 timeout=0, worker_init_fn=None)\n",
    "        self.collate_fn = collate_fn\n",
    "\n",
    " "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### Model Definition"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class EHR_BERT_LR(nn.Module):\n",
    "    def __init__(self, input_size,embed_dim, hidden_size, n_layers=1,dropout_r=0.1,cell_type='LSTM',bi=False ,time=False, preTrainEmb=''):\n",
    "        super(EHR_BERT_LR, self).__init__()\n",
    "        self.n_layers = n_layers\n",
    "        self.hidden_size = hidden_size\n",
    "        self.embed_dim = embed_dim\n",
    "        self.dropout_r = dropout_r\n",
    "        self.cell_type = cell_type\n",
    "        self.preTrainEmb=preTrainEmb\n",
    "        self.time=time\n",
    "        \n",
    "        if bi: self.bi=2 \n",
    "        else: self.bi=1\n",
    "        \n",
    "        self.PreBERTmodel=BertForSequenceClassification.from_pretrained(\"pretrained_py_models/45M_chk\")\n",
    "        if use_cuda:\n",
    "           self.PreBERTmodel.cuda()\n",
    "        input_size=self.PreBERTmodel.bert.config.vocab_size\n",
    "        self.in_size= self.PreBERTmodel.bert.config.hidden_size\n",
    "       \n",
    "        self.dropout = nn.Dropout(p=self.dropout_r)\n",
    "        self.out = nn.Linear(self.in_size,1)\n",
    "        self.sigmoid = nn.Sigmoid()\n",
    "        self.softmax=nn.Softmax()\n",
    "        if use_cuda:\n",
    "            self.flt_typ=torch.cuda.FloatTensor\n",
    "            self.lnt_typ=torch.cuda.LongTensor\n",
    "        else: \n",
    "            self.lnt_typ=torch.LongTensor\n",
    "            self.flt_typ=torch.FloatTensor\n",
    "\n",
    "    def forward(self, sequence):\n",
    "        token_t=torch.from_numpy(np.asarray(sequence[0],dtype=int)).type(self.lnt_typ)\n",
    "        seg_t=torch.from_numpy(np.asarray(sequence[2],dtype=int)).type(self.lnt_typ)\n",
    "        Label_t=torch.from_numpy(np.asarray(sequence[3],dtype=int)).type(self.lnt_typ)\n",
    "        Bert_out=self.PreBERTmodel.bert(input_ids=token_t, attention_mask=torch.from_numpy(np.asarray(sequence[1],dtype=int)).type(self.lnt_typ),\n",
    "                                    token_type_ids=seg_t)\n",
    "        output=self.sigmoid(self.out(Bert_out[1]))\n",
    "        return output.squeeze(),Label_t.type(self.flt_typ)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def timeSince(since):\n",
    "   now = time.time()\n",
    "   s = now - since\n",
    "   m = math.floor(s / 60)\n",
    "   s -= m * 60\n",
    "   return '%dm %ds' % (m, s)\n",
    "\n",
    "def trainsample(sample, model, optimizer, criterion = nn.BCELoss()): \n",
    "    model.train() \n",
    "    model.zero_grad()\n",
    "    output,label_tensor = model(sample)   \n",
    "    loss = criterion(output, label_tensor)    \n",
    "    loss.backward()   \n",
    "    optimizer.step()\n",
    "    return output, loss.item()\n",
    "\n",
    "\n",
    "#train with loaders\n",
    "\n",
    "def trainbatches(mbs_list, model, optimizer, shuffle = True):\n",
    "    current_loss = 0\n",
    "    all_losses =[]\n",
    "    plot_every = 5\n",
    "    n_iter = 0 \n",
    "    if shuffle: \n",
    "        random.shuffle(mbs_list)\n",
    "    for i,batch in enumerate(mbs_list):\n",
    "        output, loss = trainsample(batch, model, optimizer, criterion = nn.BCELoss())\n",
    "        current_loss += loss\n",
    "        n_iter +=1\n",
    "    \n",
    "        if n_iter % plot_every == 0:\n",
    "            all_losses.append(current_loss/plot_every)\n",
    "            current_loss = 0    \n",
    "    return current_loss, all_losses \n",
    "\n",
    "\n",
    "def calculate_auc(model, mbs_list, shuffle = True): \n",
    "    model.eval() \n",
    "    y_real =[]\n",
    "    y_hat= []\n",
    "    if shuffle: \n",
    "        random.shuffle(mbs_list)\n",
    "    for i,batch in enumerate(mbs_list):\n",
    "        output,label_tensor = model(batch)\n",
    "        y_hat.extend(output.cpu().data.view(-1).numpy())  \n",
    "        y_real.extend(label_tensor.cpu().data.view(-1).numpy())       \n",
    "    auc = roc_auc_score(y_real, y_hat)\n",
    "    return auc, y_real, y_hat \n",
    "\n",
    "    \n",
    "#define the final epochs running, use the different names\n",
    "\n",
    "def epochs_run(epochs, train, valid, test1,test2, model, optimizer, shuffle = True,  patience = 20, output_dir = '../models/', model_prefix = 'dhf.train', model_customed= ''):  \n",
    "    bestValidAuc = 0.0\n",
    "    bestTestAuc1 = 0.0\n",
    "    bestTestAuc2 = 0.0\n",
    "    bestValidEpoch = 0\n",
    "    #header = 'BestValidAUC|TestAUC|atEpoch'\n",
    "    #logFile = output_dir + model_prefix + model_customed +'EHRmodel.log'\n",
    "    #print2file(header, logFile)\n",
    "    #writer = SummaryWriter(output_dir+'/tsb_runs/') ## LR added 9/27 for tensorboard integration\n",
    "    for ep in range(epochs):\n",
    "        print (ep)\n",
    "        start = time.time()\n",
    "        current_loss, train_loss = trainbatches(mbs_list = train, model= model, optimizer = optimizer)\n",
    "        train_time = timeSince(start)\n",
    "        #epoch_loss.append(train_loss)\n",
    "        avg_loss = np.mean(train_loss)\n",
    "        #writer.add_scalar('Loss/train', avg_loss, ep) ## LR added 9/27 \n",
    "        valid_start = time.time()\n",
    "        train_auc, _, _ = calculate_auc(model = model, mbs_list = train, shuffle = shuffle)\n",
    "        valid_auc, _, _ = calculate_auc(model = model, mbs_list = valid, shuffle = shuffle)\n",
    "        valid_time = timeSince(valid_start)\n",
    "        #writer.add_scalar('train_auc', train_auc, ep) ## LR added 9/27 \n",
    "        #writer.add_scalar('valid_auc', valid_auc, ep) ## LR added 9/27 \n",
    "        print(colored('\\n Epoch (%s): Train_auc (%s), Valid_auc (%s) ,Training Average_loss (%s), Train_time (%s), Eval_time (%s)'%(ep, train_auc, valid_auc , avg_loss,train_time, valid_time), 'green'))\n",
    "        if valid_auc > bestValidAuc: \n",
    "              bestValidAuc = valid_auc\n",
    "              bestValidEpoch = ep\n",
    "              best_model= model \n",
    "              bestTrainAuc = train_auc  \n",
    "              if test:      \n",
    "                      testeval_start = time.time()\n",
    "                      bestTestAuc1, _, _ = calculate_auc(model = best_model, mbs_list = test1,  shuffle = shuffle) \n",
    "                      bestTestAuc2, _, _ = calculate_auc(model = best_model, mbs_list = test2,  shuffle = shuffle) \n",
    "\n",
    "                        #writer.add_scalar('test_auc', valid_auc, ep) ## LR added 9/27 \n",
    "                      print(colored('\\n Test_AUC1 (%s) ,Test_AUC2 (%s) , Test_eval_time (%s) '%(bestTestAuc1,bestTestAuc2, timeSince(testeval_start)), 'yellow')) \n",
    "                      #print(best_model,model) ## to verify that the hyperparameters already impacting the model definition\n",
    "                      #print(optimizer)\n",
    "        if ep - bestValidEpoch > patience:\n",
    "              break\n",
    "\n",
    "    #writer.close()\n",
    "    #if not os.path.exists(output_dir):\n",
    "    #    os.makedirs(output_dir)\n",
    "    ###save model & parameters\n",
    "    #torch.save(best_model, output_dir + model_prefix + model_customed + 'EHRmodel.pth')\n",
    "    #torch.save(best_model.state_dict(), output_dir + model_prefix + model_customed + 'EHRmodel.st')\n",
    "\n",
    "    if test:\n",
    "        print(colored('BestValidAuc %f has a TestAuc of %f at epoch %d ' % (bestValidAuc, bestTestAuc1, bestValidEpoch),'green'))\n",
    "        return bestTrainAuc,bestValidAuc, bestTestAuc1, bestTestAuc2, bestValidEpoch\n",
    "    else: \n",
    "        print(colored('BestValidAuc %f at epoch %d ' % (bestValidAuc,  bestValidEpoch),'green'))\n",
    "        print('No Test Accuracy')\n",
    "    \n",
    "    print(colored('Details see ../models/%sEHRmodel.log' %(model_prefix + model_customed),'green'))\n",
    "        \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " creating the list of training minibatches\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 21:22:05.684562 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 21:22:05.686757 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 21:22:05.687871 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " creating the list of test minibatches\n",
      " creating the list of test2 minibatches\n",
      " creating the list of valid minibatches\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 21:22:06.574445 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 21:22:06.575920 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.8344208078615551), Valid_auc (0.8184748806805653) ,Training Average_loss (0.6015739386081695), Train_time (0m 23s), Eval_time (0m 5s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.813739276864951) ,Test_AUC2 (0.8129291783088921) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8689255080828232), Valid_auc (0.8328357126090946) ,Training Average_loss (0.5050303135514259), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.827368265712256) ,Test_AUC2 (0.8259503103980095) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.8945819412477846), Valid_auc (0.8348941240068568) ,Training Average_loss (0.46658780950307843), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8300147372468025) ,Test_AUC2 (0.8279952299876525) , Test_eval_time (0m 1s) \u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.919262215204665), Valid_auc (0.8296378450661146) ,Training Average_loss (0.4308578492999077), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.9391497843471488), Valid_auc (0.8203920648130365) ,Training Average_loss (0.39333054256439204), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9532243302458633), Valid_auc (0.8151828734822782) ,Training Average_loss (0.3566794258356094), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.9642822264884321), Valid_auc (0.8070189346250418) ,Training Average_loss (0.3246071296781301), Train_time (0m 21s), Eval_time (0m 5s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9725742849708805), Valid_auc (0.7993628591141555) ,Training Average_loss (0.29402682702243327), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.9786934433463503), Valid_auc (0.792326995710902) ,Training Average_loss (0.2665277595669031), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.9833665810386603), Valid_auc (0.7864001095391532) ,Training Average_loss (0.24528794925659891), Train_time (0m 20s), Eval_time (0m 5s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9869543651574662), Valid_auc (0.7868324691120927) ,Training Average_loss (0.22257137741893526), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.9899807395230021), Valid_auc (0.778291331469745) ,Training Average_loss (0.2062570991851389), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.9918156909775728), Valid_auc (0.7754628384866527) ,Training Average_loss (0.19040890549868347), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.993229784531738), Valid_auc (0.7726178790659297) ,Training Average_loss (0.17459200223907828), Train_time (0m 23s), Eval_time (0m 5s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.9947079675610514), Valid_auc (0.7704584640334592) ,Training Average_loss (0.15849958470091224), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.9959211800615063), Valid_auc (0.7688839098364737) ,Training Average_loss (0.1467575747668743), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.996351948284842), Valid_auc (0.7698904964115257) ,Training Average_loss (0.13772847735136748), Train_time (0m 21s), Eval_time (0m 5s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.9972242175144399), Valid_auc (0.7665776838870751) ,Training Average_loss (0.12883694822341205), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.9977250404480705), Valid_auc (0.7646931836772436) ,Training Average_loss (0.11760730772837996), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.9979599767860186), Valid_auc (0.7650851417962998) ,Training Average_loss (0.10946379075944425), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.9982367337746663), Valid_auc (0.765370868275612) ,Training Average_loss (0.10537425739597531), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9985795235802226), Valid_auc (0.7648527978718108) ,Training Average_loss (0.09728910680254922), Train_time (0m 21s), Eval_time (0m 5s)\u001b[0m\n",
      "22\n",
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.9988434100300002), Valid_auc (0.7597993441876677) ,Training Average_loss (0.09007456733798608), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "23\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 21:33:03.378924 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 21:33:03.380889 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 21:33:03.381677 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m\n",
      " Epoch (23): Train_auc (0.9989485929027963), Valid_auc (0.7634038936189886) ,Training Average_loss (0.08579230152210222), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.834894 has a TestAuc of 0.830015 at epoch 2 \u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 21:33:03.975311 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 21:33:03.976801 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.8350525299439123), Valid_auc (0.819059243610809) ,Training Average_loss (0.5963910191655158), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8146679703468753) ,Test_AUC2 (0.8143491257990754) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8694366997728258), Valid_auc (0.8323152593730662) ,Training Average_loss (0.5031238191723824), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8282307458502527) ,Test_AUC2 (0.8270044908069418) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.8966716240842965), Valid_auc (0.8316490621599129) ,Training Average_loss (0.4666782378256321), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.9195386161673412), Valid_auc (0.826901713505324) ,Training Average_loss (0.4309304870665073), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.9370681588984008), Valid_auc (0.8214501845805859) ,Training Average_loss (0.3943785701990128), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9518924802911382), Valid_auc (0.8101911244834233) ,Training Average_loss (0.3587719217538833), Train_time (0m 18s), Eval_time (0m 4s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.9635172906885072), Valid_auc (0.8066050331106985) ,Training Average_loss (0.32549229577183725), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9716157662496238), Valid_auc (0.7996292384183684) ,Training Average_loss (0.29676164177060127), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.9776840929170628), Valid_auc (0.7946828699258122) ,Training Average_loss (0.27151625291258097), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.982130801291988), Valid_auc (0.7895925356891365) ,Training Average_loss (0.24925188742578025), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9862243231029423), Valid_auc (0.7831486460533895) ,Training Average_loss (0.2273962109386921), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.9889423421748231), Valid_auc (0.7815137030635398) ,Training Average_loss (0.20689871262013912), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.9906299100781765), Valid_auc (0.779588516882544) ,Training Average_loss (0.1911610957682133), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.9931292347969165), Valid_auc (0.7725446514307663) ,Training Average_loss (0.177325526136905), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.9944456212236206), Valid_auc (0.7663729381388301) ,Training Average_loss (0.16050589357875286), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.9955623770878339), Valid_auc (0.7630711862236732) ,Training Average_loss (0.15018777099624278), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.9961508528154905), Valid_auc (0.7632236874862186) ,Training Average_loss (0.1409551759250462), Train_time (0m 18s), Eval_time (0m 4s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.9968579379953745), Valid_auc (0.7610351449971192) ,Training Average_loss (0.12865810389630497), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.9974020416862154), Valid_auc (0.7633050594285471) ,Training Average_loss (0.11861827936861662), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.9978211434585161), Valid_auc (0.7618915064478727) ,Training Average_loss (0.10888933372125029), Train_time (0m 18s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.9981482441195935), Valid_auc (0.7620138133139862) ,Training Average_loss (0.10656937459204348), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9984367875680301), Valid_auc (0.7604112341648351) ,Training Average_loss (0.10003265821468087), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "22\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 21:42:45.312570 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 21:42:45.314456 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 21:42:45.315254 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.9985865488926994), Valid_auc (0.7576536571139989) ,Training Average_loss (0.09320290236780421), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.832315 has a TestAuc of 0.828231 at epoch 1 \u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 21:42:45.903815 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 21:42:45.904995 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.8330959640179064), Valid_auc (0.8176760272852073) ,Training Average_loss (0.5994077806472777), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8140763702522191) ,Test_AUC2 (0.8134981493241707) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8656440647107173), Valid_auc (0.8324247629615404) ,Training Average_loss (0.507281952381134), Train_time (0m 19s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8275206301810785) ,Test_AUC2 (0.8253920595902737) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.8916863324212285), Valid_auc (0.8335593467483231) ,Training Average_loss (0.47075086289644247), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8290833237644428) ,Test_AUC2 (0.8260134316200362) , Test_eval_time (0m 1s) \u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.9170662550155178), Valid_auc (0.8287508624430076) ,Training Average_loss (0.43584743118286134), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.937776579375691), Valid_auc (0.8214416134974998) ,Training Average_loss (0.39769813296198847), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9510341208762223), Valid_auc (0.8159179238774015) ,Training Average_loss (0.36138463993370534), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.963480349593758), Valid_auc (0.8091194190157125) ,Training Average_loss (0.32917658248543735), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9695190396992928), Valid_auc (0.8044706200342842) ,Training Average_loss (0.3006113824397325), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.9782372276666941), Valid_auc (0.7964083961049584) ,Training Average_loss (0.2722775126248598), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.9826388511528029), Valid_auc (0.7908370854049748) ,Training Average_loss (0.24865016180276872), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9867563363118163), Valid_auc (0.7835418489355497) ,Training Average_loss (0.2293191659152508), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.9882595659682107), Valid_auc (0.7891981236085326) ,Training Average_loss (0.20816778076440096), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.9918996731038338), Valid_auc (0.7809843230978242) ,Training Average_loss (0.19138961584120984), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.9932352425298838), Valid_auc (0.7741422870921624) ,Training Average_loss (0.1746600242517888), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.994771036161729), Valid_auc (0.7735132905134825) ,Training Average_loss (0.16222583230212334), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.9956953699892936), Valid_auc (0.7692102155929695) ,Training Average_loss (0.15105213383026422), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.9964719682399615), Valid_auc (0.7659791662221084) ,Training Average_loss (0.1404548904635012), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.9971518610362372), Valid_auc (0.766681212612651) ,Training Average_loss (0.12876540729030972), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.9973584537065944), Valid_auc (0.7700628427544117) ,Training Average_loss (0.11865247628930956), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.998082349712781), Valid_auc (0.7665789642148391) ,Training Average_loss (0.11364776728115977), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.998277660760175), Valid_auc (0.7681676020172276) ,Training Average_loss (0.10403350455872712), Train_time (0m 23s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9985118442432046), Valid_auc (0.7686160368165361) ,Training Average_loss (0.09922537684487179), Train_time (0m 23s), Eval_time (0m 4s)\u001b[0m\n",
      "22\n",
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.9987694206326807), Valid_auc (0.7669630269793511) ,Training Average_loss (0.09348245119350031), Train_time (0m 23s), Eval_time (0m 4s)\u001b[0m\n",
      "23\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 21:53:26.369896 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 21:53:26.372846 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 21:53:26.373793 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m\n",
      " Epoch (23): Train_auc (0.9989646444737132), Valid_auc (0.7651444636493608) ,Training Average_loss (0.08868849254818632), Train_time (0m 23s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.833559 has a TestAuc of 0.829083 at epoch 2 \u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 21:53:26.927020 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 21:53:26.928144 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.8348611087802745), Valid_auc (0.8182579006892431) ,Training Average_loss (0.597093949317932), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8136349657371501) ,Test_AUC2 (0.8130792612144971) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8685117242984548), Valid_auc (0.8311971420239137) ,Training Average_loss (0.5056465235352516), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8283004080836209) ,Test_AUC2 (0.8263982390699084) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.8954337473846572), Valid_auc (0.8325071307143518) ,Training Average_loss (0.4667284547388554), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8298485772202169) ,Test_AUC2 (0.8264550401695777) , Test_eval_time (0m 1s) \u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.91975767294692), Valid_auc (0.8284334122868788) ,Training Average_loss (0.4315815242826938), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.9374261266111431), Valid_auc (0.8234825626471489) ,Training Average_loss (0.39587852439284327), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9528420735613085), Valid_auc (0.8122684562803633) ,Training Average_loss (0.35806987321376804), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.9637733541676212), Valid_auc (0.8034232763587479) ,Training Average_loss (0.32763032074272647), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9720252209181953), Valid_auc (0.7965276088456422) ,Training Average_loss (0.297490239828825), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.9777612713470132), Valid_auc (0.7944342018223332) ,Training Average_loss (0.2706149638444185), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.9828167121272628), Valid_auc (0.7830854832170369) ,Training Average_loss (0.24657302077859636), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9866986617046151), Valid_auc (0.7801031730789749) ,Training Average_loss (0.2250226450115442), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.989404530290152), Valid_auc (0.7743410935421691) ,Training Average_loss (0.20852662071585656), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.9914227903165699), Valid_auc (0.7730668828998001) ,Training Average_loss (0.19056722608953713), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.9930849019629605), Valid_auc (0.7678910512202235) ,Training Average_loss (0.17429955976828929), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.9945137293919206), Valid_auc (0.7625339109034134) ,Training Average_loss (0.15880397664196788), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.9954708688125615), Valid_auc (0.7663237166492399) ,Training Average_loss (0.14715070457383989), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.9963654220677166), Valid_auc (0.7662012319598261) ,Training Average_loss (0.13855120271816848), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.9970565468833368), Valid_auc (0.758377113429927) ,Training Average_loss (0.12851998869981618), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.997502085784152), Valid_auc (0.7584092638826649) ,Training Average_loss (0.11905571860913186), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.9979909638464379), Valid_auc (0.76300457361529) ,Training Average_loss (0.11057049933169036), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.9983376787383186), Valid_auc (0.7624321248461828) ,Training Average_loss (0.10482882055686789), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9985020931318882), Valid_auc (0.7612684847320914) ,Training Average_loss (0.09747830725181847), Train_time (0m 19s), Eval_time (0m 4s)\u001b[0m\n",
      "22\n",
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.9986791044443677), Valid_auc (0.7625903164543457) ,Training Average_loss (0.09287868666090074), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "23\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:03:30.106102 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 22:03:30.107589 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 22:03:30.108331 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m\n",
      " Epoch (23): Train_auc (0.9988704656036285), Valid_auc (0.7587428603944832) ,Training Average_loss (0.08895108831021935), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.832507 has a TestAuc of 0.829849 at epoch 2 \u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:03:30.662605 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 22:03:30.663679 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.8379883161012923), Valid_auc (0.8203654980119354) ,Training Average_loss (0.5848884753584861), Train_time (0m 15s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8146202192281239) ,Test_AUC2 (0.8131664629027219) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8683454193669756), Valid_auc (0.8311485962628655) ,Training Average_loss (0.5035987617969513), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8265046300185186) ,Test_AUC2 (0.822951132333922) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.8949668453254593), Valid_auc (0.8327145438121047) ,Training Average_loss (0.4661092875003814), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8288906037336077) ,Test_AUC2 (0.8251059740516576) , Test_eval_time (0m 1s) \u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.9174870545116629), Valid_auc (0.8298718249649688) ,Training Average_loss (0.4338305563330651), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.9363194874849174), Valid_auc (0.8217563607394605) ,Training Average_loss (0.3976950737833976), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9506303082192196), Valid_auc (0.8147419783909124) ,Training Average_loss (0.36166959789395325), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.9618916841050588), Valid_auc (0.8069873532068654) ,Training Average_loss (0.32836607035994525), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9700020845360611), Valid_auc (0.8018536300848573) ,Training Average_loss (0.2980728425979614), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.9771709866874001), Valid_auc (0.7941569397321271) ,Training Average_loss (0.270300386980176), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.9818894532863268), Valid_auc (0.7886089238845145) ,Training Average_loss (0.24550208356976508), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9858088415947295), Valid_auc (0.7824436122313979) ,Training Average_loss (0.22595634550601246), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.9883249771397724), Valid_auc (0.7829421932014595) ,Training Average_loss (0.20631757616251709), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.9910930790650573), Valid_auc (0.7783597934404541) ,Training Average_loss (0.19226312931627035), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.9926125867088851), Valid_auc (0.7770091187788518) ,Training Average_loss (0.17604410395026207), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.9941632166229957), Valid_auc (0.7757929496617801) ,Training Average_loss (0.1612446140535176), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.9953379951197976), Valid_auc (0.7689508425267979) ,Training Average_loss (0.14874578392505644), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.9961461156699289), Valid_auc (0.7676480378977018) ,Training Average_loss (0.1366737064700574), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.9968445586193866), Valid_auc (0.7657213935656416) ,Training Average_loss (0.12670445405133068), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.997276780548766), Valid_auc (0.7684606548165219) ,Training Average_loss (0.11955429209675641), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.9979386544306126), Valid_auc (0.7667272688474915) ,Training Average_loss (0.10947810021974146), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.998338495597906), Valid_auc (0.7639870829154487) ,Training Average_loss (0.10398897944949567), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9986008163334692), Valid_auc (0.7620338362176272) ,Training Average_loss (0.0963604437271133), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "22\n",
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.9988368783535324), Valid_auc (0.7613612373656545) ,Training Average_loss (0.09066359709575772), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "23\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:11:48.597309 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 22:11:48.598996 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 22:11:48.599742 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m\n",
      " Epoch (23): Train_auc (0.9989565950865331), Valid_auc (0.7632044470050999) ,Training Average_loss (0.08301712638000026), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.832715 has a TestAuc of 0.828891 at epoch 2 \u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:11:49.154232 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 22:11:49.155282 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.838662646091777), Valid_auc (0.8208756019318723) ,Training Average_loss (0.5861842641830445), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8158333927555651) ,Test_AUC2 (0.8144028068383404) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8706608994747661), Valid_auc (0.8324763317186977) ,Training Average_loss (0.5030413975119591), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8276131368625465) ,Test_AUC2 (0.8246087644256792) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.8970667649087132), Valid_auc (0.833283720632482) ,Training Average_loss (0.46614590415358537), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8274166657199998) ,Test_AUC2 (0.824605084354433) , Test_eval_time (0m 1s) \u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.9198462946116224), Valid_auc (0.8288082993690828) ,Training Average_loss (0.4292477672100067), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.9382188028346542), Valid_auc (0.8228979507642845) ,Training Average_loss (0.3939723122119904), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9517522580623193), Valid_auc (0.8151016437985903) ,Training Average_loss (0.3560449776053429), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.9624197330247488), Valid_auc (0.809851268591426) ,Training Average_loss (0.32559606802463537), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9703212230163196), Valid_auc (0.8024426164209149) ,Training Average_loss (0.29823442666232586), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.9762930802465831), Valid_auc (0.7970123195982616) ,Training Average_loss (0.2698057988286019), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.9809717719440424), Valid_auc (0.7940814715233766) ,Training Average_loss (0.24941714996844533), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9849449505762985), Valid_auc (0.7887734104375164) ,Training Average_loss (0.22682784565538167), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.9878128845840543), Valid_auc (0.7803795460526783) ,Training Average_loss (0.21040679143369195), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.9901539081551689), Valid_auc (0.7802888561694017) ,Training Average_loss (0.19341388042643667), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.9919963017526231), Valid_auc (0.7757130358705162) ,Training Average_loss (0.17955237048864364), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.9934169749867577), Valid_auc (0.7724266478885261) ,Training Average_loss (0.16403307732194666), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.994696545927875), Valid_auc (0.7692331547987394) ,Training Average_loss (0.15237439526617527), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.9955884917928284), Valid_auc (0.769087517515595) ,Training Average_loss (0.141953891415149), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.9964916288741499), Valid_auc (0.7680164877764263) ,Training Average_loss (0.1323879230795428), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.9970166583735816), Valid_auc (0.7680471089487799) ,Training Average_loss (0.12208616754971442), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.9974835668332463), Valid_auc (0.7684243077338909) ,Training Average_loss (0.11626626969035714), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.9979436755968925), Valid_auc (0.7618757868681049) ,Training Average_loss (0.10576773128705097), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9982044474194669), Valid_auc (0.7653839205058717) ,Training Average_loss (0.10096149150794374), Train_time (0m 15s), Eval_time (0m 4s)\u001b[0m\n",
      "22\n",
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.99854644756742), Valid_auc (0.7609992246904096) ,Training Average_loss (0.09424494258314371), Train_time (0m 16s), Eval_time (0m 4s)\u001b[0m\n",
      "23\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:20:55.465837 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 22:20:55.467652 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 22:20:55.468709 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m\n",
      " Epoch (23): Train_auc (0.998796325795337), Valid_auc (0.7590093819573367) ,Training Average_loss (0.08781277851620689), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.833284 has a TestAuc of 0.827417 at epoch 2 \u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:20:56.063505 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 22:20:56.064536 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.8343471704899152), Valid_auc (0.81815529664483) ,Training Average_loss (0.6024545248150825), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8123951788721175) ,Test_AUC2 (0.8119798799304754) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8674745446391027), Valid_auc (0.8327873802360071) ,Training Average_loss (0.5085749540925025), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8288468615043867) ,Test_AUC2 (0.827998750055801) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.8918132864821717), Valid_auc (0.8338979578772165) ,Training Average_loss (0.4682834231257438), Train_time (0m 20s), Eval_time (0m 5s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8304875817669021) ,Test_AUC2 (0.8298523059406429) , Test_eval_time (0m 1s) \u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.9163713387233201), Valid_auc (0.82762125770864) ,Training Average_loss (0.43542053022980687), Train_time (0m 21s), Eval_time (0m 5s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.9368908819665451), Valid_auc (0.8219447823087154) ,Training Average_loss (0.39808391124010084), Train_time (0m 21s), Eval_time (0m 5s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9512577835917913), Valid_auc (0.8134022932092838) ,Training Average_loss (0.3628966775536536), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.9621829989556613), Valid_auc (0.8063179551743024) ,Training Average_loss (0.3299170748442412), Train_time (0m 23s), Eval_time (0m 4s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9705280397030178), Valid_auc (0.7977204831103429) ,Training Average_loss (0.3007042882740497), Train_time (0m 23s), Eval_time (0m 4s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.977515687032325), Valid_auc (0.7891623455604636) ,Training Average_loss (0.2717801353484392), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.9822797801595792), Valid_auc (0.7866708988612194) ,Training Average_loss (0.2502219793125987), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9864556943808293), Valid_auc (0.7770892103934162) ,Training Average_loss (0.22598991046845907), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.9892354139535886), Valid_auc (0.777309640156769) ,Training Average_loss (0.20672753287106752), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.9912570910292708), Valid_auc (0.7760757954036233) ,Training Average_loss (0.1874169176854193), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.9929694023375862), Valid_auc (0.7643399910377056) ,Training Average_loss (0.17387282488495115), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.9944476805738444), Valid_auc (0.7644427373407592) ,Training Average_loss (0.16071011853590608), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.9955180674555703), Valid_auc (0.7592727027007803) ,Training Average_loss (0.1473411712963134), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.996294925725206), Valid_auc (0.761544573188514) ,Training Average_loss (0.13911794411949813), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.9967614565573237), Valid_auc (0.7605727688510481) ,Training Average_loss (0.12489033639244734), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.9973182635748365), Valid_auc (0.7627613824694678) ,Training Average_loss (0.11840777126373721), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.9977869457638899), Valid_auc (0.7605655847897062) ,Training Average_loss (0.11049831810127944), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.9979887380840747), Valid_auc (0.7629705738002261) ,Training Average_loss (0.10070413832366468), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9983302829988199), Valid_auc (0.7621856973162907) ,Training Average_loss (0.09605863199569283), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "22\n",
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.9985733295283837), Valid_auc (0.7594084885730747) ,Training Average_loss (0.08936428741645067), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "23\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:31:19.639662 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 22:31:19.641436 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 22:31:19.642180 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m\n",
      " Epoch (23): Train_auc (0.9987877571702797), Valid_auc (0.7511952570969279) ,Training Average_loss (0.08451054384559392), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.833898 has a TestAuc of 0.830488 at epoch 2 \u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:31:20.218342 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 22:31:20.219385 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.8339869994164438), Valid_auc (0.8210636322898661) ,Training Average_loss (0.5941779354810715), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.812851623389593) ,Test_AUC2 (0.8121201226455744) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8660068647759662), Valid_auc (0.8330637887743708) ,Training Average_loss (0.5046506046652794), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.826829127848216) ,Test_AUC2 (0.8243316390605321) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.8916466855290988), Valid_auc (0.835479980652825) ,Training Average_loss (0.47032388734817504), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8287144437054221) ,Test_AUC2 (0.8253458986965988) , Test_eval_time (0m 1s) \u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.9155322487140534), Valid_auc (0.830075005868169) ,Training Average_loss (0.43308531907200803), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.9368418527900039), Valid_auc (0.8249936694905007) ,Training Average_loss (0.39565566325187684), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9532959882731156), Valid_auc (0.8130414541678225) ,Training Average_loss (0.35596134448051453), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.9644602754766072), Valid_auc (0.8040069635604492) ,Training Average_loss (0.32105516254901895), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9724398687655947), Valid_auc (0.7970984927697047) ,Training Average_loss (0.2909412908405065), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.9781591163686935), Valid_auc (0.7953094125429443) ,Training Average_loss (0.26294847433269025), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.9827799054423182), Valid_auc (0.7857964705631307) ,Training Average_loss (0.23876441429555417), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9856319982945061), Valid_auc (0.7862576730754185) ,Training Average_loss (0.22077889522165065), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.9887504321755259), Valid_auc (0.7744441243625034) ,Training Average_loss (0.20115966553986073), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.9907518229713808), Valid_auc (0.7746578324050957) ,Training Average_loss (0.1869544427990913), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.9920756515409679), Valid_auc (0.7748107604435626) ,Training Average_loss (0.17136969229206442), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.9933355202448623), Valid_auc (0.7715130984643179) ,Training Average_loss (0.15893637101724742), Train_time (0m 21s), Eval_time (0m 5s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.9945554188330487), Valid_auc (0.76691675735655) ,Training Average_loss (0.14740876599587502), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.9951464835495648), Valid_auc (0.7697225245218331) ,Training Average_loss (0.13551803675107657), Train_time (0m 22s), Eval_time (0m 5s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.9959305159425325), Valid_auc (0.7660404796961355) ,Training Average_loss (0.1294288121815771), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.9966598627463398), Valid_auc (0.7614119525709693) ,Training Average_loss (0.12089320296235381), Train_time (0m 22s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.9971527395003188), Valid_auc (0.762559304070731) ,Training Average_loss (0.11174115028232336), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.9975497556615365), Valid_auc (0.7616063490031226) ,Training Average_loss (0.1057051745755598), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9977543865887895), Valid_auc (0.7633166179430824) ,Training Average_loss (0.10115300284139814), Train_time (0m 21s), Eval_time (0m 5s)\u001b[0m\n",
      "22\n",
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.9982081908925426), Valid_auc (0.7602361849077808) ,Training Average_loss (0.09266224930994214), Train_time (0m 19s), Eval_time (0m 4s)\u001b[0m\n",
      "23\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:41:55.789855 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 22:41:55.791754 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 22:41:55.792600 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m\n",
      " Epoch (23): Train_auc (0.9983602491847694), Valid_auc (0.7588791441720192) ,Training Average_loss (0.0879961883528158), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.835480 has a TestAuc of 0.828714 at epoch 2 \u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:41:56.449581 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 22:41:56.450545 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.8301259289621652), Valid_auc (0.818085518781697) ,Training Average_loss (0.6035055741071701), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8115126720642497) ,Test_AUC2 (0.8108060972060419) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8634336498671435), Valid_auc (0.834423674682941) ,Training Average_loss (0.5069363422989845), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8277637324421974) ,Test_AUC2 (0.8258984693943674) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.8900960220125369), Valid_auc (0.8371665990938124) ,Training Average_loss (0.4710833481550217), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8305882662274559) ,Test_AUC2 (0.8276773038326022) , Test_eval_time (0m 1s) \u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.9150307129284221), Valid_auc (0.831788653450839) ,Training Average_loss (0.43605574980378153), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.9370248973425913), Valid_auc (0.8224341164671489) ,Training Average_loss (0.39861209723353386), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9525415324376751), Valid_auc (0.8120010456010073) ,Training Average_loss (0.35998521035909653), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.9643747772397501), Valid_auc (0.8018389063155722) ,Training Average_loss (0.32490974980592724), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9725490143274579), Valid_auc (0.7950866710766846) ,Training Average_loss (0.2938415859490633), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.9780282532225936), Valid_auc (0.7883223082886996) ,Training Average_loss (0.2695764558017254), Train_time (0m 18s), Eval_time (0m 4s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.9831313926823282), Valid_auc (0.7833095761403808) ,Training Average_loss (0.24452226147055625), Train_time (0m 18s), Eval_time (0m 4s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9867241403632118), Valid_auc (0.7789490287291324) ,Training Average_loss (0.22123391902446746), Train_time (0m 18s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.9892332833981705), Valid_auc (0.7753342011110399) ,Training Average_loss (0.20397202359884978), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.9913540172997708), Valid_auc (0.7691747576268414) ,Training Average_loss (0.18929640534147624), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.9930104301304489), Valid_auc (0.7665437552013314) ,Training Average_loss (0.17310041538625956), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.9939818633937854), Valid_auc (0.7680312826750315) ,Training Average_loss (0.15842328776046632), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.9953453108534599), Valid_auc (0.7603149250652612) ,Training Average_loss (0.14810986867733303), Train_time (0m 19s), Eval_time (0m 4s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.9960552258397702), Valid_auc (0.7639784407030421) ,Training Average_loss (0.1390450201081112), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.996581914660245), Valid_auc (0.756701555598233) ,Training Average_loss (0.13041794713865967), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.9971688510756131), Valid_auc (0.7606572349188059) ,Training Average_loss (0.12216800561919804), Train_time (0m 18s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.9975244978190473), Valid_auc (0.7652660592222721) ,Training Average_loss (0.11360853126086294), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.9979342101064113), Valid_auc (0.7659794863040494) ,Training Average_loss (0.10710323501192033), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9982714803093283), Valid_auc (0.7599791235445161) ,Training Average_loss (0.09879627855261788), Train_time (0m 19s), Eval_time (0m 4s)\u001b[0m\n",
      "22\n",
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.99840019849896), Valid_auc (0.7596774996621357) ,Training Average_loss (0.09435057895025237), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "23\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:52:06.101148 140179506145024 configuration_utils.py:283] loading configuration file pretrained_py_models/orig_45M_chkp_transcli/config.json\n",
      "I0520 22:52:06.103366 140179506145024 configuration_utils.py:321] Model config BertConfig {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 192,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 64,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 6,\n",
      "  \"num_hidden_layers\": 6,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"type_vocab_size\": 1000,\n",
      "  \"vocab_size\": 82603\n",
      "}\n",
      "\n",
      "I0520 22:52:06.104133 140179506145024 modeling_utils.py:615] loading weights file pretrained_py_models/orig_45M_chkp_transcli/pytorch_model.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m\n",
      " Epoch (23): Train_auc (0.9986690437104654), Valid_auc (0.763382021353022) ,Training Average_loss (0.0882242513142992), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.837167 has a TestAuc of 0.830588 at epoch 2 \u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I0520 22:52:06.758632 140179506145024 modeling_utils.py:708] Weights of BertForSequenceClassification not initialized from pretrained model: ['classifier.weight', 'classifier.bias']\n",
      "I0520 22:52:06.759719 140179506145024 modeling_utils.py:714] Weights from pretrained model not used in BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "\u001b[32m\n",
      " Epoch (0): Train_auc (0.8376651413265975), Valid_auc (0.8220339429116077) ,Training Average_loss (0.600064018368721), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8151119259734637) ,Test_AUC2 (0.8135775108606104) , Test_eval_time (0m 1s) \u001b[0m\n",
      "1\n",
      "\u001b[32m\n",
      " Epoch (1): Train_auc (0.8723321461875344), Valid_auc (0.8351496560897368) ,Training Average_loss (0.5025597510933876), Train_time (0m 19s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[33m\n",
      " Test_AUC1 (0.8295651371748665) ,Test_AUC2 (0.8254028597993658) , Test_eval_time (0m 1s) \u001b[0m\n",
      "2\n",
      "\u001b[32m\n",
      " Epoch (2): Train_auc (0.9000102316263685), Valid_auc (0.8342374581226126) ,Training Average_loss (0.4621628828644754), Train_time (0m 19s), Eval_time (0m 4s)\u001b[0m\n",
      "3\n",
      "\u001b[32m\n",
      " Epoch (3): Train_auc (0.9241180102213152), Valid_auc (0.8307160944312855) ,Training Average_loss (0.42433583843708045), Train_time (0m 18s), Eval_time (0m 4s)\u001b[0m\n",
      "4\n",
      "\u001b[32m\n",
      " Epoch (4): Train_auc (0.9435316327907235), Valid_auc (0.8228363527729765) ,Training Average_loss (0.383978241890669), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "5\n",
      "\u001b[32m\n",
      " Epoch (5): Train_auc (0.9586000863922219), Valid_auc (0.8152616136397585) ,Training Average_loss (0.34698220898210996), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "6\n",
      "\u001b[32m\n",
      " Epoch (6): Train_auc (0.967403026942087), Valid_auc (0.8071290428127378) ,Training Average_loss (0.3102362406998873), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "7\n",
      "\u001b[32m\n",
      " Epoch (7): Train_auc (0.9749653009888044), Valid_auc (0.802947919111737) ,Training Average_loss (0.2803702809065581), Train_time (0m 19s), Eval_time (0m 4s)\u001b[0m\n",
      "8\n",
      "\u001b[32m\n",
      " Epoch (8): Train_auc (0.9805685681314532), Valid_auc (0.797900724807773) ,Training Average_loss (0.2548147220760584), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "9\n",
      "\u001b[32m\n",
      " Epoch (9): Train_auc (0.9851600006636004), Valid_auc (0.7885742483409086) ,Training Average_loss (0.23063851104676725), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "10\n",
      "\u001b[32m\n",
      " Epoch (10): Train_auc (0.9883411111167009), Valid_auc (0.7848152415907361) ,Training Average_loss (0.20948893672972918), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "11\n",
      "\u001b[32m\n",
      " Epoch (11): Train_auc (0.9907024497697439), Valid_auc (0.7842922988285002) ,Training Average_loss (0.1926160546652973), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "12\n",
      "\u001b[32m\n",
      " Epoch (12): Train_auc (0.992703260523286), Valid_auc (0.7782337167203692) ,Training Average_loss (0.17550936150178312), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "13\n",
      "\u001b[32m\n",
      " Epoch (13): Train_auc (0.9935873242132568), Valid_auc (0.7777411461778659) ,Training Average_loss (0.16105923190712929), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "14\n",
      "\u001b[32m\n",
      " Epoch (14): Train_auc (0.994813870486373), Valid_auc (0.7796810561281464) ,Training Average_loss (0.15154551466554403), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "15\n",
      "\u001b[32m\n",
      " Epoch (15): Train_auc (0.9954579158676805), Valid_auc (0.7807986755720575) ,Training Average_loss (0.13957550731487572), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "16\n",
      "\u001b[32m\n",
      " Epoch (16): Train_auc (0.9966534238766422), Valid_auc (0.7744244215408034) ,Training Average_loss (0.12713088336214423), Train_time (0m 17s), Eval_time (0m 4s)\u001b[0m\n",
      "17\n",
      "\u001b[32m\n",
      " Epoch (17): Train_auc (0.9970537986828634), Valid_auc (0.773046539914218) ,Training Average_loss (0.11914840761851518), Train_time (0m 18s), Eval_time (0m 4s)\u001b[0m\n",
      "18\n",
      "\u001b[32m\n",
      " Epoch (18): Train_auc (0.9976115353681928), Valid_auc (0.7696506839084138) ,Training Average_loss (0.11133709309995174), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "19\n",
      "\u001b[32m\n",
      " Epoch (19): Train_auc (0.9978946024171415), Valid_auc (0.772074984529373) ,Training Average_loss (0.1059601471154019), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "20\n",
      "\u001b[32m\n",
      " Epoch (20): Train_auc (0.9983404789425856), Valid_auc (0.7740875886449152) ,Training Average_loss (0.09500090651772916), Train_time (0m 20s), Eval_time (0m 4s)\u001b[0m\n",
      "21\n",
      "\u001b[32m\n",
      " Epoch (21): Train_auc (0.9984386965072817), Valid_auc (0.7712303949811151) ,Training Average_loss (0.09322904351726174), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "22\n",
      "\u001b[32m\n",
      " Epoch (22): Train_auc (0.9986648738062824), Valid_auc (0.772684136027712) ,Training Average_loss (0.08553675830061551), Train_time (0m 21s), Eval_time (0m 4s)\u001b[0m\n",
      "\u001b[32mBestValidAuc 0.835150 has a TestAuc of 0.829565 at epoch 1 \u001b[0m\n"
     ]
    }
   ],
   "source": [
    "MAX_SEQ_LENGTH = 64\n",
    "BATCH_SIZE = 100\n",
    "EARNING_RATE = 1e-5\n",
    "bert_config_file= \"config.json\"\n",
    "\n",
    "results=[]\n",
    "\n",
    "#### Data Preparation\n",
    "train_features = convert_EHRexamples_to_features(train_f, MAX_SEQ_LENGTH) \n",
    "test_features = convert_EHRexamples_to_features(test_f, MAX_SEQ_LENGTH)\n",
    "test_features2 = convert_EHRexamples_to_features(test_f2, MAX_SEQ_LENGTH)\n",
    "valid_features = convert_EHRexamples_to_features(valid_f, MAX_SEQ_LENGTH)\n",
    "train = BERTdataEHR(train_features)\n",
    "test = BERTdataEHR(test_features)\n",
    "test2 = BERTdataEHR(test_features2)\n",
    "valid = BERTdataEHR(valid_features)\n",
    "print (' creating the list of training minibatches')\n",
    "train_mbs = list(BERTdataEHRloader(train, batch_size = BATCH_SIZE))\n",
    "print (' creating the list of test minibatches')\n",
    "test_mbs = list(BERTdataEHRloader(test, batch_size = BATCH_SIZE))\n",
    "print (' creating the list of test2 minibatches')\n",
    "test_mbs2 = list(BERTdataEHRloader(test2, batch_size = BATCH_SIZE))\n",
    "print (' creating the list of valid minibatches')\n",
    "valid_mbs = list(BERTdataEHRloader(valid, batch_size = BATCH_SIZE))\n",
    "\n",
    "for run in range(10):### to average the results on 10 runs\n",
    "    for model_type in ['Bert only']:              \n",
    "            ehr_model = EHR_BERT_LR(input_size= 90000, embed_dim=192, hidden_size=192) \n",
    "            if use_cuda:\n",
    "                ehr_model.cuda()\n",
    "            optimizer = optim.Adam(ehr_model.parameters(), lr=LEARNING_RATE)\n",
    "            out_dir_name='test_LR_Bert_BiGRU_FC'#+ str(i)\n",
    "            trauc,vauc,testauc1,testauc2,bep=epochs_run(500,train = train_mbs, \n",
    "                                  valid = valid_mbs, \n",
    "                                  test1 = test_mbs,test2=test_mbs2, \n",
    "                                  model = ehr_model, \n",
    "                                  optimizer = optimizer,\n",
    "                                  shuffle = True, \n",
    "                                  #batch_size = args.batch_size, \n",
    "                                  patience = 20,\n",
    "                                  output_dir = out_dir_name,\n",
    "                                  model_prefix = 'first_run')\n",
    "            results.append([model_type,run,len(train_features),len(test_features),len(valid_features),trauc,vauc,testauc1,testauc2,bep])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "df=pd.DataFrame(results)\n",
    "df.columns=['Model','Run','Train_size','Test_size','Valid_size','Train_AUC','Valid_AUC','Test_AUC1','Test_AUC2','Best_Epoch']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Model</th>\n",
       "      <th>Run</th>\n",
       "      <th>Train_size</th>\n",
       "      <th>Test_size</th>\n",
       "      <th>Valid_size</th>\n",
       "      <th>Train_AUC</th>\n",
       "      <th>Valid_AUC</th>\n",
       "      <th>Test_AUC1</th>\n",
       "      <th>Test_AUC2</th>\n",
       "      <th>Best_Epoch</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>0</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.894582</td>\n",
       "      <td>0.834894</td>\n",
       "      <td>0.830015</td>\n",
       "      <td>0.827995</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>1</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.869437</td>\n",
       "      <td>0.832315</td>\n",
       "      <td>0.828231</td>\n",
       "      <td>0.827004</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>2</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.891686</td>\n",
       "      <td>0.833559</td>\n",
       "      <td>0.829083</td>\n",
       "      <td>0.826013</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>3</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.895434</td>\n",
       "      <td>0.832507</td>\n",
       "      <td>0.829849</td>\n",
       "      <td>0.826455</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>4</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.894967</td>\n",
       "      <td>0.832715</td>\n",
       "      <td>0.828891</td>\n",
       "      <td>0.825106</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>5</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.897067</td>\n",
       "      <td>0.833284</td>\n",
       "      <td>0.827417</td>\n",
       "      <td>0.824605</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>6</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.891813</td>\n",
       "      <td>0.833898</td>\n",
       "      <td>0.830488</td>\n",
       "      <td>0.829852</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>7</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.891647</td>\n",
       "      <td>0.835480</td>\n",
       "      <td>0.828714</td>\n",
       "      <td>0.825346</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>8</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.890096</td>\n",
       "      <td>0.837167</td>\n",
       "      <td>0.830588</td>\n",
       "      <td>0.827677</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>Bert only</td>\n",
       "      <td>9</td>\n",
       "      <td>49999</td>\n",
       "      <td>15000</td>\n",
       "      <td>7500</td>\n",
       "      <td>0.872332</td>\n",
       "      <td>0.835150</td>\n",
       "      <td>0.829565</td>\n",
       "      <td>0.825403</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       Model  Run  Train_size  Test_size  Valid_size  Train_AUC  Valid_AUC  \\\n",
       "0  Bert only    0       49999      15000        7500   0.894582   0.834894   \n",
       "1  Bert only    1       49999      15000        7500   0.869437   0.832315   \n",
       "2  Bert only    2       49999      15000        7500   0.891686   0.833559   \n",
       "3  Bert only    3       49999      15000        7500   0.895434   0.832507   \n",
       "4  Bert only    4       49999      15000        7500   0.894967   0.832715   \n",
       "5  Bert only    5       49999      15000        7500   0.897067   0.833284   \n",
       "6  Bert only    6       49999      15000        7500   0.891813   0.833898   \n",
       "7  Bert only    7       49999      15000        7500   0.891647   0.835480   \n",
       "8  Bert only    8       49999      15000        7500   0.890096   0.837167   \n",
       "9  Bert only    9       49999      15000        7500   0.872332   0.835150   \n",
       "\n",
       "   Test_AUC1  Test_AUC2  Best_Epoch  \n",
       "0   0.830015   0.827995           2  \n",
       "1   0.828231   0.827004           1  \n",
       "2   0.829083   0.826013           2  \n",
       "3   0.829849   0.826455           2  \n",
       "4   0.828891   0.825106           2  \n",
       "5   0.827417   0.824605           2  \n",
       "6   0.830488   0.829852           2  \n",
       "7   0.828714   0.825346           2  \n",
       "8   0.830588   0.827677           2  \n",
       "9   0.829565   0.825403           1  "
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Uncomment to persist the per-run results table to CSV for later comparison\n",
     "# df.to_csv('DHF_RNN_multirun_shuffled_1.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Summarize Test_AUC1 / Test_AUC2 across the 10 runs, grouped by model and training-set size\n",
    "desc2 = df[['Model', 'Train_size', 'Test_AUC1']].groupby(['Model', 'Train_size']).describe()\n",
    "desc3 = df[['Model', 'Train_size', 'Test_AUC2']].groupby(['Model', 'Train_size']).describe()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead tr th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe thead tr:last-of-type th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th colspan=\"8\" halign=\"left\">Test_AUC2</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th>count</th>\n",
       "      <th>mean</th>\n",
       "      <th>std</th>\n",
       "      <th>min</th>\n",
       "      <th>25%</th>\n",
       "      <th>50%</th>\n",
       "      <th>75%</th>\n",
       "      <th>max</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Model</th>\n",
       "      <th>Train_size</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>Bert only</th>\n",
       "      <th>49999</th>\n",
       "      <td>10.0</td>\n",
       "      <td>0.826546</td>\n",
       "      <td>0.001613</td>\n",
       "      <td>0.824605</td>\n",
       "      <td>0.82536</td>\n",
       "      <td>0.826234</td>\n",
       "      <td>0.827509</td>\n",
       "      <td>0.829852</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                     Test_AUC2                                         \\\n",
       "                         count      mean       std       min      25%   \n",
       "Model     Train_size                                                    \n",
       "Bert only 49999           10.0  0.826546  0.001613  0.824605  0.82536   \n",
       "\n",
       "                                                    \n",
       "                           50%       75%       max  \n",
       "Model     Train_size                                \n",
       "Bert only 49999       0.826234  0.827509  0.829852  "
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "desc3"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "collapsed_sections": [],
    "name": "Predicting Diabetes patient risk to develop Heart Failure with Med-BERT.ipynb",
   "provenance": [],
   "version": "0.3.2"
  },
  "kernelspec": {
   "display_name": "py_37_env",
   "language": "python",
   "name": "py_37_env"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
