{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T16:49:01.258615Z",
     "start_time": "2020-02-07T16:49:00.044862Z"
    }
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import time\n",
    "import os\n",
    "import copy\n",
    "import pickle\n",
    "import shutil\n",
    "import torch\n",
    "import torch.optim as optim\n",
    "import torchvision\n",
    "from distutils.dir_util import copy_tree\n",
    "from sklearn.model_selection import StratifiedKFold, train_test_split\n",
    "from sklearn import metrics\n",
    "from torchvision import datasets, models, transforms"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T16:49:02.136623Z",
     "start_time": "2020-02-07T16:49:02.104708Z"
    }
   },
   "outputs": [],
   "source": [
     "# Absolute Windows paths to the image folders used throughout the notebook.\n",
     "# NOTE(review): hardcoded local paths -- consider a configurable DATA_DIR.\n",
     "train_dir = 'C:/Users/joey3/Desktop/Apnea_Train'\n",
     "test_dir = 'C:/Users/joey3/Desktop/Apnea_Test'\n",
     "raw_dir = 'C:/Users/joey3/Desktop/Apnea_Raw'\n",
     "# Patient file lists; both provide 'file' (and train_df a 'group' column\n",
     "# used for stratification by the training helpers below).\n",
     "train_df = pd.read_csv('../resources/File_train.csv')\n",
     "test_df = pd.read_csv('../resources/File_test.csv')\n",
     "# Train on GPU when available, otherwise fall back to CPU.\n",
     "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T16:47:00.749413Z",
     "start_time": "2020-02-07T16:46:52.315099Z"
    }
   },
   "outputs": [],
   "source": [
    "shutil.rmtree(test_dir)\n",
    "os.mkdir(test_dir)\n",
    "os.mkdir(f'{test_dir}/0')\n",
    "os.mkdir(f'{test_dir}/1')\n",
    "\n",
    "# Prepare testing image folders\n",
    "for file in test_df['file']:\n",
    "    copy_tree(f'{raw_dir}/{file}/0/', f'{test_dir}/0/', verbose=0)\n",
    "    copy_tree(f'{raw_dir}/{file}/1/', f'{test_dir}/1/', verbose=0)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T16:49:26.918216Z",
     "start_time": "2020-02-07T16:49:26.904251Z"
    }
   },
   "outputs": [],
   "source": [
     "def train_model(model, criterion, optimizer, scheduler, num_epochs=5, batch_size=8):\n",
     "    \"\"\"Fine-tune `model` on the ImageFolder data under `train_dir`.\n",
     "\n",
     "    Expects `{train_dir}/train` and `{train_dir}/val` to already exist\n",
     "    (they are prepared by train_model_cv / train_model_single_split).\n",
     "    Tracks the best validation accuracy seen across epochs and returns\n",
     "    the model with those best weights loaded.\n",
     "\n",
     "    Args:\n",
     "        model: torch.nn.Module already moved to `device`.\n",
     "        criterion: loss function (e.g. CrossEntropyLoss).\n",
     "        optimizer: optimizer bound to model.parameters().\n",
     "        scheduler: LR scheduler, stepped once per *training* epoch.\n",
     "        num_epochs: number of epochs to run.\n",
     "        batch_size: mini-batch size for both phases.\n",
     "\n",
     "    Returns:\n",
     "        The model with its best-validation-accuracy weights loaded.\n",
     "    \"\"\"\n",
     "    # Preparation work\n",
     "    # NOTE(review): train and val share identical transforms -- no data\n",
     "    # augmentation is applied to the training set.\n",
     "    data_transforms = {\n",
     "        'train': transforms.Compose([\n",
     "            transforms.Resize((224, 224)),\n",
     "            transforms.ToTensor(),\n",
     "        ]),\n",
     "        'val': transforms.Compose([\n",
     "            transforms.Resize((224, 224)),\n",
     "            transforms.ToTensor(),\n",
     "        ]),\n",
     "    }\n",
     "\n",
     "    image_datasets = {\n",
     "        x: datasets.ImageFolder(\n",
     "            os.path.join(train_dir, x), \n",
     "            transform=data_transforms[x],\n",
     "        ) \n",
     "        for x in ['train', 'val']\n",
     "    }\n",
     "    dataloaders = {\n",
     "        x: torch.utils.data.DataLoader(\n",
     "            image_datasets[x], \n",
     "            batch_size=batch_size,\n",
     "            shuffle=True, \n",
     "            num_workers=8,\n",
     "        )\n",
     "        for x in ['train', 'val']\n",
     "    }\n",
     "    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n",
     "    # NOTE(review): class_names is not used below.\n",
     "    class_names = image_datasets['train'].classes\n",
     "    best_model_wts = copy.deepcopy(model.state_dict())\n",
     "    best_acc = 0.0\n",
     "    since = time.time()\n",
     "\n",
     "    for epoch in range(num_epochs):\n",
     "        print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n",
     "        print('-' * 10)\n",
     "\n",
     "        # Each epoch has a training and validation phase\n",
     "        for phase in ['train', 'val']:\n",
     "            if phase == 'train':\n",
     "                model.train()  # Set model to training mode\n",
     "            else:\n",
     "                model.eval()   # Set model to evaluate mode\n",
     "\n",
     "            running_loss = 0.0\n",
     "            running_corrects = 0\n",
     "\n",
     "            # Iterate over data.\n",
     "            for inputs, labels in dataloaders[phase]:\n",
     "                inputs = inputs.to(device)\n",
     "                labels = labels.to(device)\n",
     "\n",
     "                # zero the parameter gradients\n",
     "                optimizer.zero_grad()\n",
     "\n",
     "                # forward\n",
     "                # track history if only in train\n",
     "                with torch.set_grad_enabled(phase == 'train'):\n",
     "                    outputs = model(inputs)\n",
     "                    _, preds = torch.max(outputs, 1)\n",
     "                    loss = criterion(outputs, labels)\n",
     "\n",
     "                    # backward + optimize only if in training phase\n",
     "                    if phase == 'train':\n",
     "                        loss.backward()\n",
     "                        optimizer.step()\n",
     "\n",
     "                # statistics\n",
     "                # Weight batch loss by batch size so epoch_loss is a\n",
     "                # per-sample average even for a short final batch.\n",
     "                running_loss += loss.item() * inputs.size(0)\n",
     "                running_corrects += torch.sum(preds == labels.data)\n",
     "                \n",
     "            if phase == 'train':\n",
     "                scheduler.step()\n",
     "\n",
     "            epoch_loss = running_loss / dataset_sizes[phase]\n",
     "            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
     "\n",
     "            print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n",
     "                phase, epoch_loss, epoch_acc))\n",
     "\n",
     "            # deep copy the model\n",
     "            if phase == 'val' and epoch_acc > best_acc:\n",
     "                best_acc = epoch_acc\n",
     "                best_model_wts = copy.deepcopy(model.state_dict())\n",
     "\n",
     "        print()\n",
     "\n",
     "    time_elapsed = time.time() - since\n",
     "    print('Training complete in {:.0f}m {:.0f}s'.format(\n",
     "        time_elapsed // 60, time_elapsed % 60))\n",
     "    print('Best val Acc: {:4f}'.format(best_acc))\n",
     "\n",
     "    # load best model weights\n",
     "    model.load_state_dict(best_model_wts)\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T06:31:28.294134Z",
     "start_time": "2020-02-07T06:31:28.286130Z"
    }
   },
   "outputs": [],
   "source": [
    "def val_performance(model, batch_size=8):\n",
    "    model.eval()   # Set model to evaluate mode\n",
    "\n",
    "    running_count, running_corrects = 0, 0\n",
    "    y_true, y_pred = [], []\n",
    "    # Iterate over testing patients\n",
    "    data_transforms = transforms.Compose([\n",
    "        transforms.Resize((224, 224)),\n",
    "        transforms.ToTensor(),\n",
    "    ])\n",
    "\n",
    "    image_datasets = datasets.ImageFolder(\n",
    "        f'{train_dir}/val', \n",
    "        transform=data_transforms,\n",
    "    ) \n",
    "    dataloaders = torch.utils.data.DataLoader(\n",
    "        image_datasets, \n",
    "        batch_size=batch_size,\n",
    "        shuffle=True, \n",
    "        num_workers=8,\n",
    "    )\n",
    "\n",
    "    # Iterate over data.\n",
    "    for inputs, labels in dataloaders:\n",
    "        inputs = inputs.to(device)\n",
    "        labels = labels.to(device)\n",
    "\n",
    "        with torch.set_grad_enabled(False):\n",
    "            outputs = model(inputs)\n",
    "            _, preds = torch.max(outputs, 1)\n",
    "\n",
    "        # statistics\n",
    "        running_count += len(preds)\n",
    "        running_corrects += torch.sum(preds == labels.data)\n",
    "        \n",
    "        # Record result\n",
    "        y_true += labels.data.tolist()\n",
    "        y_pred += preds.tolist()\n",
    "\n",
    "    epoch_acc = running_corrects.double() / running_count\n",
    "    print('Validation Acc: {:.4f}'.format(epoch_acc))\n",
    "    return y_true, y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T17:51:11.237974Z",
     "start_time": "2020-02-07T17:51:11.230995Z"
    }
   },
   "outputs": [],
   "source": [
    "def test_performance(model, batch_size=8):\n",
    "    model.eval()   # Set model to evaluate mode\n",
    "\n",
    "    y_true, y_pred = [], []\n",
    "    # Iterate over testing patients\n",
    "    data_transforms = transforms.Compose([\n",
    "        transforms.Resize((224, 224)),\n",
    "        transforms.ToTensor(),\n",
    "    ])\n",
    "\n",
    "    image_datasets = datasets.ImageFolder(\n",
    "        test_dir, \n",
    "        transform=data_transforms,\n",
    "    ) \n",
    "    dataloaders = torch.utils.data.DataLoader(\n",
    "        image_datasets, \n",
    "        batch_size=batch_size,\n",
    "        shuffle=True, \n",
    "        num_workers=8,\n",
    "    )\n",
    "\n",
    "    # Iterate over data.\n",
    "    for inputs, labels in dataloaders:\n",
    "        inputs = inputs.to(device)\n",
    "        labels = labels.to(device)\n",
    "\n",
    "        with torch.set_grad_enabled(False):\n",
    "            outputs = model(inputs)\n",
    "            _, preds = torch.max(outputs, 1)\n",
    "\n",
    "        # Record result\n",
    "        y_true += labels.data.tolist()\n",
    "        y_pred += preds.tolist()\n",
    "\n",
    "    return y_true, y_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T06:31:28.304106Z",
     "start_time": "2020-02-07T06:31:28.295106Z"
    }
   },
   "outputs": [],
   "source": [
    "def train_model_cv(file_df, model, criterion, optimizer, scheduler, num_epochs=5, batch_size=8, n=4):\n",
    "    skf = StratifiedKFold(n_splits=n)\n",
    "    y_true, y_pred = [], []\n",
    "    for idx_train, idx_val in skf.split(file_df, file_df['group']):\n",
    "        print('*' * 20)\n",
    "        print('Prepare folders')\n",
    "        shutil.rmtree(train_dir)\n",
    "        os.mkdir(train_dir)\n",
    "        os.mkdir(f'{train_dir}/train')\n",
    "        os.mkdir(f'{train_dir}/train/0')\n",
    "        os.mkdir(f'{train_dir}/train/1')\n",
    "        os.mkdir(f'{train_dir}/val')\n",
    "        os.mkdir(f'{train_dir}/val/0')\n",
    "        os.mkdir(f'{train_dir}/val/1')\n",
    "        file_train, file_val = file_df.loc[idx_train, 'file'], file_df.loc[idx_val, 'file']\n",
    "        \n",
    "        # Prepare training image folders\n",
    "        for file in list(file_train):\n",
    "            copy_tree(f'{raw_dir}/{file}/0/', f'{train_dir}/train/0/', verbose=0)\n",
    "            copy_tree(f'{raw_dir}/{file}/1/', f'{train_dir}/train/1/', verbose=0)\n",
    "        \n",
    "        # Prepare validation image folders\n",
    "        for file in list(file_val):\n",
    "            copy_tree(f'{raw_dir}/{file}/0/', f'{train_dir}/val/0/', verbose=0)\n",
    "            copy_tree(f'{raw_dir}/{file}/1/', f'{train_dir}/val/1/', verbose=0)\n",
    "        \n",
    "        # Model training and reporting results on the validation set\n",
    "        print('Training starts')\n",
    "        model_trained = train_model(model, criterion, optimizer, scheduler, num_epochs=num_epochs, batch_size=batch_size)\n",
    "        y_true_, y_pred_ = val_performance(model_trained, batch_size=batch_size)\n",
    "        y_true += y_true_\n",
    "        y_pred += y_pred_\n",
    "        \n",
    "    return y_true, y_pred "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T16:50:32.267090Z",
     "start_time": "2020-02-07T16:50:32.260108Z"
    }
   },
   "outputs": [],
   "source": [
    "def train_model_single_split(file_df, model, criterion, optimizer, scheduler, num_epochs=5, batch_size=8):\n",
    "    y_true, y_pred = [], []\n",
    "    file_train, file_val = train_test_split(file_df['file'], test_size=0.25, random_state=1, stratify=file_df['group'])\n",
    "    \n",
    "    print('*' * 20)\n",
    "    print('Prepare folders')\n",
    "    shutil.rmtree(train_dir)\n",
    "    os.mkdir(train_dir)\n",
    "    os.mkdir(f'{train_dir}/train')\n",
    "    os.mkdir(f'{train_dir}/train/0')\n",
    "    os.mkdir(f'{train_dir}/train/1')\n",
    "    os.mkdir(f'{train_dir}/val')\n",
    "    os.mkdir(f'{train_dir}/val/0')\n",
    "    os.mkdir(f'{train_dir}/val/1')\n",
    "    \n",
    "    # Prepare training image folders\n",
    "    for file in list(file_train):\n",
    "        copy_tree(f'{raw_dir}/{file}/0/', f'{train_dir}/train/0/', verbose=0)\n",
    "        copy_tree(f'{raw_dir}/{file}/1/', f'{train_dir}/train/1/', verbose=0)\n",
    "\n",
    "    # Prepare validation image folders\n",
    "    for file in list(file_val):\n",
    "        copy_tree(f'{raw_dir}/{file}/0/', f'{train_dir}/val/0/', verbose=0)\n",
    "        copy_tree(f'{raw_dir}/{file}/1/', f'{train_dir}/val/1/', verbose=0)\n",
    "\n",
    "    # Model training and reporting results on the validation set\n",
    "    print('Training starts')\n",
    "    model_trained = train_model(model, criterion, optimizer, scheduler, num_epochs=num_epochs, batch_size=batch_size)\n",
    "        \n",
    "    return model_trained"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# Fine-tuning"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T16:50:46.708161Z",
     "start_time": "2020-02-07T16:50:45.442419Z"
    }
   },
   "outputs": [],
   "source": [
    "# SqueezeNet\n",
    "model_ft = models.squeezenet1_0(pretrained=True)\n",
    "model_ft.classifier[1] = torch.nn.Conv2d(512, 2, kernel_size=(1,1), stride=(1,1))\n",
    "model_ft.num_classes = 2\n",
    "model_ft = model_ft.to(device)\n",
    "criterion = torch.nn.CrossEntropyLoss()\n",
    "optimizer_ft = torch.optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)\n",
    "exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T17:41:34.432544Z",
     "start_time": "2020-02-07T16:50:49.300208Z"
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "********************\n",
      "Prepare folders\n",
      "Training starts\n",
      "Epoch 0/19\n",
      "----------\n",
      "train Loss: 0.4856 Acc: 0.7785\n",
      "val Loss: 0.5294 Acc: 0.7300\n",
      "\n",
      "Epoch 1/19\n",
      "----------\n",
      "train Loss: 0.4272 Acc: 0.8162\n",
      "val Loss: 0.2702 Acc: 0.8962\n",
      "\n",
      "Epoch 2/19\n",
      "----------\n",
      "train Loss: 0.4133 Acc: 0.8250\n",
      "val Loss: 0.2538 Acc: 0.9037\n",
      "\n",
      "Epoch 3/19\n",
      "----------\n",
      "train Loss: 0.4018 Acc: 0.8332\n",
      "val Loss: 0.2663 Acc: 0.8966\n",
      "\n",
      "Epoch 4/19\n",
      "----------\n",
      "train Loss: 0.3923 Acc: 0.8341\n",
      "val Loss: 0.2695 Acc: 0.9040\n",
      "\n",
      "Epoch 5/19\n",
      "----------\n",
      "train Loss: 0.3852 Acc: 0.8384\n",
      "val Loss: 0.2708 Acc: 0.8954\n",
      "\n",
      "Epoch 6/19\n",
      "----------\n",
      "train Loss: 0.3830 Acc: 0.8401\n",
      "val Loss: 0.2833 Acc: 0.8976\n",
      "\n",
      "Epoch 7/19\n",
      "----------\n",
      "train Loss: 0.3538 Acc: 0.8547\n",
      "val Loss: 0.2520 Acc: 0.9021\n",
      "\n",
      "Epoch 8/19\n",
      "----------\n",
      "train Loss: 0.3493 Acc: 0.8545\n",
      "val Loss: 0.2574 Acc: 0.9011\n",
      "\n",
      "Epoch 9/19\n",
      "----------\n",
      "train Loss: 0.3476 Acc: 0.8573\n",
      "val Loss: 0.2587 Acc: 0.9022\n",
      "\n",
      "Epoch 10/19\n",
      "----------\n",
      "train Loss: 0.3452 Acc: 0.8559\n",
      "val Loss: 0.2715 Acc: 0.8953\n",
      "\n",
      "Epoch 11/19\n",
      "----------\n",
      "train Loss: 0.3437 Acc: 0.8580\n",
      "val Loss: 0.2587 Acc: 0.8987\n",
      "\n",
      "Epoch 12/19\n",
      "----------\n",
      "train Loss: 0.3421 Acc: 0.8576\n",
      "val Loss: 0.2642 Acc: 0.9015\n",
      "\n",
      "Epoch 13/19\n",
      "----------\n",
      "train Loss: 0.3406 Acc: 0.8594\n",
      "val Loss: 0.2603 Acc: 0.9013\n",
      "\n",
      "Epoch 14/19\n",
      "----------\n",
      "train Loss: 0.3361 Acc: 0.8605\n",
      "val Loss: 0.2589 Acc: 0.9009\n",
      "\n",
      "Epoch 15/19\n",
      "----------\n",
      "train Loss: 0.3352 Acc: 0.8595\n",
      "val Loss: 0.2570 Acc: 0.9019\n",
      "\n",
      "Epoch 16/19\n",
      "----------\n",
      "train Loss: 0.3343 Acc: 0.8618\n",
      "val Loss: 0.2574 Acc: 0.9027\n",
      "\n",
      "Epoch 17/19\n",
      "----------\n",
      "train Loss: 0.3340 Acc: 0.8613\n",
      "val Loss: 0.2555 Acc: 0.9028\n",
      "\n",
      "Epoch 18/19\n",
      "----------\n",
      "train Loss: 0.3341 Acc: 0.8610\n",
      "val Loss: 0.2603 Acc: 0.9009\n",
      "\n",
      "Epoch 19/19\n",
      "----------\n",
      "train Loss: 0.3331 Acc: 0.8618\n",
      "val Loss: 0.2581 Acc: 0.9016\n",
      "\n",
      "Training complete in 50m 5s\n",
      "Best val Acc: 0.904020\n"
     ]
    }
   ],
   "source": [
     "# Fit one model on a single 75/25 patient-level split; the trained model\n",
     "# is evaluated on the held-out test patients in the next cell.\n",
     "model_trained = train_model_single_split(\n",
     "    train_df, model_ft, \n",
     "    criterion, optimizer_ft, exp_lr_scheduler, num_epochs=20, \n",
     "    batch_size=8)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T17:51:39.257794Z",
     "start_time": "2020-02-07T17:51:16.648772Z"
    }
   },
   "outputs": [],
   "source": [
    "y_true, y_pred = test_performance(model_trained, batch_size=8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T17:52:13.684809Z",
     "start_time": "2020-02-07T17:52:13.669820Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       0.83      0.92      0.87      4171\n",
      "           1       0.85      0.70      0.77      2699\n",
      "\n",
      "    accuracy                           0.84      6870\n",
      "   macro avg       0.84      0.81      0.82      6870\n",
      "weighted avg       0.84      0.84      0.83      6870\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(metrics.classification_report(y_true, y_pred))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Cross-validation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T09:53:27.189155Z",
     "start_time": "2020-02-07T06:32:10.819641Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "********************\n",
      "Prepare folders\n",
      "Training starts\n",
      "Epoch 0/19\n",
      "----------\n",
      "train Loss: 0.4037 Acc: 0.8212\n",
      "val Loss: 0.4924 Acc: 0.7634\n",
      "\n",
      "Epoch 1/19\n",
      "----------\n",
      "train Loss: 0.3607 Acc: 0.8456\n",
      "val Loss: 0.4684 Acc: 0.8114\n",
      "\n",
      "Epoch 2/19\n",
      "----------\n",
      "train Loss: 0.3438 Acc: 0.8558\n",
      "val Loss: 0.4849 Acc: 0.8072\n",
      "\n",
      "Epoch 3/19\n",
      "----------\n",
      "train Loss: 0.3376 Acc: 0.8578\n",
      "val Loss: 0.4738 Acc: 0.8038\n",
      "\n",
      "Epoch 4/19\n",
      "----------\n",
      "train Loss: 0.3287 Acc: 0.8599\n",
      "val Loss: 0.4443 Acc: 0.8257\n",
      "\n",
      "Epoch 5/19\n",
      "----------\n",
      "train Loss: 0.3235 Acc: 0.8608\n",
      "val Loss: 0.4531 Acc: 0.8313\n",
      "\n",
      "Epoch 6/19\n",
      "----------\n",
      "train Loss: 0.3161 Acc: 0.8688\n",
      "val Loss: 0.4589 Acc: 0.8263\n",
      "\n",
      "Epoch 7/19\n",
      "----------\n",
      "train Loss: 0.2905 Acc: 0.8813\n",
      "val Loss: 0.4622 Acc: 0.8247\n",
      "\n",
      "Epoch 8/19\n",
      "----------\n",
      "train Loss: 0.2871 Acc: 0.8815\n",
      "val Loss: 0.4819 Acc: 0.8244\n",
      "\n",
      "Epoch 9/19\n",
      "----------\n",
      "train Loss: 0.2858 Acc: 0.8834\n",
      "val Loss: 0.4666 Acc: 0.8272\n",
      "\n",
      "Epoch 10/19\n",
      "----------\n",
      "train Loss: 0.2827 Acc: 0.8839\n",
      "val Loss: 0.4812 Acc: 0.8290\n",
      "\n",
      "Epoch 11/19\n",
      "----------\n",
      "train Loss: 0.2814 Acc: 0.8841\n",
      "val Loss: 0.4677 Acc: 0.8248\n",
      "\n",
      "Epoch 12/19\n",
      "----------\n",
      "train Loss: 0.2796 Acc: 0.8846\n",
      "val Loss: 0.4760 Acc: 0.8311\n",
      "\n",
      "Epoch 13/19\n",
      "----------\n",
      "train Loss: 0.2787 Acc: 0.8859\n",
      "val Loss: 0.4772 Acc: 0.8292\n",
      "\n",
      "Epoch 14/19\n",
      "----------\n",
      "train Loss: 0.2732 Acc: 0.8885\n",
      "val Loss: 0.4702 Acc: 0.8283\n",
      "\n",
      "Epoch 15/19\n",
      "----------\n",
      "train Loss: 0.2712 Acc: 0.8877\n",
      "val Loss: 0.4704 Acc: 0.8280\n",
      "\n",
      "Epoch 16/19\n",
      "----------\n",
      "train Loss: 0.2716 Acc: 0.8889\n",
      "val Loss: 0.4711 Acc: 0.8286\n",
      "\n",
      "Epoch 17/19\n",
      "----------\n",
      "train Loss: 0.2714 Acc: 0.8889\n",
      "val Loss: 0.4741 Acc: 0.8287\n",
      "\n",
      "Epoch 18/19\n",
      "----------\n",
      "train Loss: 0.2710 Acc: 0.8895\n",
      "val Loss: 0.4700 Acc: 0.8278\n",
      "\n",
      "Epoch 19/19\n",
      "----------\n",
      "train Loss: 0.2705 Acc: 0.8896\n",
      "val Loss: 0.4757 Acc: 0.8287\n",
      "\n",
      "Training complete in 49m 33s\n",
      "Best val Acc: 0.831257\n",
      "Testing Acc: 0.8313\n",
      "********************\n",
      "Prepare folders\n",
      "Training starts\n",
      "Epoch 0/19\n",
      "----------\n",
      "train Loss: 0.3598 Acc: 0.8542\n",
      "val Loss: 0.2803 Acc: 0.8929\n",
      "\n",
      "Epoch 1/19\n",
      "----------\n",
      "train Loss: 0.3558 Acc: 0.8560\n",
      "val Loss: 0.2804 Acc: 0.8935\n",
      "\n",
      "Epoch 2/19\n",
      "----------\n",
      "train Loss: 0.3562 Acc: 0.8559\n",
      "val Loss: 0.2805 Acc: 0.8937\n",
      "\n",
      "Epoch 3/19\n",
      "----------\n",
      "train Loss: 0.3548 Acc: 0.8552\n",
      "val Loss: 0.2805 Acc: 0.8932\n",
      "\n",
      "Epoch 4/19\n",
      "----------\n",
      "train Loss: 0.3540 Acc: 0.8570\n",
      "val Loss: 0.2804 Acc: 0.8932\n",
      "\n",
      "Epoch 5/19\n",
      "----------\n",
      "train Loss: 0.3541 Acc: 0.8568\n",
      "val Loss: 0.2803 Acc: 0.8932\n",
      "\n",
      "Epoch 6/19\n",
      "----------\n",
      "train Loss: 0.3538 Acc: 0.8563\n",
      "val Loss: 0.2802 Acc: 0.8929\n",
      "\n",
      "Epoch 7/19\n",
      "----------\n",
      "train Loss: 0.3531 Acc: 0.8570\n",
      "val Loss: 0.2800 Acc: 0.8931\n",
      "\n",
      "Epoch 8/19\n",
      "----------\n",
      "train Loss: 0.3534 Acc: 0.8561\n",
      "val Loss: 0.2800 Acc: 0.8931\n",
      "\n",
      "Epoch 9/19\n",
      "----------\n",
      "train Loss: 0.3535 Acc: 0.8569\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 10/19\n",
      "----------\n",
      "train Loss: 0.3537 Acc: 0.8564\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 11/19\n",
      "----------\n",
      "train Loss: 0.3531 Acc: 0.8568\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 12/19\n",
      "----------\n",
      "train Loss: 0.3525 Acc: 0.8570\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 13/19\n",
      "----------\n",
      "train Loss: 0.3530 Acc: 0.8574\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 14/19\n",
      "----------\n",
      "train Loss: 0.3533 Acc: 0.8565\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 15/19\n",
      "----------\n",
      "train Loss: 0.3530 Acc: 0.8569\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 16/19\n",
      "----------\n",
      "train Loss: 0.3531 Acc: 0.8561\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 17/19\n",
      "----------\n",
      "train Loss: 0.3529 Acc: 0.8566\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 18/19\n",
      "----------\n",
      "train Loss: 0.3532 Acc: 0.8570\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Epoch 19/19\n",
      "----------\n",
      "train Loss: 0.3527 Acc: 0.8580\n",
      "val Loss: 0.2800 Acc: 0.8929\n",
      "\n",
      "Training complete in 49m 34s\n",
      "Best val Acc: 0.893671\n",
      "Testing Acc: 0.8937\n",
      "********************\n",
      "Prepare folders\n",
      "Training starts\n",
      "Epoch 0/19\n",
      "----------\n",
      "train Loss: 0.3489 Acc: 0.8593\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 1/19\n",
      "----------\n",
      "train Loss: 0.3491 Acc: 0.8591\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 2/19\n",
      "----------\n",
      "train Loss: 0.3490 Acc: 0.8606\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 3/19\n",
      "----------\n",
      "train Loss: 0.3490 Acc: 0.8606\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 4/19\n",
      "----------\n",
      "train Loss: 0.3494 Acc: 0.8603\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 5/19\n",
      "----------\n",
      "train Loss: 0.3489 Acc: 0.8607\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 6/19\n",
      "----------\n",
      "train Loss: 0.3499 Acc: 0.8597\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 7/19\n",
      "----------\n",
      "train Loss: 0.3492 Acc: 0.8595\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 8/19\n",
      "----------\n",
      "train Loss: 0.3494 Acc: 0.8595\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 9/19\n",
      "----------\n",
      "train Loss: 0.3492 Acc: 0.8600\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 10/19\n",
      "----------\n",
      "train Loss: 0.3493 Acc: 0.8592\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 11/19\n",
      "----------\n",
      "train Loss: 0.3488 Acc: 0.8593\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 12/19\n",
      "----------\n",
      "train Loss: 0.3490 Acc: 0.8599\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 13/19\n",
      "----------\n",
      "train Loss: 0.3491 Acc: 0.8588\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 14/19\n",
      "----------\n",
      "train Loss: 0.3493 Acc: 0.8597\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 15/19\n",
      "----------\n",
      "train Loss: 0.3494 Acc: 0.8590\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 16/19\n",
      "----------\n",
      "train Loss: 0.3493 Acc: 0.8603\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 17/19\n",
      "----------\n",
      "train Loss: 0.3491 Acc: 0.8598\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 18/19\n",
      "----------\n",
      "train Loss: 0.3492 Acc: 0.8587\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Epoch 19/19\n",
      "----------\n",
      "train Loss: 0.3493 Acc: 0.8578\n",
      "val Loss: 0.2989 Acc: 0.8822\n",
      "\n",
      "Training complete in 49m 24s\n",
      "Best val Acc: 0.882161\n",
      "Testing Acc: 0.8822\n",
      "********************\n",
      "Prepare folders\n",
      "Training starts\n",
      "Epoch 0/19\n",
      "----------\n",
      "train Loss: 0.3383 Acc: 0.8688\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 1/19\n",
      "----------\n",
      "train Loss: 0.3384 Acc: 0.8694\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 2/19\n",
      "----------\n",
      "train Loss: 0.3378 Acc: 0.8685\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 3/19\n",
      "----------\n",
      "train Loss: 0.3388 Acc: 0.8700\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 4/19\n",
      "----------\n",
      "train Loss: 0.3391 Acc: 0.8686\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 5/19\n",
      "----------\n",
      "train Loss: 0.3393 Acc: 0.8686\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 6/19\n",
      "----------\n",
      "train Loss: 0.3382 Acc: 0.8690\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 7/19\n",
      "----------\n",
      "train Loss: 0.3382 Acc: 0.8688\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 8/19\n",
      "----------\n",
      "train Loss: 0.3387 Acc: 0.8689\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 9/19\n",
      "----------\n",
      "train Loss: 0.3384 Acc: 0.8692\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 10/19\n",
      "----------\n",
      "train Loss: 0.3386 Acc: 0.8687\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 11/19\n",
      "----------\n",
      "train Loss: 0.3386 Acc: 0.8698\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 12/19\n",
      "----------\n",
      "train Loss: 0.3385 Acc: 0.8699\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 13/19\n",
      "----------\n",
      "train Loss: 0.3378 Acc: 0.8693\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 14/19\n",
      "----------\n",
      "train Loss: 0.3382 Acc: 0.8691\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 15/19\n",
      "----------\n",
      "train Loss: 0.3381 Acc: 0.8696\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 16/19\n",
      "----------\n",
      "train Loss: 0.3382 Acc: 0.8686\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 17/19\n",
      "----------\n",
      "train Loss: 0.3383 Acc: 0.8698\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 18/19\n",
      "----------\n",
      "train Loss: 0.3380 Acc: 0.8701\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Epoch 19/19\n",
      "----------\n",
      "train Loss: 0.3384 Acc: 0.8685\n",
      "val Loss: 0.3314 Acc: 0.8560\n",
      "\n",
      "Training complete in 49m 24s\n",
      "Best val Acc: 0.856046\n",
      "Testing Acc: 0.8560\n"
     ]
    }
   ],
   "source": [
     "# 4-fold patient-level CV; pools out-of-fold predictions for reporting.\n",
     "y_true, y_pred = train_model_cv(\n",
     "    train_df, model_ft, \n",
     "    criterion, optimizer_ft, exp_lr_scheduler, num_epochs=20, \n",
     "    batch_size=8, n=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T12:50:59.951671Z",
     "start_time": "2020-02-07T12:50:59.945682Z"
    }
   },
   "outputs": [],
   "source": [
     "# Persist the pooled CV predictions for later comparison across models.\n",
     "res = {'y_true': y_true, 'y_pred': y_pred}\n",
     "with open('../archive/Model_CNN_CV.pkl', 'wb') as f:\n",
     "    pickle.dump(res, f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-02-07T12:52:42.092445Z",
     "start_time": "2020-02-07T12:52:42.038295Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       0.87      0.92      0.89     16549\n",
      "           1       0.85      0.79      0.82     10291\n",
      "\n",
      "    accuracy                           0.87     26840\n",
      "   macro avg       0.86      0.85      0.86     26840\n",
      "weighted avg       0.87      0.87      0.86     26840\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(metrics.classification_report(y_true, y_pred))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.1"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": false,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": true
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
