{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.nn.init as init\n",
    "import torch.optim as optim\n",
    "from torch.autograd import Variable\n",
    "from torch.nn import Linear, Conv2d, BatchNorm2d, MaxPool2d, Dropout2d\n",
    "from torch.nn.functional import relu, elu, relu6, sigmoid, tanh, softmax\n",
    "from torch.nn.parameter import Parameter\n",
    "\n",
    "from sklearn.metrics import accuracy_score, f1_score, roc_auc_score\n",
    "from sklearn.model_selection import KFold, train_test_split"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of target classes (binary problem)\n",
    "num_classes = 2\n",
    "\n",
    "# Images are grayscale, so the number of input channels is one\n",
    "channels = 1\n",
    "\n",
    "# Each input image is 256x256 pixels\n",
    "height = 256\n",
    "width = 256\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of filters in the first convolutional layer\n",
    "num_filters_conv1 = 8\n",
    "kernel_size_conv1 = 4  # square kernel: [height, width]\n",
    "stride_conv1 = 2       # [stride_height, stride_width]\n",
    "padding_conv1 = 0\n",
    "\n",
    "# Number of neurons in the first fully connected layer\n",
    "num_l1 = 1000\n",
    "\n",
    "# Source: CNN introduction jupyter notebook, ROBT 407 Course, Nazarbayev University\n",
    "def compute_conv_dim(dim_size, kernel_size=kernel_size_conv1,\n",
    "                     stride=stride_conv1, padding=padding_conv1):\n",
    "    \"\"\"Spatial output size of a convolution: floor((dim - k + 2p) / s) + 1.\n",
    "\n",
    "    The layer parameters default to the conv1 hyper-parameters above, so the\n",
    "    original single-argument call compute_conv_dim(dim) behaves unchanged.\n",
    "    \"\"\"\n",
    "    return (dim_size - kernel_size + 2 * padding) // stride + 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Net(\n",
      "  (conv_1): Conv2d(1, 8, kernel_size=(4, 4), stride=(2, 2))\n",
      "  (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  (dropout): Dropout2d(p=0.3)\n",
      "  (conv_2): Conv2d(8, 16, kernel_size=(4, 4), stride=(2, 2))\n",
      "  (pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  (l_1): Linear(in_features=3600, out_features=1000, bias=True)\n",
      "  (l_out): Linear(in_features=1000, out_features=1, bias=False)\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "# Define the network: two conv + max-pool stages followed by two fully connected layers.\n",
    "class Net(nn.Module):\n",
    "    \"\"\"Binary CNN classifier for 1x256x256 grayscale images.\n",
    "\n",
    "    forward() returns one sigmoid probability per image, matching nn.BCELoss.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(Net, self).__init__()\n",
    "        # out_dim = (input_dim - filter_dim + 2*padding) / stride + 1\n",
    "        self.conv_1 = Conv2d(in_channels=channels,\n",
    "                             out_channels=num_filters_conv1,\n",
    "                             kernel_size=kernel_size_conv1,\n",
    "                             stride=stride_conv1)\n",
    "        self.pool1 = MaxPool2d(kernel_size=2, stride=2)\n",
    "        self.conv_out_height = compute_conv_dim(height)\n",
    "        self.conv_out_width = compute_conv_dim(width)\n",
    "\n",
    "        # NOTE(review): Dropout2d zeroes whole channels; it is applied below to the\n",
    "        # flat l_1 activations, where element-wise nn.Dropout is the usual choice.\n",
    "        self.dropout = Dropout2d(p=0.3)\n",
    "\n",
    "        self.conv_2 = Conv2d(in_channels=num_filters_conv1,\n",
    "                             out_channels=num_filters_conv1 * 2,\n",
    "                             kernel_size=kernel_size_conv1,\n",
    "                             stride=stride_conv1)\n",
    "        self.pool2 = MaxPool2d(kernel_size=2, stride=2)\n",
    "\n",
    "        # Flattened size after conv1 -> pool1 -> conv2 -> pool2, derived from the\n",
    "        # hyper-parameters instead of the previous hard-coded 225 (= 15 * 15).\n",
    "        out_h = compute_conv_dim(self.conv_out_height // 2) // 2\n",
    "        out_w = compute_conv_dim(self.conv_out_width // 2) // 2\n",
    "        self.l1_in_features = num_filters_conv1 * 2 * out_h * out_w\n",
    "\n",
    "        self.l_1 = Linear(in_features=self.l1_in_features,\n",
    "                          out_features=num_l1,\n",
    "                          bias=True)\n",
    "        self.l_out = Linear(in_features=num_l1,\n",
    "                            out_features=1,\n",
    "                            bias=False)\n",
    "\n",
    "    def forward(self, x):  # x.size() = [batch, channel, height, width]\n",
    "        x = x.view(-1, channels, height, width)\n",
    "        x = self.pool1(relu(self.conv_1(x)))\n",
    "        x = self.pool2(relu(self.conv_2(x)))\n",
    "        x = x.view(-1, self.l1_in_features)\n",
    "        x = self.dropout(relu(self.l_1(x)))\n",
    "        # torch.sigmoid replaces the deprecated F.sigmoid\n",
    "        return torch.sigmoid(self.l_out(x))\n",
    "\n",
    "net = Net()\n",
    "print(net)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Keep the data as numpy arrays: downstream cells call .reshape and index\n",
    "# with numpy index arrays, which plain Python lists do not support.\n",
    "x_train = np.load('../data/x_train.npy')\n",
    "y_train = np.load('../data/y_train.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'train_test_split' is not defined",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-8-c665f9720629>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;31m#  Randomly split data into train and test sets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mX_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_test\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_test\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtrain_test_split\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtest_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrandom_state\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m42\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m: name 'train_test_split' is not defined"
     ],
     "output_type": "error"
    }
   ],
   "source": [
    "#  Randomly split data into train and test sets\n",
    "X_train, X_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.2, random_state=42)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reshape to [N, 256, 256]; np.asarray guards against list inputs.\n",
    "X_train = np.asarray(X_train).reshape(-1, 256, 256)\n",
    "X_test = np.asarray(X_test).reshape(-1, 256, 256)\n",
    "y_train = np.asarray(y_train)\n",
    "y_test = np.asarray(y_test)\n",
    "\n",
    "cv = 20\n",
    "learning_rate = 0.001\n",
    "num_epochs = 6\n",
    "print_every = 3\n",
    "\n",
    "# Run on GPU when available instead of hard-crashing on CPU-only machines.\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "names = [\"Net\"]\n",
    "nets = [Net()]\n",
    "for i in range(len(nets)):\n",
    "    print(\"{}:\".format(names[i]))\n",
    "    net = nets[i]\n",
    "    net.to(device)\n",
    "\n",
    "    # loss values recorded per batch for plotting\n",
    "    train_list = []\n",
    "    val_list = []\n",
    "\n",
    "    # Binary cross-entropy on the sigmoid output of the network\n",
    "    criterion = nn.BCELoss()\n",
    "\n",
    "    # Adam, lr=0.001 for stability\n",
    "    optimizer = optim.Adam(net.parameters(), lr=learning_rate)\n",
    "\n",
    "    # Validation tensors are built once; targets reshaped to (N, 1) so they\n",
    "    # match the network output shape expected by BCELoss.\n",
    "    x_val = torch.from_numpy(X_test).float().to(device)\n",
    "    y_val = torch.from_numpy(y_test).float().view(-1, 1).to(device)\n",
    "\n",
    "    for j in range(num_epochs + 1):\n",
    "        # KFold is used only to slice the training set into ~5% batches here,\n",
    "        # not for cross-validation.\n",
    "        kf = KFold(n_splits=cv)\n",
    "        running_loss = 0.0\n",
    "        for _, batch_index in kf.split(X_train):\n",
    "            x = torch.from_numpy(X_train[batch_index]).float().to(device)\n",
    "            y = torch.from_numpy(y_train[batch_index]).float().view(-1, 1).to(device)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "\n",
    "            # train loss, backward step and weight update\n",
    "            outputs = net(x)\n",
    "            loss = criterion(outputs, y)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            # validation loss: no graph needed, so no_grad saves memory\n",
    "            with torch.no_grad():\n",
    "                val_loss = criterion(net(x_val), y_val)\n",
    "\n",
    "            running_loss += loss.item()\n",
    "            train_list.append(loss.item())\n",
    "            val_list.append(val_loss.item())\n",
    "\n",
    "        if j % print_every == 0:\n",
    "            print(\"Epoch: {}, train loss: {}, validation_loss: {}\".format(j, running_loss / cv, val_list[-1]))\n",
    "\n",
    "    print('Finished Training')\n",
    "\n",
    "    plt.title(names[i])\n",
    "    plt.plot(train_list, label=\"Train loss\")\n",
    "    plt.plot(val_list, label=\"Validation loss\")\n",
    "    plt.legend()\n",
    "    plt.savefig('optimization.png')  # filename typo 'opimization' fixed\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): dead cell — this duplicates the loss-curve plotting at the\n",
    "# end of the training cell above; safe to delete.\n",
    "# plt.title(names[i])\n",
    "# plt.plot(train_list, label=\"Train loss\")\n",
    "# plt.plot(val_list, label=\"Validation loss\")\n",
    "# plt.legend()\n",
    "# plt.savefig('opimization.png', dpi=400)\n",
    "# plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate each network on the held-out test set and on the training set.\n",
    "# accuracy_score / roc_auc_score / f1_score come from sklearn.metrics (imports cell).\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "def predict(net, X):\n",
    "    \"\"\"Forward X through net and return sigmoid probabilities as a numpy array.\"\"\"\n",
    "    net.eval()  # disable dropout for deterministic evaluation\n",
    "    with torch.no_grad():\n",
    "        inputs = torch.from_numpy(np.asarray(X)).float().to(device)\n",
    "        return net(inputs).cpu().numpy()\n",
    "\n",
    "test_results = []\n",
    "for i in range(len(nets)):\n",
    "    print(\"{}:\".format(names[i]))\n",
    "    net = nets[i]\n",
    "\n",
    "    outputs = predict(net, X_test)\n",
    "    print('Accuracy test: {}'.format(accuracy_score(y_test, np.round(outputs))))\n",
    "    print('AUC test: {}'.format(roc_auc_score(y_test, outputs)))\n",
    "    print('F1 test: {}'.format(f1_score(y_test, outputs.round())))\n",
    "    test_results.append(roc_auc_score(y_test, outputs))\n",
    "\n",
    "    outputs = predict(net, X_train)\n",
    "    print('Accuracy train: {}'.format(accuracy_score(y_train, np.round(outputs))))\n",
    "    print('AUC train:{}'.format(roc_auc_score(y_train, outputs)))\n",
    "    # label fixed: this line previously reported the train split as 'F1 test'\n",
    "    print('F1 train: {}'.format(f1_score(y_train, outputs.round())))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
