{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Data Source 1 - https://www.kaggle.com/c/digit-recognizer/data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-1 Import required packages\n",
    "\n",
    "#pytorch utility imports\n",
    "import torch\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "\n",
    "#neural net imports\n",
    "import torch.nn as nn, torch.nn.functional as F, torch.optim as optim\n",
    "from torch.autograd import Variable\n",
    "\n",
    "#import external libraries\n",
    "import pandas as pd,numpy as np,matplotlib.pyplot as plt, os\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import confusion_matrix, accuracy_score\n",
    "%matplotlib inline\n",
    "\n",
    "#Set device to GPU or CPU based on availability\n",
    "if torch.cuda.is_available():\n",
    "    device = torch.device('cuda')\n",
    "else:\n",
    "    device = torch.device('cpu')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-2 -  Load dataset into memory\n",
    "\n",
    "\n",
    "input_folder_path = \"/input/data/MNIST/\"\n",
    "\n",
    "#The CSV contains a flat file of images, \n",
    "#i.e. each 28*28 image is flattened into a row of 784 colums \n",
    "#(1 column represents a pixel value)\n",
    "#For CNN, we would need to reshape this to our desired shape\n",
    "\n",
    "train_df = pd.read_csv(input_folder_path+\"train.csv\")\n",
    "\n",
    "#First column is the target/label\n",
    "train_labels = train_df['label'].values\n",
    "\n",
    "#Pixels values start from the 2nd column\n",
    "train_images = (train_df.iloc[:,1:].values).astype('float32')\n",
    "\n",
    "#Training and Validation Split\n",
    "train_images, val_images, train_labels, val_labels =\n",
    "train_test_split(\n",
    "train_images                                                                     ,train_labels                                                                      ,random_state=2020                                                         ,test_size=0.2)\n",
    "\n",
    "#Here we reshape the flat row into [#images,#Channels,#Width,#Height]\n",
    "#Given this a simple grayscale image, we will have just 1 channel\n",
    "train_images = train_images.reshape(train_images.shape[0],1,28, 28)\n",
    "val_images = val_images.reshape(val_images.shape[0],1,28, 28)\n",
    "\n",
    "#Also, let's plot few samples\n",
    "for i in range(0, 6):\n",
    "    plt.subplot(160 + (i+1))\n",
    "    plt.imshow(train_images[i].reshape(28,28), cmap=plt.get_cmap('gray'))\n",
    "    plt.title(train_labels[i])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-3 – Normalize data and prepare train/val datasets\n",
    "\n",
    "#Covert Train Images from pandas/numpy to tensor and normalize the values\n",
    "train_images_tensor = torch.tensor(train_images)/255.0\n",
    "train_images_tensor = train_images_tensor.view(-1,1,28,28)\n",
    "train_labels_tensor = torch.tensor(train_labels)\n",
    "\n",
    "#Create a train TensorDataset\n",
    "train_tensor = TensorDataset(train_images_tensor, train_labels_tensor)\n",
    "\n",
    "#Covert Validation Images from pandas/numpy to tensor and normalize the values\n",
    "val_images_tensor = torch.tensor(val_images)/255.0\n",
    "val_images_tensor = val_images_tensor.view(-1,1,28,28)\n",
    "val_labels_tensor = torch.tensor(val_labels)\n",
    "\n",
    "#Create a Validation TensorDataset\n",
    "val_tensor = TensorDataset(val_images_tensor, val_labels_tensor)\n",
    "\n",
    "print(\"Train Labels Shape:\",train_labels_tensor.shape)\n",
    "print(\"Train Images Shape:\",train_images_tensor.shape)\n",
    "print(\"Validation Labels Shape:\",val_labels_tensor.shape)\n",
    "print(\"Validation Images Shape:\",val_images_tensor.shape)\n",
    "\n",
    "#Load Train and Validation TensorDatasets into the data generator for Training \n",
    "train_loader = DataLoader(train_tensor, batch_size=64\n",
    ", num_workers=2, shuffle=True)\n",
    "val_loader = DataLoader(val_tensor, batch_size=64, num_workers=2, shuffle=True)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-4 – Define Convolutional Neural Network, function to train and predict\n",
    "\n",
    "#Define conv-net\n",
    "class ConvNet(nn.Module):\n",
    "    def __init__(self, num_classes=10):\n",
    "        super(ConvNet, self).__init__()\n",
    "        #First unit of convolution\n",
    "        self.conv_unit_1 = nn.Sequential(\n",
    "            nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm2d(16),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=2))\n",
    "\n",
    "        #Second unit of convolution        \n",
    "        self.conv_unit_2 = nn.Sequential(\n",
    "            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),\n",
    "            nn.BatchNorm2d(32),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=2))\n",
    "\n",
    "        #Fully connected layers\n",
    "        self.fc1 = nn.Linear(7*7*32, 128)       \n",
    "        self.fc2 = nn.Linear(128, 10)        \n",
    "    \n",
    "    #Connect the units\n",
    "    def forward(self, x):       \n",
    "        out = self.conv_unit_1(x)\n",
    "        out = self.conv_unit_2(out)\n",
     "        #Flatten conv feature maps to [batch, 7*7*32] for the fully connected layers\n",
     "        out = out.view(out.size(0), -1)\n",
     "        out = self.fc1(out)\n",
     "        #NOTE(review): there is no activation between fc1 and fc2, so the two\n",
     "        #linear layers compose into a single linear map - confirm intentional\n",
     "        out = self.fc2(out)        \n",
     "        out = F.log_softmax(out,dim=1)                                \n",
    "        return out\n",
    "\n",
    "    \n",
    "    \n",
    "#Define Functions for Model Evaluation and generating Predictions    \n",
    "def make_predictions(data_loader):\n",
    "    #Explcitly set the model to eval mode\n",
    "    model.eval()\n",
    "    test_preds = torch.LongTensor()\n",
    "    actual = torch.LongTensor()\n",
    "    \n",
    "    for data, target in data_loader:\n",
    "        \n",
    "        if torch.cuda.is_available():\n",
    "            data = data.cuda()\n",
    "        output = model(data)\n",
    "\n",
    "        #Predict output/Take the index of the output with max value\n",
    "        preds = output.cpu().data.max(1, keepdim=True)[1]\n",
    "\n",
    "        #Combine tensors from each batch\n",
    "        test_preds = torch.cat((test_preds, preds), dim=0)\n",
    "        actual  = torch.cat((actual,target),dim=0)\n",
    "        \n",
    "    return actual,test_preds\n",
    "\n",
    "#Evalute model\n",
    "def evaluate(data_loader):\n",
    "    model.eval()\n",
    "    loss = 0\n",
    "    correct = 0\n",
    "    \n",
    "    for data, target in data_loader:        \n",
    "        if torch.cuda.is_available():\n",
    "            data = data.cuda()\n",
    "            target = target.cuda()\n",
    "        output = model(data)\n",
    "        loss += F.cross_entropy(output, target, size_average=False).data.item()\n",
    "        predicted = output.data.max(1, keepdim=True)[1]   \n",
    "        correct += (target.reshape(-1,1) == predicted.reshape(-1,1)).float().sum()        \n",
    "        \n",
    "    loss /= len(data_loader.dataset)\n",
    "        \n",
    "    print('\\nAverage Val Loss: {:.4f}, Val Accuracy: {}/{} ({:.3f}%)\\n'.format(\n",
    "        loss, correct, len(data_loader.dataset),\n",
    "        100. * correct / len(data_loader.dataset)))    \n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-5 – Create model instance, define Loss and optimizer\n",
    "#Create Model  instance\n",
    "model = ConvNet(10).to(device)\n",
    "\n",
     "#Define Loss and optimizer\n",
     "#NOTE(review): ConvNet.forward already applies log_softmax, and\n",
     "#nn.CrossEntropyLoss applies log_softmax again internally; nn.NLLLoss would\n",
     "#match the model output exactly. Training still converges, but the reported\n",
     "#loss is not a standard cross-entropy - confirm intended.\n",
     "criterion = nn.CrossEntropyLoss()\n",
     "optimizer = torch.optim.Adam(model.parameters(), lr=0.001)    \n",
    "print(model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-6 – Model training\n",
    "\n",
    "num_epochs = 5\n",
    "\n",
    "# Train the model\n",
    "total_step = len(train_loader)\n",
    "for epoch in range(num_epochs):\n",
    "    for i, (images, labels) in enumerate(train_loader):\n",
    "        images = images.to(device)\n",
    "        labels = labels.to(device)\n",
    "        \n",
    "        # Forward pass\n",
    "        outputs = model(images)\n",
    "        loss = criterion(outputs, labels)\n",
    "        \n",
    "        # Backward and optimize\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()        \n",
    "        \n",
    "    #After each epoch print Train loss and validation loss + accuracy\n",
    "    print ('Epoch [{}/{}], Loss: {:.4f}' .format(epoch+1, num_epochs, loss.item()))\n",
    "    evaluate(val_loader)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-6 – Make predictions\n",
    "\n",
    "#Make Predictions on Validation Dataset\n",
    "\n",
    "actual, predicted = make_predictions(val_loader)\n",
    "actual,predicted = np.array(actual).reshape(-1,1)\n",
    ",np.array(predicted).reshape(-1,1)\n",
    "\n",
    "print(\"Validation Accuracy-\",round(accuracy_score(actual,predicted),4)*100)\n",
    "print(\"\\n Confusion Matrix\\n\",confusion_matrix(actual,predicted))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Data Source 2  - https://www.kaggle.com/c/dogs-vs-cats/data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-7 – Import packages for fresh exercise (CNN)\n",
    "# Import required libraries\n",
    "import torch\n",
    "import torchvision.transforms as transforms\n",
    "import torchvision.datasets as datasets\n",
    "import torchvision.models as models\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from PIL import Image\n",
    "import matplotlib.pyplot as plt\n",
    "import glob,os\n",
    "import matplotlib.image as mpimg\n",
    "\n",
    "new_path = \"/kaggle/input/catsvsdogs/\"\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-8 – Enable GPU in the kernel (if available)\n",
    "#Check if GPU is available\n",
    "if torch.cuda.is_available():\n",
    "    device = torch.device('cuda')\n",
    "else:\n",
    "    device = torch.device('cpu')\n",
    "print(\"Device:\",device)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-9 – Print sample images from training dataset\n",
    "\n",
    "%matplotlib inline\n",
    "images = []\n",
    "#Collect Cat images\n",
    "for img_path in glob.glob(os.path.join(new_path,\"train\",\"cat\",\"*.jpg\"))[:5]:\n",
    "    images.append(mpimg.imread(img_path))\n",
    "\n",
    "#Collect Dog images\n",
    "for img_path in glob.glob(os.path.join(new_path,\"train\",\"dog\",\"*.jpg\"))[:5]:\n",
    "    images.append(mpimg.imread(img_path))\n",
    "\n",
    "#Plot a grid of cats and Dogs\n",
    "plt.figure(figsize=(20,10))\n",
    "columns = 5\n",
    "for i, image in enumerate(images):\n",
    "    plt.subplot(len(images) / columns + 1, columns, i + 1)\n",
    "    plt.imshow(image)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-10 – Transform data and create train and validation sets\n",
    "\n",
    "#Compose sequence of transformations for image\n",
    "transformations = transforms.Compose([\n",
    "    transforms.Resize(255),\n",
    "    transforms.CenterCrop(224),\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n",
    "])\n",
    "\n",
    "# Load in each dataset and apply transformations using\n",
    "# the torchvision.datasets as datasets library\n",
    "train_set = datasets.ImageFolder(os.path.join(new_path,\"train\")\n",
    ", transform = transformations)\n",
    "val_set = datasets.ImageFolder(os.path.join(new_path,\"test\")\n",
    ", transform = transformations)\n",
    "\n",
    "# Put into a Dataloader using torch library\n",
    "train_loader = torch.utils.data.DataLoader(train_set\n",
    ", batch_size=32, shuffle=True)\n",
    "val_loader = torch.utils.data.DataLoader(val_set, batch_size =32, shuffle=True)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-11 – Define Convolution Neural Net\n",
    "\n",
    "#Define Convolutional network\n",
    "class ConvNet(nn.Module):\n",
    "    def __init__(self, num_classes=2):\n",
    "        super(ConvNet, self).__init__()\n",
    "        #First unit of convolution\n",
    "        self.conv_unit_1 = nn.Sequential(\n",
    "            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=2)) #112\n",
    "\n",
    "        #Second unit of convolution        \n",
    "        self.conv_unit_2 = nn.Sequential(\n",
    "            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=2)) #56\n",
    "\n",
    "        #Third unit of convolution        \n",
    "        self.conv_unit_3 = nn.Sequential(\n",
    "            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=2)) #28\n",
    "\n",
    "        #Fourth unit of convolution        \n",
    "        self.conv_unit_4 = nn.Sequential(\n",
    "            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),           \n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2, stride=2)) #14\n",
    "        \n",
    "        \n",
    "        #Fully connected layers\n",
    "        self.fc1 = nn.Linear(14*14*128, 128)       \n",
    "        self.fc2 = nn.Linear(128, 1)        \n",
    "        self.final = nn.Sigmoid()\n",
    "        \n",
    "        \n",
    "    def forward(self, x):       \n",
    "        out = self.conv_unit_1(x)\n",
    "        out = self.conv_unit_2(out)\n",
    "        out = self.conv_unit_3(out)        \n",
    "        out = self.conv_unit_4(out)                        \n",
    "\n",
    "        #Reshape the output\n",
    "        out = out.view(out.size(0),-1)\n",
    "        out = self.fc1(out)\n",
    "        out = self.fc2(out)  \n",
    "        out  = self.final(out)\n",
    "        \n",
    "        return(out)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-12 – Define Evaluation function\n",
    "\n",
    "def evaluate(model,data_loader):\n",
    "    loss = []\n",
    "    correct = 0\n",
    "    with torch.no_grad():\n",
    "            for images, labels in data_loader:\n",
    "                images = images.to(device)\n",
    "                labels = labels.to(device)\n",
    "\n",
    "                model.eval()\n",
    "\n",
    "                output = model(images)\n",
    "\n",
    "                predicted = output > 0.5\n",
    "                correct += (labels.reshape(-1,1) == predicted.reshape(-1,1)).float().sum()        \n",
    "                \n",
    "                #Clear memory\n",
    "                del([images,labels])\n",
    "                if device == \"cuda\":\n",
    "                    torch.cuda.empty_cache()\n",
    "                \n",
    "    print('\\nVal Accuracy: {}/{} ({:.3f}%)\\n'.format(\n",
    "        correct, len(data_loader.dataset),\n",
    "        100. * correct / len(data_loader.dataset)))  \n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-13 – Define loss, optimizer, create model instance and train for defined epochs\n",
    "num_epochs = 10\n",
    "loss_function = nn.BCELoss()  #Binary Crosss Entropy Loss\n",
    "model = ConvNet()\n",
    "model.cuda()\n",
    "adam_optimizer = torch.optim.Adam(model.parameters(), lr= 0.001)\n",
    "\n",
    "\n",
    "\n",
    "# Train the model\n",
    "total_step = len(train_loader)\n",
    "print(\"Total Batches:\",total_step)\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    model.train()\n",
    "    train_loss = 0\n",
    "    for i, (images, labels) in enumerate(train_loader):\n",
    "        images = images.to(device)\n",
    "        labels = labels.to(device)\n",
    "        \n",
    "        # Forward pass\n",
    "        outputs = model(images)\n",
    "        loss = loss_function(outputs.float(), labels.float().view(-1,1))\n",
    "        \n",
    "        # Backward and optimize\n",
    "        adam_optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        adam_optimizer.step()                \n",
    "        train_loss += loss.item()* labels.size(0)\n",
    "\n",
    "        #After each epoch print Train loss and validation loss + accuracy\n",
    "    print ('Epoch [{}/{}], Loss: {:.4f}' .format(epoch+1, num_epochs, loss.item()))\n",
    "    #Evaluate model after each training epoch\n",
    "    evaluate(model,val_loader) \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-14 –Download and initialize pretrained model\n",
    "\n",
    "#Download the model (pretrained)\n",
    "from torchvision import models\n",
    "new_model = models.vgg16(pretrained=True)\n",
    "\n",
    "# Freeze model weights\n",
    "for param in new_model.parameters():\n",
    "    param.requires_grad = False\n",
    "\n",
    "print(new_model.classifier)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-15 – Replace the last layer with our custom layer\n",
    "\n",
    "#Define our custom model last layer\n",
    "new_model.classifier[6] = nn.Sequential(\n",
    "                      nn.Linear(new_model.classifier[6].in_features, 256), \n",
    "                      nn.ReLU(), \n",
    "                      nn.Dropout(0.4),\n",
    "                      nn.Linear(256, 1),                   \n",
    "                      nn.Sigmoid())\n",
    "\n",
    "# Find total parameters and trainable parameters\n",
    "total_params = sum(p.numel() for p in new_model.parameters())\n",
    "print(f'{total_params:,} total parameters.')\n",
    "total_trainable_params = sum(\n",
    "    p.numel() for p in new_model.parameters() if p.requires_grad)\n",
    "print(f'{total_trainable_params:,} training parameters.')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Listing 6-16 – Train pretrained model for the defined use-case\n",
    "\n",
    "\n",
    "#Define epochs, optimizer and loss function\n",
    "num_epochs = 10\n",
    "loss_function = nn.BCELoss()  #Binary Crosss Entropy Loss\n",
    "new_model.cuda()\n",
    "adam_optimizer = torch.optim.Adam(new_model.parameters(), lr= 0.001)\n",
    "\n",
    "\n",
    "\n",
    "# Train the model\n",
    "total_step = len(train_loader)\n",
    "print(\"Total Batches:\",total_step)\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    new_model.train()\n",
    "    train_loss = 0\n",
    "    for i, (images, labels) in enumerate(train_loader):\n",
    "        images = images.to(device)\n",
    "        labels = labels.to(device)\n",
    "        \n",
    "        # Forward pass\n",
    "        outputs = new_model(images)\n",
    "        loss = loss_function(outputs.float(), labels.float().view(-1,1))\n",
    "        \n",
    "        # Backward and optimize\n",
    "        adam_optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        adam_optimizer.step()                \n",
    "        train_loss += loss.item()* labels.size(0)\n",
    "\n",
    "    #After each epoch print Train loss and validation loss + accuracy\n",
    "    print ('Epoch [{}/{}], Loss: {:.4f}' .format(epoch+1, num_epochs, loss.item()))\n",
    "\n",
    "    #After each epoch evaluate model\n",
    "    evaluate(new_model,val_loader)\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
