{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import glob \n",
    "import os \n",
    "import torch\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "\n",
    "import pandas as pd \n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torchvision import  utils\n",
    "\n",
    "from PIL import Image\n",
    "\n",
    "import p3_model2 as model \n",
    "\n",
    "#import p3_hw as hw\n",
    "import sys\n",
    "\n",
    "\n",
    "\n",
    "from itertools import cycle\n",
    "import torch.optim as optim\n",
    "import p3_test2 as test\n",
    "import time "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reproducibility and device selection: seed first, then pick CUDA when\n",
    "# available, falling back to CPU otherwise.\n",
    "torch.manual_seed(123)\n",
    "cwd = os.getcwd()\n",
    "use_cuda = torch.cuda.is_available()\n",
    "device = torch.device(\"cuda\" if use_cuda else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Domain configuration: source domain(s) used for training and the target\n",
    "# domain used for adaptation/evaluation.\n",
    "target_domain = 'real'  # change here\n",
    "# domain_name = ['sketch', 'quickdraw', 'infograph', 'real']\n",
    "domain_name = ['infograph']\n",
    "# domain_name.remove(target_domain)\n",
    "\n",
    "# Class folders live directly under the domain directory; skip the csv files.\n",
    "# (endswith('.csv') is the robust suffix test; slicing file[-3:] also matched\n",
    "# names that merely end in 'csv' without the dot.)\n",
    "class_name = [f for f in os.listdir(domain_name[0]) if not f.endswith('.csv')]\n",
    "\n",
    "# Per-domain annotation tables; the csv index column holds the image paths.\n",
    "csv_name_train = {name: pd.read_csv(os.path.join(cwd, name, name + '_train.csv'), index_col=0)\n",
    "                  for name in domain_name}\n",
    "# NOTE(review): evaluation uses the target domain's *train* csv — confirm this\n",
    "# is intended rather than a held-out split.\n",
    "csv_name_val = {target_domain: pd.read_csv(os.path.join(cwd, target_domain,\n",
    "                                                        target_domain + '_train.csv'), index_col=0)}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Merge the per-domain training tables into one frame; keep the\n",
    "# target-domain table separate for evaluation.\n",
    "train_cvs = pd.concat([csv_name_train[name] for name in domain_name])\n",
    "test_csv = csv_name_val[target_domain]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ImageNet preprocessing for the ResNet backbone: fixed 224x224 input,\n",
    "# normalised with the ImageNet channel statistics.\n",
    "color_transform = transforms.Compose([\n",
    "                transforms.Resize((224, 224)),\n",
    "                transforms.ToTensor(),\n",
    "                transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
    "                                     std=[0.229, 0.224, 0.225])\n",
    "            ])\n",
    "\n",
    "class finalset(Dataset):\n",
    "    \"\"\"Image dataset driven by an annotation DataFrame.\n",
    "\n",
    "    The DataFrame index holds image file paths; when `train` is truthy the\n",
    "    'label' column provides the class id, otherwise -1 is returned in its\n",
    "    place. Each sample is (image_tensor, label, filename).\n",
    "    \"\"\"\n",
    "    def __init__(self, train_cvs, train, transform=None):\n",
    "        self.csv = train_cvs\n",
    "        self.filenames = list(train_cvs.index)\n",
    "        self.train = train\n",
    "        self.transform = transform\n",
    "        self.len = len(self.filenames)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        \"\"\"Return (image, label, filename) for the sample at `index`.\"\"\"\n",
    "        image_fn = self.filenames[index]\n",
    "        image = Image.open(image_fn)\n",
    "\n",
    "        # Fix: convert non-RGB inputs (grayscale 'L', palette 'P', 'RGBA')\n",
    "        # via PIL. The previous numpy expand_dims/concatenate produced an\n",
    "        # ndarray, which transforms.Resize cannot consume, and it also\n",
    "        # produced 4-channel output for RGBA images.\n",
    "        if image.mode != 'RGB':\n",
    "            image = image.convert('RGB')\n",
    "\n",
    "        if self.transform is not None:\n",
    "            image = self.transform(image)\n",
    "\n",
    "        label = self.csv.loc[image_fn, 'label'] if self.train else -1\n",
    "        return image, label, image_fn\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.len\n",
    "\n",
    "train_data = finalset(train_cvs, train=True, transform=color_transform)\n",
    "trainloader = DataLoader(train_data, batch_size=64, shuffle=True)\n",
    "\n",
    "# train=True on purpose: the target-domain csv carries labels, which the\n",
    "# tester uses to report target-domain accuracy.\n",
    "test_data = finalset(test_csv, train=True, transform=color_transform)\n",
    "testloader = DataLoader(test_data, batch_size=64, shuffle=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Frozen ResNet-50 backbone as the feature extractor; its global-average-pool\n",
    "# output is 2048-dimensional, which is what the heads below consume.\n",
    "feature_size = 2048\n",
    "cnn_feature_extractor = torchvision.models.resnet50(pretrained=True)  # its fc head is for the 1000 ImageNet classes\n",
    "modules = list(cnn_feature_extractor.children())[:-1]  # drop the final fc layer\n",
    "cnn_feature_extractor = nn.Sequential(*modules).to(device)\n",
    "\n",
    "# Freeze the backbone: only the classifier/discriminator heads will learn.\n",
    "for param in cnn_feature_extractor.parameters():\n",
    "    param.requires_grad = False\n",
    "\n",
    "# Fix: use .to(device) consistently instead of .cuda(), so the `device`\n",
    "# fallback configured above is honoured (the backbone was already moved to\n",
    "# `device`; the redundant .cuda() crashed on CPU-only hosts).\n",
    "encoder = cnn_feature_extractor\n",
    "classifier = model.SVHN_Class_classifier().to(device)\n",
    "discriminator = model.SVHN_Domain_classifier().to(device)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def save_checkpoint(encoder, classifier, discriminator, save_name):\n",
    "    \"\"\"Persist model weights under models/<save_name>_{enc,cls,dis}.pth.\n",
    "\n",
    "    Each checkpoint is a dict {\"state_dict\": ...} so loading code can do\n",
    "    torch.load(path)[\"state_dict\"]. The models/ folder is created on demand.\n",
    "    \"\"\"\n",
    "    print('Save models  ...')\n",
    "\n",
    "    save_folder = 'models'\n",
    "    os.makedirs(save_folder, exist_ok=True)\n",
    "\n",
    "    torch.save({\"state_dict\": encoder.state_dict()},\n",
    "               os.path.join(save_folder, str(save_name) + '_enc.pth'))\n",
    "    torch.save({\"state_dict\": classifier.state_dict()},\n",
    "               os.path.join(save_folder, str(save_name) + '_cls.pth'))\n",
    "    # Fix: the discriminator argument was previously accepted but never\n",
    "    # written, leaving the checkpoint incomplete for resuming DANN training.\n",
    "    torch.save({\"state_dict\": discriminator.state_dict()},\n",
    "               os.path.join(save_folder, str(save_name) + '_dis.pth'))\n",
    "    print(' p3 Model '+ str(save_name)+' is saved !!!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DANN training.......\n",
      "Epoch : 0\n",
      "[3136/37120 (8%)]\tLoss: 6.129664\tClass Loss: 5.753774\tDomain Loss: 0.375890\n",
      "[6336/37120 (17%)]\tLoss: 5.778780\tClass Loss: 5.537865\tDomain Loss: 0.240915\n",
      "[9536/37120 (26%)]\tLoss: 5.895702\tClass Loss: 5.657181\tDomain Loss: 0.238521\n",
      "[12736/37120 (34%)]\tLoss: 5.640420\tClass Loss: 5.427755\tDomain Loss: 0.212665\n",
      "[15936/37120 (43%)]\tLoss: 5.824214\tClass Loss: 5.539944\tDomain Loss: 0.284270\n",
      "[19136/37120 (52%)]\tLoss: 5.547801\tClass Loss: 5.336491\tDomain Loss: 0.211310\n",
      "[22336/37120 (60%)]\tLoss: 5.636568\tClass Loss: 5.439458\tDomain Loss: 0.197110\n",
      "[25536/37120 (69%)]\tLoss: 5.557771\tClass Loss: 5.288949\tDomain Loss: 0.268823\n",
      "[28736/37120 (77%)]\tLoss: 5.526441\tClass Loss: 5.339215\tDomain Loss: 0.187226\n",
      "[31936/37120 (86%)]\tLoss: 5.467163\tClass Loss: 5.262992\tDomain Loss: 0.204171\n",
      "[35136/37120 (95%)]\tLoss: 5.523991\tClass Loss: 5.336222\tDomain Loss: 0.187769\n",
      "\n",
      "Target Accuracy: 1504/37120 \n",
      "\n",
      "\n",
      "Source Accuracy: 490/6464 (7.5804%)\n",
      "\n",
      "\n",
      "Target Accuracy: 191/6464 (2.9548%)\n",
      "\n",
      "Domain Accuracy: 11563/12928 (89.4415%)\n",
      "\n",
      "Save models  ...\n",
      " p3 Model wang is saved !!!\n",
      "DANN training.......\n",
      "Epoch : 1\n",
      "[3136/37120 (8%)]\tLoss: 5.251245\tClass Loss: 5.028158\tDomain Loss: 0.223087\n",
      "[6336/37120 (17%)]\tLoss: 5.458335\tClass Loss: 5.262497\tDomain Loss: 0.195838\n",
      "[9536/37120 (26%)]\tLoss: 5.401237\tClass Loss: 5.144515\tDomain Loss: 0.256722\n",
      "[12736/37120 (34%)]\tLoss: 5.215915\tClass Loss: 5.027895\tDomain Loss: 0.188019\n",
      "[15936/37120 (43%)]\tLoss: 5.373968\tClass Loss: 5.171260\tDomain Loss: 0.202708\n",
      "[19136/37120 (52%)]\tLoss: 5.368195\tClass Loss: 5.183920\tDomain Loss: 0.184274\n",
      "[22336/37120 (60%)]\tLoss: 5.398623\tClass Loss: 5.117960\tDomain Loss: 0.280663\n",
      "[25536/37120 (69%)]\tLoss: 5.108480\tClass Loss: 4.972360\tDomain Loss: 0.136120\n",
      "[28736/37120 (77%)]\tLoss: 5.323550\tClass Loss: 5.093762\tDomain Loss: 0.229788\n",
      "[31936/37120 (86%)]\tLoss: 4.858229\tClass Loss: 4.658441\tDomain Loss: 0.199788\n",
      "[35136/37120 (95%)]\tLoss: 5.134714\tClass Loss: 4.949149\tDomain Loss: 0.185564\n",
      "\n",
      "Target Accuracy: 3426/37120 \n",
      "\n",
      "\n",
      "Source Accuracy: 792/6464 (12.2525%)\n",
      "\n",
      "\n",
      "Target Accuracy: 380/6464 (5.8787%)\n",
      "\n",
      "Domain Accuracy: 11717/12928 (90.6327%)\n",
      "\n",
      "Save models  ...\n",
      " p3 Model wang is saved !!!\n",
      "DANN training.......\n",
      "Epoch : 2\n"
     ]
    }
   ],
   "source": [
    "\n",
    "srt_time = time.time()\n",
    "epochs = 40\n",
    "max_target_acc = 0\n",
    "\n",
    "# Both heads emit log-probabilities, hence NLLLoss (not CrossEntropyLoss).\n",
    "classifier_criterion = nn.NLLLoss().cuda()\n",
    "discriminator_criterion = nn.NLLLoss().cuda()\n",
    "\n",
    "# Fix: build the optimizer ONCE. The original recreated it on every batch,\n",
    "# which wiped the SGD momentum buffers and made momentum=0.9 a no-op.\n",
    "# (Frozen encoder params never get grads; SGD simply skips them.)\n",
    "optimizer = optim.SGD(\n",
    "    list(encoder.parameters()) +\n",
    "    list(classifier.parameters()) +\n",
    "    list(discriminator.parameters()),\n",
    "    lr=0.01,\n",
    "    momentum=0.9)\n",
    "\n",
    "for epoch in range(epochs):\n",
    "    print(\"DANN training.......\")\n",
    "    print('Epoch : {}'.format(epoch))\n",
    "\n",
    "    encoder = encoder.train()\n",
    "    classifier = classifier.train()\n",
    "    discriminator = discriminator.train()\n",
    "\n",
    "    # Training progress p in [0, 1] drives the GRL coefficient alpha.\n",
    "    # Fix: use the same loader for both terms — the original mixed\n",
    "    # len(trainloader) with len(testloader), so p could run past 1.\n",
    "    start_steps = epoch * len(trainloader)\n",
    "    total_steps = epochs * len(trainloader)\n",
    "    lens_use = min(len(trainloader), len(testloader))  # zip() stops at the shorter loader\n",
    "\n",
    "    source_correct = 0  # correct class predictions on SOURCE batches this epoch\n",
    "    for batch_idx, (source_data, target_data) in enumerate(zip(trainloader, testloader)):\n",
    "        source_image, source_label, _name = source_data\n",
    "        target_image, target_label, _name = target_data\n",
    "\n",
    "        # GRL schedule from the DANN paper: alpha ramps smoothly from 0 to 1.\n",
    "        p = float(batch_idx + start_steps) / total_steps\n",
    "        alpha = 2. / (1. + np.exp(-10 * p)) - 1\n",
    "\n",
    "        source_image, source_label = source_image.cuda(), source_label.cuda()\n",
    "        target_image, target_label = target_image.cuda(), target_label.cuda()\n",
    "        combined_image = torch.cat((source_image, target_image), 0)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        # NOTE(review): the source batch is encoded twice (alone and inside\n",
    "        # the combined batch). Kept as-is to preserve original behaviour.\n",
    "        combined_feature = encoder(combined_image).view(-1, feature_size)\n",
    "        source_feature = encoder(source_image).view(-1, feature_size)\n",
    "\n",
    "        # 1. Classification loss on labelled source data.\n",
    "        class_pred = classifier(source_feature)\n",
    "        pred2 = class_pred.data.max(1, keepdim=True)[1]\n",
    "        source_correct += pred2.eq(source_label.data.view_as(pred2)).cpu().sum()\n",
    "        class_loss = classifier_criterion(class_pred, source_label)\n",
    "\n",
    "        # 2. Domain loss: source samples are domain 0, target samples domain 1.\n",
    "        domain_pred = discriminator(combined_feature, alpha)\n",
    "        domain_source_labels = torch.zeros(source_label.shape[0]).type(torch.LongTensor)\n",
    "        domain_target_labels = torch.ones(target_label.shape[0]).type(torch.LongTensor)\n",
    "        domain_combined_label = torch.cat((domain_source_labels, domain_target_labels), 0).cuda()\n",
    "        domain_loss = discriminator_criterion(domain_pred, domain_combined_label)\n",
    "\n",
    "        total_loss = class_loss + domain_loss\n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        if (batch_idx + 1) % 50 == 0:\n",
    "            print('[{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tClass Loss: {:.6f}\\tDomain Loss: {:.6f}'.format(\n",
    "                batch_idx * len(target_image), lens_use*64, 100. * batch_idx / lens_use, total_loss.item(), class_loss.item(), domain_loss.item()))\n",
    "\n",
    "    # Fix: this counter measures SOURCE-batch accuracy; the original log line\n",
    "    # mislabelled it as target accuracy. (Denominator assumes full batches.)\n",
    "    print(\"\\nSource Train Accuracy: {}/{} \\n\".format(source_correct, lens_use * 64))\n",
    "\n",
    "    # Evaluate every epoch (the original `if epoch % 1 == 0` was always true).\n",
    "    source_acc, target_acc = test.tester(encoder, classifier, discriminator, trainloader, testloader)\n",
    "\n",
    "    # Keep only the checkpoint with the best target-domain accuracy so far.\n",
    "    save_name = 'wang'\n",
    "    if target_acc > max_target_acc:\n",
    "        save_checkpoint(encoder, classifier, discriminator, save_name)\n",
    "        max_target_acc = target_acc\n",
    "    #visualize(encoder, 'source', save_name)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
