{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import absolute_import, division, print_function, \\\n",
    "    unicode_literals\n",
    "\n",
    "import argparse\n",
    "from collections import OrderedDict\n",
    "from glob import glob\n",
    "from IPython.display import Image, display, clear_output\n",
    "import os\n",
    "from os import listdir\n",
    "import PIL\n",
    "\n",
    "import numpy as np\n",
    "import random\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "import torchvision.transforms as transforms\n",
    "import torchvision.models as models\n",
    "import yaml\n",
    "\n",
    "from dataset_utils import text_processing\n",
    "from train_model.Engineer import one_stage_run_model, masked_unk_softmax\n",
    "from train_model.model_factory import prepare_model\n",
    "\n",
    "\n",
    "\n",
    "# Get test images\n",
    "# wget http://images.cocodataset.org/zips/test2015.zip, unzip and update path to image directory\n",
    "# NOTE(review): absolute local path -- must be updated for your environment\n",
    "im_dir = '/private/home/nvivek/vqa_2.0/test2015'\n",
    "\n",
    "# Get the models\n",
    "# wget https://s3-us-west-1.amazonaws.com/pythia-vqa/pretrained_models/detectron_100_resnet_most_data.tar.gz\n",
    "# move the best_model.pth and config.yaml from the uncompressed file to the folder vqa_detectron_master\n",
    "\n",
    "# mkdir vqa_detectron_master\n",
    "\n",
    "config_file = '/private/home/nvivek/VQA/pythia/vqa_detectron_master/config.yaml'\n",
    "model_file = '/private/home/nvivek/VQA/pythia/vqa_detectron_master/best_model.pth'\n",
    "\n",
    "\n",
    "# Get data\n",
    "# mkdir data\n",
    "# cd data\n",
    "# wget https://s3-us-west-1.amazonaws.com/pythia-vqa/data/vqa2.0_glove.6B.300d.txt.npy\n",
    "# wget https://s3-us-west-1.amazonaws.com/pythia-vqa/data/vocabulary_vqa.txt\n",
    "# wget https://s3-us-west-1.amazonaws.com/pythia-vqa/data/answers_vqa.txt\n",
    "# wget https://s3-us-west-1.amazonaws.com/pythia-vqa/data/large_vocabulary_vqa.txt\n",
    "# wget https://s3-us-west-1.amazonaws.com/pythia-vqa/data/large_vqa2.0_glove.6B.300d.txt.npy\n",
    "\n",
    "# Ensure the files fc7_b.pkl and fc7_w.pkl are also present\n",
    "\n",
    "# Images are resized to 448x448; the extractor below flattens the\n",
    "# resulting feature map into 196 rows (cf. view(196, -1) in\n",
    "# extract_resnet_feat), i.e. a 14x14 spatial grid.\n",
    "TARGET_IMAGE_SIZE = [448, 448]\n",
    "# Standard ImageNet per-channel statistics used by torchvision's\n",
    "# pretrained models.\n",
    "CHANNEL_MEAN = [0.485, 0.456, 0.406]\n",
    "CHANNEL_STD = [0.229, 0.224, 0.225]\n",
    "data_transforms = transforms.Compose([transforms.Resize(TARGET_IMAGE_SIZE),\n",
    "                                      transforms.ToTensor(),\n",
    "                                      transforms.Normalize(CHANNEL_MEAN,\n",
    "                                                           CHANNEL_STD)])\n",
    "\n",
    "use_cuda = torch.cuda.is_available()\n",
    "\n",
    "# NOTE feat path \"https://download.pytorch.org/models/resnet152-b121ed2d.pth\"\n",
    "# Downloads pretrained weights on first use; eval() switches batch-norm\n",
    "# and dropout layers to inference mode.\n",
    "RESNET152_MODEL = models.resnet152(pretrained=True)\n",
    "RESNET152_MODEL.eval()\n",
    "\n",
    "if use_cuda:\n",
    "    RESNET152_MODEL = RESNET152_MODEL.cuda()\n",
    "class ResNet152FeatModule(nn.Module):\n",
    "    \"\"\"Convolutional trunk of the pretrained ResNet-152.\n",
    "\n",
    "    Drops the final two children of RESNET152_MODEL (the pooling and\n",
    "    classifier layers in torchvision's ResNet) so that forward() yields\n",
    "    the last convolutional feature map rather than class logits.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(ResNet152FeatModule, self).__init__()\n",
    "        # Keep everything except the last two layers.\n",
    "        trunk = list(RESNET152_MODEL.children())\n",
    "        self.feature_module = nn.Sequential(*trunk[:-2])\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Return the convolutional feature map for a batch of images.\"\"\"\n",
    "        return self.feature_module(x)\n",
    "\n",
    "# Shared feature extractor instance used by extract_resnet_feat below.\n",
    "resnet_module = ResNet152FeatModule()\n",
    "if use_cuda:\n",
    "    resnet_module = resnet_module.cuda()\n",
    "    \n",
    "def get_image():\n",
    "    \"\"\"Pick a random image from im_dir, display it, and return its filename.\"\"\"\n",
    "    # os.listdir already returns a list; no comprehension needed.\n",
    "    im_files = os.listdir(im_dir)\n",
    "    im_file = random.choice(im_files)\n",
    "    im_path = os.path.join(im_dir, im_file)\n",
    "    # BUG FIX: the path used to be printed *before* clear_output(),\n",
    "    # which immediately erased it; clear first so the path is visible\n",
    "    # next to the displayed image.\n",
    "    clear_output()\n",
    "    print(im_path)\n",
    "    display(Image(filename=im_path))\n",
    "    return im_file\n",
    "\n",
    "def extract_resnet_feat(im_file):\n",
    "    \"\"\"Extract a 196-row ResNet-152 feature grid for one image.\n",
    "\n",
    "    im_file may be a bare filename (resolved against im_dir) or an\n",
    "    absolute path. Returns a (196, C) tensor, one row per spatial cell\n",
    "    of the final convolutional feature map.\n",
    "    \"\"\"\n",
    "    if not os.path.isabs(im_file):\n",
    "        im_file = os.path.join(im_dir, im_file)\n",
    "    # convert('RGB') guarantees 3 channels, so grey-scale images are\n",
    "    # handled here; no channel expansion is needed afterwards (the old\n",
    "    # shape[0] == 1 check was dead code).\n",
    "    img = PIL.Image.open(im_file).convert('RGB')\n",
    "    img_transform = data_transforms(img)\n",
    "    img_var = Variable(img_transform.unsqueeze(0))\n",
    "    if use_cuda:\n",
    "        img_var = img_var.cuda()\n",
    "\n",
    "    img_feat = resnet_module(img_var)\n",
    "    # BUG FIX: permute() returns a non-contiguous tensor and view()\n",
    "    # requires contiguous memory; call contiguous() before flattening\n",
    "    # the spatial grid into 196 feature rows.\n",
    "    img_feat = img_feat.permute(0, 2, 3, 1).contiguous().view(196, -1)\n",
    "    return img_feat\n",
    "\n",
    "def print_result(soft_max_result, ans_dict):\n",
    "    \"\"\"Print the highest-scoring answer word for each row of softmax scores.\"\"\"\n",
    "    for pred_idx in np.argmax(soft_max_result, axis=1):\n",
    "        print(ans_dict.idx2word(pred_idx))\n",
    "\n",
    "def prepare_sample(vocab_dict, im_file, question_tokens, question_max_len=14):\n",
    "    \"\"\"Build the batched input dict consumed by one_stage_run_model.\n",
    "\n",
    "    Tokens beyond question_max_len are dropped, and the reported\n",
    "    sequence length is clipped to match.\n",
    "    \"\"\"\n",
    "    input_seq = np.zeros(question_max_len, np.int32)\n",
    "    question_inds = [vocab_dict.word2idx(w) for w in question_tokens]\n",
    "    # BUG FIX: report the *truncated* length; previously a question\n",
    "    # longer than question_max_len reported a length past the end of\n",
    "    # input_seq.\n",
    "    read_len = min(len(question_inds), question_max_len)\n",
    "    input_seq[:read_len] = question_inds[:read_len]\n",
    "    sample = dict(input_seq_batch=torch.from_numpy(input_seq).unsqueeze(0),\n",
    "                  seq_length_batch=torch.tensor((read_len,)).unsqueeze(0))\n",
    "\n",
    "    image_feats = []\n",
    "    # placeholder detectron feature (random) -- the real pipeline would\n",
    "    # load precomputed detectron features here\n",
    "    image_feats.append(torch.rand(100, 2048))\n",
    "    # append resnet feature\n",
    "    image_feats.append(extract_resnet_feat(im_file))\n",
    "\n",
    "    for im_idx, image_feat in enumerate(image_feats):\n",
    "        if im_idx == 0:\n",
    "            sample['image_feat_batch'] = image_feat.unsqueeze(0)\n",
    "        else:\n",
    "            feat_key = \"image_feat_batch_%s\" % str(im_idx)\n",
    "            sample[feat_key] = image_feat.unsqueeze(0)\n",
    "\n",
    "    # hardcoded for now - set to 100, matching the 100 placeholder\n",
    "    # detectron rows above\n",
    "    sample['image_dim'] = torch.tensor((100,))\n",
    "    return sample\n",
    "    \n",
    "def evaluate_sample(model, data_set, im_file, question, UNK_idx=0):\n",
    "    \"\"\"Run the VQA model on one image/question pair.\n",
    "\n",
    "    Returns a float16 numpy array of per-answer softmax scores with\n",
    "    the UNK answer (index UNK_idx) masked out.\n",
    "    \"\"\"\n",
    "    tokens = text_processing.tokenize(question)\n",
    "    sample = prepare_sample(data_set, im_file, tokens)\n",
    "    logits = one_stage_run_model(sample, model, eval_mode=True)\n",
    "    scores = masked_unk_softmax(logits, dim=1, mask_idx=UNK_idx)\n",
    "    return scores.data.cpu().numpy().astype(np.float16)\n",
    "\n",
    "def demo():\n",
    "    \"\"\"Interactive VQA loop: show a random image, answer typed questions.\n",
    "\n",
    "    Type 'next' to get a new image, 'stop' to quit.\n",
    "    \"\"\"\n",
    "    # BUG FIX: yaml.load without an explicit Loader is deprecated and\n",
    "    # can construct arbitrary objects; safe_load is sufficient for a\n",
    "    # plain config file.\n",
    "    with open(config_file, 'r') as f:\n",
    "        config = yaml.safe_load(f)\n",
    "\n",
    "    data_root_dir = config['data']['data_root_dir']\n",
    "\n",
    "    vocab_dict = text_processing.VocabDict(os.path.join(data_root_dir, config['data']['vocab_question_file']))\n",
    "    ans_dict = text_processing.VocabDict(os.path.join(data_root_dir, config['data']['vocab_answer_file']))\n",
    "\n",
    "    num_image_feat = len(config['data']['image_feat_train'][0].split(','))\n",
    "    num_vocab_txt = vocab_dict.num_vocab\n",
    "    num_answers = ans_dict.num_vocab\n",
    "\n",
    "    myModel = prepare_model(num_vocab_txt, num_answers, **config['model'],\n",
    "                            num_image_feat=num_image_feat)\n",
    "    # The checkpoint keys carry a 'module.' prefix (presumably saved\n",
    "    # from a DataParallel-wrapped model); strip it so the keys match\n",
    "    # the bare model.\n",
    "    state_dict = torch.load(model_file)['state_dict']\n",
    "    new_state_dict = OrderedDict()\n",
    "    for k, v in state_dict.items():\n",
    "        new_state_dict[k.replace('module.', '')] = v\n",
    "    myModel.load_state_dict(new_state_dict)\n",
    "\n",
    "    print('VQA Demo')\n",
    "    print('Say next to go to next image')\n",
    "    print('Say stop to stop demo')\n",
    "    im_file = get_image()\n",
    "\n",
    "    while True:\n",
    "        print(\"What question would you like to ask?\")\n",
    "        question_str = input()\n",
    "        if question_str.lower() == 'next':\n",
    "            im_file = get_image()\n",
    "            continue\n",
    "        if question_str.lower() == 'stop':\n",
    "            print('Bye')\n",
    "            break\n",
    "\n",
    "        soft_max_result = evaluate_sample(myModel, vocab_dict, im_file, question_str)\n",
    "        print_result(soft_max_result, ans_dict)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Launch the interactive demo (requires the downloads listed above).\n",
    "demo()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
