{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse\n",
    "import os\n",
    "import random\n",
    "import torch\n",
    "import yaml\n",
    "from torch.utils.data import DataLoader\n",
    "from train_model.dataset_utils import prepare_test_data_set\n",
    "import numpy as np\n",
    "from os import listdir\n",
    "from dataset_utils import text_processing\n",
    "from train_model.helper import run_model, build_model\n",
    "from IPython.display import Image, display, clear_output\n",
    "\n",
    "# Get test images\n",
    "# wget http://images.cocodataset.org/zips/test2015.zip, unzip and update path to image directory\n",
    "im_dir = './test2015'\n",
    "# Get the models\n",
    "# wget https://s3-us-west-1.amazonaws.com/vqa-suite/pretrained_models/most_data_models.tar.gz and \n",
    "# move the best_model.pth from the uncompressed file to the folder best_model\n",
    "# Get features\n",
    "# mkdir data\n",
    "# cd data\n",
    "# wget https://s3-us-west-1.amazonaws.com/vqa-suite/data/vqa2.0_glove.6B.300d.txt.npy\n",
    "# wget https://s3-us-west-1.amazonaws.com/vqa-suite/data/vocabulary_vqa.txt\n",
    "# wget https://s3-us-west-1.amazonaws.com/vqa-suite/data/answers_vqa.txt\n",
    "# wget https://s3-us-west-1.amazonaws.com/vqa-suite/data/imdb.tar.gz\n",
    "# wget https://s3-us-west-1.amazonaws.com/vqa-suite/data/rcnn_10_100.tar.gz\n",
    "# wget https://s3-us-west-1.amazonaws.com/vqa-suite/data/large_vocabulary_vqa.txt\n",
    "# wget https://s3-us-west-1.amazonaws.com/vqa-suite/data/large_vqa2.0_glove.6B.300d.txt.npy\n",
    "# tar -xf rcnn_10_100.tar.gz\n",
    "# tar -xf imdb.tar.gz\n",
    "\n",
    "# Paths to the pretrained model's config and weights (placed under ./best_model,\n",
    "# see the download instructions in the comments above)\n",
    "config_file = 'best_model/config.yaml'\n",
    "model_file = 'best_model/best_model.pth'\n",
    "\n",
    "\n",
    "def get_image():\n",
    "    \"\"\"Pick a random image from im_dir, display it, and return its filename.\"\"\"\n",
    "    im_files = os.listdir(im_dir)\n",
    "    im_file = random.choice(im_files)\n",
    "    im_path = os.path.join(im_dir, im_file)\n",
    "    # Clear the previous output BEFORE printing, otherwise the printed\n",
    "    # path is erased immediately by clear_output() and never seen.\n",
    "    clear_output()\n",
    "    print(im_path)\n",
    "    display(Image(filename=im_path))\n",
    "    return im_file\n",
    "\n",
    "def get_imdb(im_file, question_str):\n",
    "    \"\"\"Build a minimal two-entry imdb list: a dataset header followed by\n",
    "    one record describing the chosen image and the asked question.\"\"\"\n",
    "    header = {'dataset_name': 'vqa', 'version': 1, 'has_answer': False, 'has_gt_layout': False}\n",
    "    image_name = im_file.replace('.jpg', '')\n",
    "    record = {\n",
    "        'image_name': image_name,\n",
    "        'img_id': int(image_name.split('_')[-1]),\n",
    "        'question_id': 0,\n",
    "        'feature_path': image_name + '.npy',\n",
    "        'question_str': question_str,\n",
    "        'question_tokens': text_processing.tokenize(question_str),\n",
    "    }\n",
    "    return [header, record]\n",
    "\n",
    "def print_result(question_ids, soft_max_result, ans_dic):\n",
    "    \"\"\"Print the highest-scoring answer for each row of soft_max_result.\n",
    "\n",
    "    question_ids is kept in the signature for API compatibility; answers\n",
    "    are printed in the same order as the rows of soft_max_result.\n",
    "    \"\"\"\n",
    "    predicted_answers = np.argmax(soft_max_result, axis=1)\n",
    "    for pred_idx in predicted_answers:\n",
    "        # Map the argmax index back to the answer vocabulary entry\n",
    "        print(ans_dic.idx2word(pred_idx))\n",
    "\n",
    "def demo():\n",
    "    \"\"\"Interactive VQA demo loop.\n",
    "\n",
    "    Shows a random test image and repeatedly asks for a question via\n",
    "    input(); type 'next' to switch images, 'stop' to quit.\n",
    "    \"\"\"\n",
    "    with open(config_file, 'r') as f:\n",
    "        # safe_load avoids arbitrary object construction from the YAML file;\n",
    "        # plain yaml.load without a Loader is also an error in PyYAML >= 6.0.\n",
    "        config = yaml.safe_load(f)\n",
    "\n",
    "    data_set_test = prepare_test_data_set(**config['data'], **config['model'], verbose=True, test_mode=True)\n",
    "\n",
    "    myModel = build_model(config, data_set_test)\n",
    "    myModel.load_state_dict(torch.load(model_file)['state_dict'])\n",
    "\n",
    "    print('VQA Demo')\n",
    "    print('Say next to go to next image')\n",
    "    print('Say stop to stop demo')\n",
    "    im_file = get_image()\n",
    "    while True:\n",
    "        print(\"What question would you like to ask?\")\n",
    "        question_str = input()\n",
    "        if question_str.lower() == 'next':\n",
    "            im_file = get_image()\n",
    "            continue\n",
    "        if question_str.lower() == 'stop':\n",
    "            print('Bye')\n",
    "            break\n",
    "        # Inject the ad-hoc single-question imdb so the existing test\n",
    "        # pipeline serves just this image/question pair.\n",
    "        data_set_test.datasets[0].imdb = get_imdb(im_file, question_str)\n",
    "        data_reader_test = DataLoader(data_set_test, shuffle=False, batch_size=1)\n",
    "        ans_dic = data_set_test.answer_dict\n",
    "\n",
    "        question_ids, soft_max_result = run_model(myModel, data_reader_test, ans_dic.UNK_idx)\n",
    "        print_result(question_ids, soft_max_result, ans_dic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "demo()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
