{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "I1201 13:38:43.279632 139725318354752 file_utils.py:39] PyTorch version 0.4.1 available.\n",
      "I1201 13:38:47.842504 139725318354752 configuration_utils.py:151] loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json from cache at /root/.cache/torch/transformers/4dad0251492946e18ac39290fcfe91b89d370fee250efe9521476438fe8ca185.bf3b9ea126d8c0001ee8a1e8b92229871d06d36d8808208cc2449280da87785c\n",
      "I1201 13:38:47.847089 139725318354752 configuration_utils.py:168] Model config {\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"finetuning_task\": \"sst-2\",\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 768,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 3072,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"num_attention_heads\": 12,\n",
      "  \"num_hidden_layers\": 12,\n",
      "  \"num_labels\": 1,\n",
      "  \"output_attentions\": false,\n",
      "  \"output_hidden_states\": false,\n",
      "  \"pruned_heads\": {},\n",
      "  \"torchscript\": false,\n",
      "  \"type_vocab_size\": 2,\n",
      "  \"use_bfloat16\": false,\n",
      "  \"vocab_size\": 30522\n",
      "}\n",
      "\n",
      "I1201 13:38:48.786530 139725318354752 tokenization_utils.py:373] loading file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt from cache at /root/.cache/torch/transformers/26bc1ad6c0ac742e9b52263248f6d0f00068293b33709fae12320c0e35ccfbbb.542ce4285a40d23a559526243235df47c5f75c197f04f37d1a0c124c32c9a084\n"
     ]
    }
   ],
   "source": [
    "from create_inputs_utils import * \n",
    "import os\n",
    "import numpy as np\n",
    "import h5py\n",
    "import json\n",
    "from tqdm import tqdm\n",
    "from collections import Counter\n",
    "from random import seed, choice, sample\n",
    "import pickle\n",
    "import argparse\n",
    "import glob\n",
    "import logging\n",
    "import os\n",
    "import random\n",
    "import sys\n",
    "sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname('__file__'))))\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from tqdm import tqdm, trange\n",
    "import csv\n",
    "import logging\n",
    "logger = logging.getLogger(__name__)\n",
    "\n",
    "from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,\n",
    "                              TensorDataset)\n",
    "from transformers import (WEIGHTS_NAME, BertConfig,\n",
    "                                  BertForSequenceClassification, BertTokenizer,\n",
    "                                  )\n",
    "\n",
    "dataset = 'coco'\n",
    "karpathy_json_path='../data/caption_datasets/dataset_coco.json'\n",
    "captions_per_image=5\n",
    "output_folder='../preprocessed_data'\n",
    "base_filename = 'preprocessed_' + dataset\n",
    "max_len=50\n",
    "\n",
    "train_image_captions = []\n",
    "val_image_captions = []\n",
    "test_image_captions = []\n",
    "\n",
    "train_image_det = []\n",
    "val_image_det = []\n",
    "test_image_det = []\n",
    "\n",
    "with open(karpathy_json_path, 'r') as j:\n",
    "    data = json.load(j)\n",
    "with open(os.path.join(output_folder,'train36_imgid2idx.pkl'), 'rb') as j:\n",
    "    train_data = pickle.load(j)       \n",
    "with open(os.path.join(output_folder,'val36_imgid2idx.pkl'), 'rb') as j:\n",
    "    val_data = pickle.load(j)\n",
    "    \n",
    "processor = CaptionProcessor()\n",
    "task_name = \"sst-2\"#임의 설정 (필요없는 값)\n",
    "output_mode = \"classification\" #임의 설정 (필요없는 값)\n",
    "model_name_or_path = \"bert-base-uncased\" \n",
    "max_seq_length = 50\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available()  else \"cpu\")\n",
    "label_list = processor.get_labels()\n",
    "num_labels = len(label_list)\n",
    "\n",
    "config_class = BertConfig\n",
    "model_class = BertForSequenceClassification\n",
    "tokenizer_class = BertTokenizer\n",
    "\n",
    "config = config_class.from_pretrained(model_name_or_path, num_labels=num_labels, finetuning_task = task_name)\n",
    "tokenizer = tokenizer_class.from_pretrained(model_name_or_path, do_lower_case = True)\n",
    "#model = model_class.from_pretrained(model_name_or_path, from_tf=bool('.ckpt' in model_name_or_path), config=config)\n",
    "\n",
    "#model.to(device)    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 113287/113287 [00:01<00:00, 103118.00it/s]\n",
      "I1201 13:38:52.146618 139725318354752 create_inputs_utils.py:128] Writing example 0 of 566435\n",
      "I1201 13:38:52.147467 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:38:52.147825 139725318354752 create_inputs_utils.py:219] guid: TRAIN-0-0\n",
      "I1201 13:38:52.148168 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a woman marking a cake with the back of a chefs knife [SEP]\n",
      "I1201 13:38:52.148506 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2450 10060 1037 9850 2007 1996 2067 1997 1037 27828 5442 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.148839 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.149182 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.149505 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:38:52.150097 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:38:52.150418 139725318354752 create_inputs_utils.py:219] guid: TRAIN-0-1\n",
      "I1201 13:38:52.150846 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a woman wearing a hair net cutting a large sheet cake [SEP]\n",
      "I1201 13:38:52.151145 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2450 4147 1037 2606 5658 6276 1037 2312 7123 9850 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.152615 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.152878 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.153182 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:38:52.153712 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:38:52.154020 139725318354752 create_inputs_utils.py:219] guid: TRAIN-0-2\n",
      "I1201 13:38:52.154317 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a woman wearing a net on her head cutting a cake [SEP]\n",
      "I1201 13:38:52.154639 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2450 4147 1037 5658 2006 2014 2132 6276 1037 9850 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.154975 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.155268 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.155567 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:38:52.156083 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:38:52.156376 139725318354752 create_inputs_utils.py:219] guid: TRAIN-0-3\n",
      "I1201 13:38:52.156689 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a woman cutting a large white sheet cake [SEP]\n",
      "I1201 13:38:52.156989 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2450 6276 1037 2312 2317 7123 9850 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.157281 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.157591 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.157879 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:38:52.158403 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:38:52.158730 139725318354752 create_inputs_utils.py:219] guid: TRAIN-0-4\n",
      "I1201 13:38:52.159024 139725318354752 create_inputs_utils.py:221] tokens: [CLS] there is a woman that is cutting a white cake [SEP]\n",
      "I1201 13:38:52.159325 139725318354752 create_inputs_utils.py:222] input_ids: 101 2045 2003 1037 2450 2008 2003 6276 1037 2317 9850 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.159655 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.159956 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:38:52.160252 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:38:54.549698 139725318354752 create_inputs_utils.py:128] Writing example 10000 of 566435\n",
      "I1201 13:38:56.936333 139725318354752 create_inputs_utils.py:128] Writing example 20000 of 566435\n",
      "I1201 13:38:59.320235 139725318354752 create_inputs_utils.py:128] Writing example 30000 of 566435\n",
      "I1201 13:39:02.131003 139725318354752 create_inputs_utils.py:128] Writing example 40000 of 566435\n",
      "I1201 13:39:04.502406 139725318354752 create_inputs_utils.py:128] Writing example 50000 of 566435\n",
      "I1201 13:39:06.885188 139725318354752 create_inputs_utils.py:128] Writing example 60000 of 566435\n",
      "I1201 13:39:09.274291 139725318354752 create_inputs_utils.py:128] Writing example 70000 of 566435\n",
      "I1201 13:39:11.653275 139725318354752 create_inputs_utils.py:128] Writing example 80000 of 566435\n",
      "I1201 13:39:14.030758 139725318354752 create_inputs_utils.py:128] Writing example 90000 of 566435\n",
      "I1201 13:39:16.403339 139725318354752 create_inputs_utils.py:128] Writing example 100000 of 566435\n",
      "I1201 13:39:18.781220 139725318354752 create_inputs_utils.py:128] Writing example 110000 of 566435\n",
      "I1201 13:39:21.165068 139725318354752 create_inputs_utils.py:128] Writing example 120000 of 566435\n",
      "I1201 13:39:23.539678 139725318354752 create_inputs_utils.py:128] Writing example 130000 of 566435\n",
      "I1201 13:39:25.925226 139725318354752 create_inputs_utils.py:128] Writing example 140000 of 566435\n",
      "I1201 13:39:28.302229 139725318354752 create_inputs_utils.py:128] Writing example 150000 of 566435\n",
      "I1201 13:39:30.685905 139725318354752 create_inputs_utils.py:128] Writing example 160000 of 566435\n",
      "I1201 13:39:33.068316 139725318354752 create_inputs_utils.py:128] Writing example 170000 of 566435\n",
      "I1201 13:39:35.990342 139725318354752 create_inputs_utils.py:128] Writing example 180000 of 566435\n",
      "I1201 13:39:38.373432 139725318354752 create_inputs_utils.py:128] Writing example 190000 of 566435\n",
      "I1201 13:39:40.767719 139725318354752 create_inputs_utils.py:128] Writing example 200000 of 566435\n",
      "I1201 13:39:43.154496 139725318354752 create_inputs_utils.py:128] Writing example 210000 of 566435\n",
      "I1201 13:39:45.536080 139725318354752 create_inputs_utils.py:128] Writing example 220000 of 566435\n",
      "I1201 13:39:47.928596 139725318354752 create_inputs_utils.py:128] Writing example 230000 of 566435\n",
      "I1201 13:39:50.311191 139725318354752 create_inputs_utils.py:128] Writing example 240000 of 566435\n",
      "I1201 13:39:52.696313 139725318354752 create_inputs_utils.py:128] Writing example 250000 of 566435\n",
      "I1201 13:39:55.071496 139725318354752 create_inputs_utils.py:128] Writing example 260000 of 566435\n",
      "I1201 13:39:57.461616 139725318354752 create_inputs_utils.py:128] Writing example 270000 of 566435\n",
      "I1201 13:39:59.838580 139725318354752 create_inputs_utils.py:128] Writing example 280000 of 566435\n",
      "I1201 13:40:02.219065 139725318354752 create_inputs_utils.py:128] Writing example 290000 of 566435\n",
      "I1201 13:40:04.606491 139725318354752 create_inputs_utils.py:128] Writing example 300000 of 566435\n",
      "I1201 13:40:06.985610 139725318354752 create_inputs_utils.py:128] Writing example 310000 of 566435\n",
      "I1201 13:40:09.367398 139725318354752 create_inputs_utils.py:128] Writing example 320000 of 566435\n",
      "I1201 13:40:11.759597 139725318354752 create_inputs_utils.py:128] Writing example 330000 of 566435\n",
      "I1201 13:40:14.163764 139725318354752 create_inputs_utils.py:128] Writing example 340000 of 566435\n",
      "I1201 13:40:16.542769 139725318354752 create_inputs_utils.py:128] Writing example 350000 of 566435\n",
      "I1201 13:40:19.596394 139725318354752 create_inputs_utils.py:128] Writing example 360000 of 566435\n",
      "I1201 13:40:21.974144 139725318354752 create_inputs_utils.py:128] Writing example 370000 of 566435\n",
      "I1201 13:40:24.367855 139725318354752 create_inputs_utils.py:128] Writing example 380000 of 566435\n",
      "I1201 13:40:26.746558 139725318354752 create_inputs_utils.py:128] Writing example 390000 of 566435\n",
      "I1201 13:40:29.135419 139725318354752 create_inputs_utils.py:128] Writing example 400000 of 566435\n",
      "I1201 13:40:31.521217 139725318354752 create_inputs_utils.py:128] Writing example 410000 of 566435\n",
      "I1201 13:40:33.896837 139725318354752 create_inputs_utils.py:128] Writing example 420000 of 566435\n",
      "I1201 13:40:36.268753 139725318354752 create_inputs_utils.py:128] Writing example 430000 of 566435\n",
      "I1201 13:40:38.659086 139725318354752 create_inputs_utils.py:128] Writing example 440000 of 566435\n",
      "I1201 13:40:41.036405 139725318354752 create_inputs_utils.py:128] Writing example 450000 of 566435\n",
      "I1201 13:40:43.436507 139725318354752 create_inputs_utils.py:128] Writing example 460000 of 566435\n",
      "I1201 13:40:45.824514 139725318354752 create_inputs_utils.py:128] Writing example 470000 of 566435\n",
      "I1201 13:40:48.207521 139725318354752 create_inputs_utils.py:128] Writing example 480000 of 566435\n",
      "I1201 13:40:50.570992 139725318354752 create_inputs_utils.py:128] Writing example 490000 of 566435\n",
      "I1201 13:40:52.956751 139725318354752 create_inputs_utils.py:128] Writing example 500000 of 566435\n",
      "I1201 13:40:55.343117 139725318354752 create_inputs_utils.py:128] Writing example 510000 of 566435\n",
      "I1201 13:40:57.730863 139725318354752 create_inputs_utils.py:128] Writing example 520000 of 566435\n",
      "I1201 13:41:00.112410 139725318354752 create_inputs_utils.py:128] Writing example 530000 of 566435\n",
      "I1201 13:41:02.483749 139725318354752 create_inputs_utils.py:128] Writing example 540000 of 566435\n",
      "I1201 13:41:04.862977 139725318354752 create_inputs_utils.py:128] Writing example 550000 of 566435\n",
      "I1201 13:41:07.254555 139725318354752 create_inputs_utils.py:128] Writing example 560000 of 566435\n",
      "I1201 13:41:08.794118 139725318354752 create_inputs_utils.py:272] Saving features into cached file ../preprocessed_data/cached_TRAIN_Caption\n",
      "100%|██████████| 5000/5000 [00:00<00:00, 99502.85it/s]\n",
      "I1201 13:42:08.364082 139725318354752 create_inputs_utils.py:128] Writing example 0 of 25000\n",
      "I1201 13:42:08.364745 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:08.365061 139725318354752 create_inputs_utils.py:219] guid: VAL-0-0\n",
      "I1201 13:42:08.365345 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a child holding a flower ##ed umbrella and pet ##ting a ya ##k [SEP]\n",
      "I1201 13:42:08.365649 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2775 3173 1037 6546 2098 12977 1998 9004 3436 1037 8038 2243 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.365947 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.366235 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.366522 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:08.367116 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:08.367390 139725318354752 create_inputs_utils.py:219] guid: VAL-0-1\n",
      "I1201 13:42:08.367653 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a young man holding an umbrella next to a herd of cattle [SEP]\n",
      "I1201 13:42:08.367916 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2402 2158 3173 2019 12977 2279 2000 1037 14906 1997 7125 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.368179 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.368436 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.368683 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:08.369237 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:08.369487 139725318354752 create_inputs_utils.py:219] guid: VAL-0-2\n",
      "I1201 13:42:08.369749 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a young boy barefoot holding an umbrella touching the horn of a cow [SEP]\n",
      "I1201 13:42:08.370007 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2402 2879 22985 3173 2019 12977 7244 1996 7109 1997 1037 11190 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.370256 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.370522 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.370814 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:08.371341 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:08.371603 139725318354752 create_inputs_utils.py:219] guid: VAL-0-3\n",
      "I1201 13:42:08.371849 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a young boy with an umbrella who is touching the horn of a cow [SEP]\n",
      "I1201 13:42:08.372106 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2402 2879 2007 2019 12977 2040 2003 7244 1996 7109 1997 1037 11190 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.372368 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.372614 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.372864 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:08.373380 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:08.373632 139725318354752 create_inputs_utils.py:219] guid: VAL-0-4\n",
      "I1201 13:42:08.373895 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a boy holding an umbrella while standing next to livestock [SEP]\n",
      "I1201 13:42:08.374151 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2879 3173 2019 12977 2096 3061 2279 2000 11468 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.374404 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.374713 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:08.374989 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:11.138141 139725318354752 create_inputs_utils.py:128] Writing example 10000 of 25000\n",
      "I1201 13:42:13.499557 139725318354752 create_inputs_utils.py:128] Writing example 20000 of 25000\n",
      "I1201 13:42:14.685988 139725318354752 create_inputs_utils.py:272] Saving features into cached file ../preprocessed_data/cached_VAL_Caption\n",
      "100%|██████████| 5000/5000 [00:00<00:00, 99345.89it/s]\n",
      "I1201 13:42:17.378617 139725318354752 create_inputs_utils.py:128] Writing example 0 of 25000\n",
      "I1201 13:42:17.379264 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:17.379577 139725318354752 create_inputs_utils.py:219] guid: TEST-0-0\n",
      "I1201 13:42:17.379863 139725318354752 create_inputs_utils.py:221] tokens: [CLS] man riding a motor bike on a dirt road on the countryside [SEP]\n",
      "I1201 13:42:17.380153 139725318354752 create_inputs_utils.py:222] input_ids: 101 2158 5559 1037 5013 7997 2006 1037 6900 2346 2006 1996 10833 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.380433 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.380721 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.380998 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:17.381935 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:17.382231 139725318354752 create_inputs_utils.py:219] guid: TEST-0-1\n",
      "I1201 13:42:17.382502 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a dirt path with a young person on a motor bike rests to the fore ##ground of a ve ##rdan ##t area with a bridge and a background of cloud wreath ##ed mountains [SEP]\n",
      "I1201 13:42:17.382825 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 6900 4130 2007 1037 2402 2711 2006 1037 5013 7997 16626 2000 1996 18921 16365 1997 1037 2310 26992 2102 2181 2007 1037 2958 1998 1037 4281 1997 6112 29586 2098 4020 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.383117 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.383389 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.383645 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:17.384264 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:17.384518 139725318354752 create_inputs_utils.py:219] guid: TEST-0-2\n",
      "I1201 13:42:17.384803 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a man in a red shirt and a red hat is on a motorcycle on a hill side [SEP]\n",
      "I1201 13:42:17.385078 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2158 1999 1037 2417 3797 1998 1037 2417 6045 2003 2006 1037 9055 2006 1037 2940 2217 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.385341 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.385643 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.385900 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:17.386358 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:17.386702 139725318354752 create_inputs_utils.py:219] guid: TEST-0-3\n",
      "I1201 13:42:17.386958 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a man riding on the back of a motorcycle [SEP]\n",
      "I1201 13:42:17.387274 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2158 5559 2006 1996 2067 1997 1037 9055 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.387556 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.387820 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.388089 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:17.388638 139725318354752 create_inputs_utils.py:218] *** Example ***\n",
      "I1201 13:42:17.388904 139725318354752 create_inputs_utils.py:219] guid: TEST-0-4\n",
      "I1201 13:42:17.389189 139725318354752 create_inputs_utils.py:221] tokens: [CLS] a man with a red helmet on a small mo ##ped on a dirt road [SEP]\n",
      "I1201 13:42:17.389466 139725318354752 create_inputs_utils.py:222] input_ids: 101 1037 2158 2007 1037 2417 10412 2006 1037 2235 9587 5669 2006 1037 6900 2346 102 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.389738 139725318354752 create_inputs_utils.py:223] input_mask: 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.390031 139725318354752 create_inputs_utils.py:224] segment_ids: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      "I1201 13:42:17.390291 139725318354752 create_inputs_utils.py:225] label: None (id = 0)\n",
      "I1201 13:42:19.758776 139725318354752 create_inputs_utils.py:128] Writing example 10000 of 25000\n",
      "I1201 13:42:22.123027 139725318354752 create_inputs_utils.py:128] Writing example 20000 of 25000\n",
      "I1201 13:42:23.302554 139725318354752 create_inputs_utils.py:272] Saving features into cached file ../preprocessed_data/cached_TEST_Caption\n"
     ]
    }
   ],
   "source": [
    "train_image_captions_with_len = []\n",
    "val_image_captions_with_len = []\n",
    "test_image_captions_with_len = []\n",
    "\n",
    "for img in data['images']:\n",
    "    captions = []\n",
    "    captions_with_len = []\n",
    "    \n",
    "    for caption in img['sentences']:\n",
    "        # Update word frequency\n",
    "        if len(caption['tokens']) <= max_len:\n",
    "            #captions_forlen : [['a','man','with','a','red','helmet'],['a','man',..]] 5개의 캡션씩 들어 있음\n",
    "            #['a','man','with','a','red','helmet'] -> \"a man with a red helmet\"\n",
    "            \n",
    "            #bowonko\n",
    "            caption_len = len(caption['tokens']) + 2\n",
    "            \n",
    "            caption_sen = \" \".join(caption['tokens'])\n",
    "             \n",
    "            captions.append(caption_sen)\n",
    "            \n",
    "            #bowonko\n",
    "            \n",
    "            cwl_item = (caption_sen,caption_len)\n",
    "            captions_with_len.append(cwl_item)\n",
    "            \n",
    "    #captions : ['sen1','sen2','sen3','sen4','sen5']\n",
    "    if len(captions) == 0:\n",
    "        print('절대 발생할 수 없음')\n",
    "        continue\n",
    "\n",
    "    #ID\n",
    "    image_id = img['filename'].split('_')[2]\n",
    "    image_id = int(image_id.lstrip(\"0\").split('.')[0])\n",
    "\n",
    "    #split은 train, val, test, restval 로 구성\n",
    "    if img['split'] in {'train', 'restval'}:\n",
    "        if img['filepath'] == 'train2014':\n",
    "            if image_id in train_data:\n",
    "                train_image_det.append((\"t\",train_data[image_id]))\n",
    "        else:\n",
    "            if image_id in val_data:\n",
    "                train_image_det.append((\"v\",val_data[image_id]))\n",
    "        #bowonko\n",
    "        train_image_captions.append(captions)\n",
    "        train_image_captions_with_len.append(captions_with_len)\n",
    "        \n",
    "        #train_image_captions은 2차원, element(captions) = ['sen1','sen2','sen3','sen4','sen5'], \n",
    "\n",
    "    elif img['split'] in {'val'}:\n",
    "        if image_id in val_data:\n",
    "            val_image_det.append((\"v\",val_data[image_id]))   \n",
    "        #bowonko    \n",
    "        val_image_captions.append(captions)\n",
    "        val_image_captions_with_len.append(captions_with_len)\n",
    "        \n",
    "        #val_image_captions은 2차원, element(captions) = ['sen1','sen2','sen3','sen4','sen5']\n",
    "    elif img['split'] in {'test'}:\n",
    "        if image_id in val_data:\n",
    "            test_image_det.append((\"v\",val_data[image_id])) \n",
    "        #bowonko\n",
    "        test_image_captions.append(captions)\n",
    "        test_image_captions_with_len.append(captions_with_len)\n",
    "        \n",
    "        #test_image_captions은 2차원, element(captions) = ['sen1','sen2','sen3','sen4','sen5']\n",
    "# Sanity check: one detection-feature entry per image in every split\n",
    "assert len(train_image_det) == len(train_image_captions)\n",
    "assert len(train_image_det) == len(train_image_captions_with_len)\n",
    "assert len(val_image_det) == len(val_image_captions)\n",
    "assert len(val_image_det) == len(val_image_captions_with_len)\n",
    "assert len(test_image_det) == len(test_image_captions)\n",
    "assert len(test_image_det) == len(test_image_captions_with_len)\n",
    "\n",
    "caption_size = 5\n",
    "\n",
    "def _token_lengths(dataset):\n",
    "    '''Number of leading non-pad tokens (token id 0 = [PAD]) per example.\n",
    "\n",
    "    dataset: TensorDataset produced by load_and_cache_examples;\n",
    "    dataset[b][0] holds the padded input-id tensor of example b.\n",
    "    '''\n",
    "    lengths = []\n",
    "    for b in range(len(dataset)):\n",
    "        input_ids = dataset[b][0]\n",
    "        cnt = 0\n",
    "        for tok_id in input_ids:\n",
    "            if tok_id == 0:  # first pad token marks the end of the caption\n",
    "                break\n",
    "            cnt += 1\n",
    "        lengths.append(cnt)\n",
    "    return lengths\n",
    "\n",
    "# imcaps[i] is the list of (caption_sen, caption_len) pairs for image i\n",
    "for impaths, imcaps, split in [(train_image_det, train_image_captions_with_len, 'TRAIN'),\n",
    "                               (val_image_det, val_image_captions_with_len, 'VAL'),\n",
    "                               (test_image_det, test_image_captions_with_len, 'TEST')]:\n",
    "\n",
    "    captions_sen = []    # flat list of caption sentences across the whole split\n",
    "    image_captions = []  # ends up as [captions_sen] -- the shape load_and_cache_examples expects\n",
    "\n",
    "    for i, _path in enumerate(tqdm(impaths)):\n",
    "        # Sample exactly captions_per_image captions for this image,\n",
    "        # duplicating random ones when an image has too few\n",
    "        if len(imcaps[i]) < captions_per_image:\n",
    "            captions = imcaps[i] + [choice(imcaps[i]) for _ in range(captions_per_image - len(imcaps[i]))]\n",
    "        else:\n",
    "            captions = sample(imcaps[i], k=captions_per_image)\n",
    "        assert len(captions) == captions_per_image\n",
    "\n",
    "        for cap_sen, _cap_len in captions:\n",
    "            captions_sen.append(cap_sen)\n",
    "        image_captions = [captions_sen]\n",
    "\n",
    "    data_dir = output_folder\n",
    "\n",
    "    # Tokenize the captions (a cache file is written on the first run, loaded afterwards)\n",
    "    dataset = load_and_cache_examples(processor, tokenizer, max_seq_length,\n",
    "                                      model_name_or_path, image_captions, data_dir, split)\n",
    "    # Keep the per-split dataset names used elsewhere in the notebook\n",
    "    if split == 'TRAIN':\n",
    "        train_dataset = dataset\n",
    "    elif split == 'VAL':\n",
    "        dev_dataset = dataset\n",
    "    elif split == 'TEST':\n",
    "        test_dataset = dataset\n",
    "\n",
    "    # Caption lengths measured on the tokenized ids.\n",
    "    # (The previous word-level CAPLENS write was unconditionally overwritten\n",
    "    "    # by this one for every split, so that redundant first write is removed.)\n",
    "    with open(os.path.join(output_folder, split + '_CAPLENS_' + base_filename + '.json'), 'w') as j:\n",
    "        json.dump(_token_lengths(dataset), j)\n",
    "\n",
    "# Save bottom-up features indexing to JSON files\n",
    "with open(os.path.join(output_folder, 'TRAIN' + '_GENOME_DETS_' + base_filename + '.json'), 'w') as j:\n",
    "    json.dump(train_image_det, j)\n",
    "\n",
    "with open(os.path.join(output_folder, 'VAL' + '_GENOME_DETS_' + base_filename + '.json'), 'w') as j:\n",
    "    json.dump(val_image_det, j)\n",
    "\n",
    "with open(os.path.join(output_folder, 'TEST' + '_GENOME_DETS_' + base_filename + '.json'), 'w') as j:\n",
    "    json.dump(test_image_det, j)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
