{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "import os\n",
    "import time\n",
    "import torch\n",
    "from torch.autograd import Variable\n",
    "from torchvision import datasets, transforms\n",
    "import scipy.io\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "import matplotlib.pyplot as plt\n",
    "import scipy.misc\n",
    "\n",
    "from darknet import Darknet\n",
    "import dataset\n",
    "from utils import *\n",
    "from MeshPly import MeshPly\n",
    "\n",
    "# Create new directory\n",
    "def makedirs(path):\n",
    "    if not os.path.exists( path ):\n",
    "        os.makedirs( path )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "ename": "IOError",
     "evalue": "[Errno 2] No such file or directory: 'LINEMOD/ape/ape.ply'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mIOError\u001b[0m                                   Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-7-cd23ddeac3d5>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m    272\u001b[0m \u001b[0mcfgfile\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'cfg/yolo-pose.cfg'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    273\u001b[0m \u001b[0mweightfile\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'backup/ape/model_backup.weights'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 274\u001b[0;31m \u001b[0mvalid\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdatacfg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcfgfile\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweightfile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m<ipython-input-7-cd23ddeac3d5>\u001b[0m in \u001b[0;36mvalid\u001b[0;34m(datacfg, cfgfile, weightfile)\u001b[0m\n\u001b[1;32m     61\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     62\u001b[0m     \u001b[0;31m# Read object model information, get 3D bounding box corners\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m     \u001b[0mmesh\u001b[0m          \u001b[0;34m=\u001b[0m \u001b[0mMeshPly\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmeshname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     64\u001b[0m     \u001b[0mvertices\u001b[0m      \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mc_\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmesh\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvertices\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mones\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmesh\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvertices\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtranspose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     65\u001b[0m     \u001b[0mcorners3D\u001b[0m     \u001b[0;34m=\u001b[0m \u001b[0mget_3D_corners\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvertices\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/cvlabdata1/home/btekin/ope/singleshotpose_release/MeshPly.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, filename, color)\u001b[0m\n\u001b[1;32m      4\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcolor\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0.\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0.\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0.\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m         \u001b[0mf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilename\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'r'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      7\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvertices\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      8\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcolors\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mIOError\u001b[0m: [Errno 2] No such file or directory: 'LINEMOD/ape/ape.ply'"
     ]
    }
   ],
   "source": [
    "def valid(datacfg, cfgfile, weightfile):\n",
    "    \"\"\"Evaluate a trained single-shot 6D pose network on a validation set.\n",
    "\n",
    "    datacfg    -- data configuration file (keys: valid, mesh, backup, name, diam)\n",
    "    cfgfile    -- Darknet network configuration file\n",
    "    weightfile -- trained network weights\n",
    "\n",
    "    Logs 2D-projection, IoU, 3D-transformation, 5cm-5degree and corner-distance\n",
    "    accuracies; optionally saves per-image predictions and visualizes results.\n",
    "    \"\"\"\n",
    "    def truths_length(truths):\n",
    "        # Targets are zero-padded up to 50 rows; return the number of real ground truths\n",
    "        for i in range(50):\n",
    "            if truths[i][1] == 0:\n",
    "                return i\n",
    "\n",
    "    # Parse configuration files\n",
    "    options      = read_data_cfg(datacfg)\n",
    "    valid_images = options['valid']\n",
    "    meshname     = options['mesh']\n",
    "    backupdir    = options['backup']\n",
    "    name         = options['name']\n",
    "    if not os.path.exists(backupdir):\n",
    "        makedirs(backupdir)\n",
    "\n",
    "    # Parameters\n",
    "    prefix       = 'results'\n",
    "    seed         = int(time.time())\n",
    "    gpus         = '0'     # Specify which gpus to use\n",
    "    test_width   = 544\n",
    "    test_height  = 544\n",
    "    torch.manual_seed(seed)\n",
    "    use_cuda = True\n",
    "    if use_cuda:\n",
    "        os.environ['CUDA_VISIBLE_DEVICES'] = gpus\n",
    "        torch.cuda.manual_seed(seed)\n",
    "    save            = False\n",
    "    visualize       = True\n",
    "    testtime        = True\n",
    "    num_classes     = 1\n",
    "    testing_samples = 0.0\n",
    "    eps             = 1e-5\n",
    "    notpredicted    = 0\n",
    "    conf_thresh     = 0.1\n",
    "    nms_thresh      = 0.4\n",
    "    match_thresh    = 0.5\n",
    "    # Pairs of corner indices that form the 12 edges of the 3D bounding box\n",
    "    edges_corners = [[0, 1], [0, 2], [0, 4], [1, 3], [1, 5], [2, 3], [2, 6], [3, 7], [4, 5], [4, 6], [5, 7], [6, 7]]\n",
    "\n",
    "    if save:\n",
    "        makedirs(backupdir + '/test')\n",
    "        makedirs(backupdir + '/test/gt')\n",
    "        makedirs(backupdir + '/test/pr')\n",
    "\n",
    "    # Accumulators for errors and (optionally saved) predictions\n",
    "    testing_error_trans = 0.0\n",
    "    testing_error_angle = 0.0\n",
    "    testing_error_pixel = 0.0\n",
    "    errs_2d             = []\n",
    "    errs_3d             = []\n",
    "    errs_trans          = []\n",
    "    errs_angle          = []\n",
    "    errs_corner2D       = []\n",
    "    preds_trans         = []\n",
    "    preds_rot           = []\n",
    "    preds_corners2D     = []\n",
    "    gts_trans           = []\n",
    "    gts_rot             = []\n",
    "    gts_corners2D       = []\n",
    "    ious                = []\n",
    "\n",
    "    # Read object model information, get 3D bounding box corners\n",
    "    mesh          = MeshPly(meshname)\n",
    "    vertices      = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()\n",
    "    corners3D     = get_3D_corners(vertices)\n",
    "    # diam          = calc_pts_diameter(np.array(mesh.vertices))\n",
    "    diam          = float(options['diam'])\n",
    "\n",
    "    # Read intrinsic camera parameters\n",
    "    internal_calibration = get_camera_intrinsic()\n",
    "\n",
    "    # Get validation file names\n",
    "    with open(valid_images) as fp:\n",
    "        tmp_files = fp.readlines()\n",
    "        valid_files = [item.rstrip() for item in tmp_files]\n",
    "\n",
    "    # Specify model, load pretrained weights, pass to GPU and set the module in evaluation mode\n",
    "    model = Darknet(cfgfile)\n",
    "    model.print_network()\n",
    "    model.load_weights(weightfile)\n",
    "    model.cuda()\n",
    "    model.eval()\n",
    "\n",
    "    # Get the parser for the test dataset\n",
    "    valid_dataset = dataset.listDataset(valid_images, shape=(test_width, test_height),\n",
    "                       shuffle=False,\n",
    "                       transform=transforms.Compose([\n",
    "                           transforms.ToTensor(),]))\n",
    "    valid_batchsize = 1\n",
    "\n",
    "    # Specify the number of workers for multiple processing, get the dataloader for the test dataset\n",
    "    kwargs = {'num_workers': 4, 'pin_memory': True}\n",
    "    test_loader = torch.utils.data.DataLoader(\n",
    "        valid_dataset, batch_size=valid_batchsize, shuffle=False, **kwargs)\n",
    "\n",
    "    logging(\"   Testing {}...\".format(name))\n",
    "    logging(\"   Number of test samples: %d\" % len(test_loader.dataset))\n",
    "    # Iterate through test batches (Batch size for test data is 1)\n",
    "    count = 0\n",
    "    z = np.zeros((3, 1))\n",
    "    for batch_idx, (data, target) in enumerate(test_loader):\n",
    "\n",
    "        # Images\n",
    "        img = data[0, :, :, :]\n",
    "        img = img.numpy().squeeze()\n",
    "        img = np.transpose(img, (1, 2, 0))\n",
    "\n",
    "        t1 = time.time()\n",
    "        # Pass data to GPU\n",
    "        if use_cuda:\n",
    "            data = data.cuda()\n",
    "            target = target.cuda()\n",
    "\n",
    "        # Wrap tensors in Variable class, set volatile=True for inference mode and to use minimal memory during inference\n",
    "        data = Variable(data, volatile=True)\n",
    "        t2 = time.time()\n",
    "\n",
    "        # Forward pass\n",
    "        output = model(data).data\n",
    "        t3 = time.time()\n",
    "\n",
    "        # Using confidence threshold, eliminate low-confidence predictions\n",
    "        all_boxes = get_region_boxes(output, conf_thresh, num_classes)\n",
    "        t4 = time.time()\n",
    "\n",
    "        # Iterate through all images in the batch\n",
    "        for i in range(output.size(0)):\n",
    "\n",
    "            # For each image, get all the predictions\n",
    "            boxes   = all_boxes[i]\n",
    "\n",
    "            # For each image, get all the targets (for multiple object pose estimation, there might be more than 1 target per image)\n",
    "            truths  = target[i].view(-1, 21)\n",
    "\n",
    "            # Get how many objects are present in the scene\n",
    "            num_gts = truths_length(truths)\n",
    "\n",
    "            # Iterate through each ground-truth object\n",
    "            for k in range(num_gts):\n",
    "                box_gt        = [truths[k][1], truths[k][2], truths[k][3], truths[k][4], truths[k][5], truths[k][6], \n",
    "                                truths[k][7], truths[k][8], truths[k][9], truths[k][10], truths[k][11], truths[k][12], \n",
    "                                truths[k][13], truths[k][14], truths[k][15], truths[k][16], truths[k][17], truths[k][18], 1.0, 1.0, truths[k][0]]\n",
    "                best_conf_est = -1\n",
    "\n",
    "                # If the prediction has the highest confidence, choose it as our prediction for single object pose estimation\n",
    "                # NOTE(review): box_pr stays unbound if `boxes` is empty; assumes at least one prediction per image\n",
    "                for j in range(len(boxes)):\n",
    "                    if (boxes[j][18] > best_conf_est):\n",
    "                        match         = corner_confidence9(box_gt[:18], torch.FloatTensor(boxes[j][:18]))\n",
    "                        box_pr        = boxes[j]\n",
    "                        best_conf_est = boxes[j][18]\n",
    "\n",
    "                # Denormalize the corner predictions (network outputs are relative to a 640x480 image)\n",
    "                corners2D_gt = np.array(np.reshape(box_gt[:18], [9, 2]), dtype='float32')\n",
    "                corners2D_pr = np.array(np.reshape(box_pr[:18], [9, 2]), dtype='float32')\n",
    "                corners2D_gt[:, 0] = corners2D_gt[:, 0] * 640\n",
    "                corners2D_gt[:, 1] = corners2D_gt[:, 1] * 480\n",
    "                corners2D_pr[:, 0] = corners2D_pr[:, 0] * 640\n",
    "                corners2D_pr[:, 1] = corners2D_pr[:, 1] * 480\n",
    "                preds_corners2D.append(corners2D_pr)\n",
    "                gts_corners2D.append(corners2D_gt)\n",
    "\n",
    "                # Compute corner prediction error\n",
    "                corner_norm = np.linalg.norm(corners2D_gt - corners2D_pr, axis=1)\n",
    "                corner_dist = np.mean(corner_norm)\n",
    "                errs_corner2D.append(corner_dist)\n",
    "\n",
    "                # Compute [R|t] by pnp\n",
    "                R_gt, t_gt = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'),  corners2D_gt, np.array(internal_calibration, dtype='float32'))\n",
    "                R_pr, t_pr = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'),  corners2D_pr, np.array(internal_calibration, dtype='float32'))\n",
    "\n",
    "                if save:\n",
    "                    preds_trans.append(t_pr)\n",
    "                    gts_trans.append(t_gt)\n",
    "                    preds_rot.append(R_pr)\n",
    "                    gts_rot.append(R_gt)\n",
    "\n",
    "                    # Fixed: gt/t_* previously saved R_pr and pr/R_* saved t_gt (arrays were swapped)\n",
    "                    np.savetxt(backupdir + '/test/gt/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_gt, dtype='float32'))\n",
    "                    np.savetxt(backupdir + '/test/gt/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_gt, dtype='float32'))\n",
    "                    np.savetxt(backupdir + '/test/pr/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_pr, dtype='float32'))\n",
    "                    np.savetxt(backupdir + '/test/pr/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_pr, dtype='float32'))\n",
    "                    np.savetxt(backupdir + '/test/gt/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_gt, dtype='float32'))\n",
    "                    np.savetxt(backupdir + '/test/pr/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_pr, dtype='float32'))\n",
    "\n",
    "                # Compute translation error\n",
    "                trans_dist   = np.sqrt(np.sum(np.square(t_gt - t_pr)))\n",
    "                errs_trans.append(trans_dist)\n",
    "\n",
    "                # Compute angle error\n",
    "                angle_dist   = calcAngularDistance(R_gt, R_pr)\n",
    "                errs_angle.append(angle_dist)\n",
    "\n",
    "                # Compute pixel error\n",
    "                Rt_gt        = np.concatenate((R_gt, t_gt), axis=1)\n",
    "                Rt_pr        = np.concatenate((R_pr, t_pr), axis=1)\n",
    "                proj_2d_gt   = compute_projection(vertices, Rt_gt, internal_calibration)\n",
    "                proj_2d_pred = compute_projection(vertices, Rt_pr, internal_calibration)\n",
    "                proj_corners_gt = np.transpose(compute_projection(corners3D, Rt_gt, internal_calibration))\n",
    "                proj_corners_pr = np.transpose(compute_projection(corners3D, Rt_pr, internal_calibration))\n",
    "                norm         = np.linalg.norm(proj_2d_gt - proj_2d_pred, axis=0)\n",
    "                pixel_dist   = np.mean(norm)\n",
    "                errs_2d.append(pixel_dist)\n",
    "\n",
    "                if visualize:\n",
    "                    # Visualize ground-truth (green) vs predicted (blue) 3D bounding box projections\n",
    "                    plt.xlim((0, 640))\n",
    "                    plt.ylim((0, 480))\n",
    "                    # NOTE(review): scipy.misc.imresize was removed in SciPy >= 1.3; pin SciPy or port to PIL\n",
    "                    plt.imshow(scipy.misc.imresize(img, (480, 640)))\n",
    "                    for edge in edges_corners:\n",
    "                        plt.plot(proj_corners_gt[edge, 0], proj_corners_gt[edge, 1], color='g', linewidth=3.0)\n",
    "                        plt.plot(proj_corners_pr[edge, 0], proj_corners_pr[edge, 1], color='b', linewidth=3.0)\n",
    "                    plt.gca().invert_yaxis()\n",
    "                    plt.show()\n",
    "\n",
    "                # Compute IoU score\n",
    "                bb_gt        = compute_2d_bb_from_orig_pix(proj_2d_gt, output.size(3))\n",
    "                bb_pred      = compute_2d_bb_from_orig_pix(proj_2d_pred, output.size(3))\n",
    "                iou          = bbox_iou(bb_gt, bb_pred)\n",
    "                ious.append(iou)\n",
    "\n",
    "                # Compute 3D distances\n",
    "                transform_3d_gt   = compute_transformation(vertices, Rt_gt)\n",
    "                transform_3d_pred = compute_transformation(vertices, Rt_pr)\n",
    "                norm3d            = np.linalg.norm(transform_3d_gt - transform_3d_pred, axis=0)\n",
    "                vertex_dist       = np.mean(norm3d)\n",
    "                errs_3d.append(vertex_dist)\n",
    "\n",
    "                # Sum errors\n",
    "                testing_error_trans  += trans_dist\n",
    "                testing_error_angle  += angle_dist\n",
    "                testing_error_pixel  += pixel_dist\n",
    "                testing_samples      += 1\n",
    "                count = count + 1\n",
    "\n",
    "        t5 = time.time()\n",
    "\n",
    "    # Compute 2D projection error, 6D pose error, 5cm5degree error\n",
    "    px_threshold = 5\n",
    "    acc         = len(np.where(np.array(errs_2d) <= px_threshold)[0]) * 100. / (len(errs_2d)+eps)\n",
    "    # Fixed: IoU accuracy is the fraction of IoU scores >= 0.5 (was mistakenly computed from errs_2d)\n",
    "    acciou      = len(np.where(np.array(ious) >= 0.5)[0]) * 100. / (len(ious)+eps)\n",
    "    acc5cm5deg  = len(np.where((np.array(errs_trans) <= 0.05) & (np.array(errs_angle) <= 5))[0]) * 100. / (len(errs_trans)+eps)\n",
    "    acc3d10     = len(np.where(np.array(errs_3d) <= diam * 0.1)[0]) * 100. / (len(errs_3d)+eps)\n",
    "    corner_acc  = len(np.where(np.array(errs_corner2D) <= px_threshold)[0]) * 100. / (len(errs_corner2D)+eps)\n",
    "    mean_err_2d = np.mean(errs_2d)\n",
    "    mean_corner_err_2d = np.mean(errs_corner2D)\n",
    "    nts = float(testing_samples) + eps  # eps avoids division by zero on an empty test set\n",
    "\n",
    "    if testtime:\n",
    "        print('-----------------------------------')\n",
    "        print('  tensor to cuda : %f' % (t2 - t1))\n",
    "        print('         predict : %f' % (t3 - t2))\n",
    "        print('get_region_boxes : %f' % (t4 - t3))\n",
    "        print('             nms : %f' % (t5 - t4))\n",
    "        print('           total : %f' % (t5 - t1))\n",
    "        print('-----------------------------------')\n",
    "\n",
    "    # Print test statistics\n",
    "    logging('Results of {}'.format(name))\n",
    "    logging('   Acc using {} px 2D Projection = {:.2f}%'.format(px_threshold, acc))\n",
    "    logging('   Acc using the IoU metric = {:.6f}%'.format(acciou))\n",
    "    logging('   Acc using 10% threshold - {} vx 3D Transformation = {:.2f}%'.format(diam * 0.1, acc3d10))\n",
    "    logging('   Acc using 5 cm 5 degree metric = {:.2f}%'.format(acc5cm5deg))\n",
    "    logging(\"   Mean 2D pixel error is %f, Mean vertex error is %f, mean corner error is %f\" % (mean_err_2d, np.mean(errs_3d), mean_corner_err_2d))\n",
    "    logging('   Translation error: %f m, angle error: %f degree, pixel error: % f pix' % (testing_error_trans/nts, testing_error_angle/nts, testing_error_pixel/nts) )\n",
    "\n",
    "    if save:\n",
    "        predfile = backupdir + '/predictions_linemod_' + name +  '.mat'\n",
    "        scipy.io.savemat(predfile, {'R_gts': gts_rot, 't_gts':gts_trans, 'corner_gts': gts_corners2D, 'R_prs': preds_rot, 't_prs':preds_trans, 'corner_prs': preds_corners2D})\n",
    "\n",
    "# Run validation for the 'ape' LINEMOD object\n",
    "datacfg    = 'cfg/ape.data'                     # dataset / object configuration\n",
    "cfgfile    = 'cfg/yolo-pose.cfg'                # network architecture definition\n",
    "weightfile = 'backup/ape/model_backup.weights'  # trained model weights\n",
    "valid(datacfg, cfgfile, weightfile)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
