"""Adapted from:
    @longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
    @rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
    Licensed under The MIT License [see LICENSE for details]
    
    Usage: python eval_vis.py --confidence_threshold 0.3
    python ./eval_vis.py --trained_model weights/my_ssd300_COCO_25000.pth --confidence_threshold 0.3
"""

from __future__ import print_function
import torch
import torch.backends.cudnn as cudnn
from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
from data import VOC_CLASSES as labelmap

from ssd import build_ssd

import sys
import os
import argparse
import cv2

# Pick the XML parser: Python 2 needs the explicit C-accelerated module,
# while Python 3's ElementTree is already accelerated.
if sys.version_info[0] != 2:
    import xml.etree.ElementTree as ET
else:
    import xml.etree.cElementTree as ET


def str2bool(v):
    """Map common truthy strings ("yes"/"true"/"t"/"1", any case) to True.

    Every other string — including unrecognized values — maps to False.
    """
    truthy = {"yes", "true", "t", "1"}
    return v.lower() in truthy


# Command-line interface: checkpoint path, score threshold, device flag,
# and dataset root.  Parsed eagerly at import time (module-level `args`).
parser = argparse.ArgumentParser(
    description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--trained_model',
                    default='weights/ssd300_mAP_77.43_v2.pth', type=str,
                    help='Trained state_dict file path to open')
parser.add_argument('--confidence_threshold', default=0.5, type=float,
                    help='Detection confidence threshold')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOC_ROOT,
                    help='Location of VOC root directory')

args = parser.parse_args()

# Choose the default tensor type once, before any tensors are created.
if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        # The original literal used a backslash continuation inside the
        # string, which injected a run of spaces into the printed message;
        # use implicit concatenation for a clean one-line warning.
        print("WARNING: It looks like you have a CUDA device, but aren't "
              "using CUDA.  Run with --cuda for optimal eval speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')

# Filename templates for the VOC2007 split; each is completed with an
# image id (annopath/imgpath use %-formatting, imgsetpath uses str.format).
annopath = os.path.join(args.voc_root, 'VOC2007', 'Annotations', '%s.xml')
imgpath = os.path.join(args.voc_root, 'VOC2007', 'JPEGImages', '%s.jpg')
imgsetpath = os.path.join(args.voc_root, 'VOC2007', 'ImageSets',
                          'Main', '{:s}.txt')
YEAR = '2007'
# NOTE(review): plain string concatenation assumes voc_root already ends
# with a path separator — confirm against how VOC_ROOT is defined.
devkit_path = args.voc_root + 'VOC' + YEAR
# Per-channel pixel mean subtracted by BaseTransform
# (presumably BGR order to match OpenCV loading — confirm).
dataset_mean = (104, 117, 123)
set_type = 'test'


def test_net(net, cuda, dataset, transform,
             im_size=300, thresh=0.05):
    """Run the detector over every image in ``dataset`` and save annotated
    visualizations under ./tmp/.

    Args:
        net: SSD model built with phase 'test' (outputs decoded detections).
        cuda (bool): move inputs to the GPU when True.
        dataset: detection dataset exposing ``pull_item(i, origin_img=...)``.
        transform: unused here (pre-processing happens inside the dataset);
            kept for signature compatibility.
        im_size (int): unused; kept for signature compatibility.
        thresh (float): minimum confidence for a detection to be drawn.
    """
    num_images = len(dataset)
    # cv2.imwrite fails silently when the target directory is missing,
    # so make sure ./tmp exists before the loop.
    if not os.path.isdir('./tmp'):
        os.makedirs('./tmp')
    with torch.no_grad():
        for i in range(num_images):
            # pull_item fills origin_img[0] with the untransformed image.
            origin_img = [[]]
            im, gt, h, w = dataset.pull_item(i, origin_img=origin_img)
            cv2.imwrite('./tmp/o_img_%d.jpg' % i, origin_img[0])
            x = im.unsqueeze(0)
            # Use the `cuda` parameter (the original read the global
            # args.cuda instead; the only caller passes the same value).
            if cuda:
                x = x.cuda()
            detections = net(x)

            # skip j = 0, because it's the background class
            for j in range(1, detections.size(1)):
                dets = detections[0, j, :]
                # Keep rows whose score (column 0) exceeds the threshold;
                # each row is [score, x1, y1, x2, y2].
                mask = dets[:, 0].gt(thresh).expand(5, dets.size(0)).t()
                dets = torch.masked_select(dets, mask).view(-1, 5)
                if dets.size(0) == 0:
                    continue
                # Boxes are normalized to [0, 1]; scale back to pixels.
                boxes = dets[:, 1:]
                boxes[:, 0] *= w
                boxes[:, 2] *= w
                boxes[:, 1] *= h
                boxes[:, 3] *= h
                scores = dets[:, 0].cpu().numpy()

                # Draw each surviving box with its class label and score.
                for index_box in range(dets.size(0)):
                    cv2.rectangle(origin_img[0], (int(boxes[index_box, 0]), int(boxes[index_box, 1])), 
                                (int(boxes[index_box, 2]), int(boxes[index_box, 3])),
                                (0, 255, 0), 2)
                    cv2.putText(origin_img[0], labelmap[j-1] + ' ' +  str(round(scores[index_box],2)), (int(boxes[index_box, 0]), int(boxes[index_box, 1])-5), 
                                cv2.FONT_HERSHEY_PLAIN, 1.2, (0,255,255), 2)
            print('im_detect: {:d}/{:d}'.format(i + 1, num_images))

            cv2.imwrite('./tmp/origin_img_%d.jpg' % i, origin_img[0])

if __name__ == '__main__':
    # Build the network sized for the VOC label set (+1 background class).
    num_classes = len(labelmap) + 1                      # +1 for background
    net = build_ssd('test', 300, num_classes)            # initialize SSD
    # map_location lets CPU-only machines load GPU-saved checkpoints
    # (plain torch.load would raise when CUDA is unavailable).
    net.load_state_dict(torch.load(
        args.trained_model,
        map_location=None if args.cuda and torch.cuda.is_available() else 'cpu'))
    net.eval()
    print('Finished loading model!')
    # load the VOC2007 test split with SSD pre-processing
    dataset = VOCDetection(args.voc_root, [('2007', set_type)],
                           BaseTransform(300, dataset_mean),
                           VOCAnnotationTransform())
    if args.cuda:
        net = net.cuda()
        cudnn.benchmark = True
    # evaluation / visualization
    test_net(net, args.cuda, dataset,
             BaseTransform(net.size, dataset_mean), 300,
             thresh=args.confidence_threshold)
