#!/usr/bin/env python
#-*- coding:utf-8 -*-

# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

"""
调用须知：

1. 该源码:
   用来对图片画框(并显示类别,概率), 执行图片重命名并移动操作(仅限于丽水实地情形), 可在一张图片上画多个类别的检测结果

2. (不改变源码的前提下)适用情形:
   a. 丽水实地  b. 对VOC2007中的test set的图片进行画框

3. 调用前的准备工作

大前提: 确保CLASSES是你要检测的类别

情形a -- 丽水实地:
  (1) 确保data/demo有想要画框(并显示类别,概率)的"丽水实地"的图片
  注: 得到 -- data/test_result/"args.train_date-丽水实地"/路径下有-->类别概率大于阈值的图片(不改变图片的文件名);
          -- data/test_result/"args.train_date-丽水实地"/not_detect/路径下有-->类别概率小于阈值的图片
             (重命名图片, 命名格式见源码, 这里的图片跟类别有关, 即给定一张图片img和两个检测类别c1,c2, 如果img的c1,c2概率均小于阈值,
             那么该文件夹下有对应img的两个图片文件);
          -- data/test_result/"args.train_date-丽水实地"/路径下还有-->文件args.train_date-丽水实地.csv,
             该文件中一行为一个预测结果, 格式为"图片名(不含扩展名),类别名,概率,x,y,x1,y1"
             (点(x,y)为box左上角的顶点, 点(x1,y1)为box右下角的顶点,
             这里的坐标是“Make pixel indexes 0-based”, 该说法见pascal_voc.py);

情形b -- 对VOC2007中的test set的图片进行画框(并显示类别,概率):
  (1) 确保data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt文件里的图片列表是你想要画框的图片
  (2) 确保data/VOCdevkit2007/VOC2007/JPEGImages/路径下包含上述txt文件里的图片名的对应图片
  注: 得到 -- data/test_result/'compare_real_pred_' + args.train_date/路径下 --> 包含test.txt中的图片列表中的所有图片
             类别概率大于阈值的图片(不改变图片的文件名): 图片上画框并显示类别,概率
             类别概率小于阈值的图片(不改变图片的文件名): 与JPEGImages中对应的图片一致(即只将JPEGImages中对应的图片copy到该路径下, 不作其他改变)
      若要生成csv文件, 修改源码即可
"""

import matplotlib
matplotlib.use("Agg")
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
import shutil
from collections import OrderedDict
from pandas import DataFrame
import time

# Detection classes; index 0 must always be the background class.
CLASSES = ('__background__', 'hat', 'man')
# Accumulates one OrderedDict per drawn detection (filled by vis_detections);
# dumped to a CSV file at the end of the run in the __main__ block.
dict_list = []
# Timestamp used to name this run's output directories and CSV file.
current_time = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))

def vis_detections(image_name, class_name, dets, ax, inds):
    """Draw the detected bounding boxes for one class onto the axes ``ax``.

    Args:
        image_name: image file name; its extension is stripped for the CSV row.
        class_name: detected class name (must be an entry of CLASSES).
        dets: (N, 5) array of [xmin, ymin, xmax, ymax, score] rows.
        ax: matplotlib axes already displaying the image.
        inds: row indices of ``dets`` whose score passed the confidence
            threshold (pre-computed by the caller).

    Side effect: appends one OrderedDict per drawn box to the module-level
    ``dict_list``, which the __main__ block later writes out as a CSV.
    """
    edge_colors = ('red', 'green', 'blue', 'yellow')
    for i in inds:
        # Record the detection for the CSV export.
        dict_list.append(OrderedDict([
            ('ImgName', os.path.splitext(image_name)[0]),
            ('ClsName', class_name),
            ('score', dets[i, -1]),
            ('xmin', dets[i, 0]),
            ('ymin', dets[i, 1]),
            ('xmax', dets[i, 2]),
            ('ymax', dets[i, 3]),
        ]))
        bbox = dets[i, :4]
        score = dets[i, -1]
        # CLASSES[0] is '__background__', so shift by one to index the colors.
        color = edge_colors[CLASSES.index(class_name) - 1]
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor=color, linewidth=3.5)
        )
        # Score label just above the top-left corner of the box.
        ax.text(bbox[0], bbox[1] - 2,
                '{:.3f}'.format(score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

def save_object_image(imagename, im, indexs, dets, obj_im_savepath):
    """Crop each detected box (with a small margin) out of ``im`` and save it.

    Args:
        imagename: source image file name; each crop is saved as
            '<name>_patch_<index>.jpg' under ``obj_im_savepath``.
        im: RGB image array (channels are reversed back to BGR for cv2.imwrite).
        indexs: row indices of ``dets`` to crop.
        dets: (N, 5) array of [xmin, ymin, xmax, ymax, score] rows.
        obj_im_savepath: directory the crops are written into.
    """
    margin = 5
    for ind in indexs:
        obj_image_savepath = os.path.join(
            obj_im_savepath, imagename[:-4] + '_patch_' + str(ind) + '.jpg')
        # Expand the box by `margin` pixels per side while staying inside the
        # image. BUGFIX: the original tested `> 1` for w_start, so xmin values
        # in 2..5 produced a NEGATIVE slice start that wraps around in numpy;
        # use the same `> margin` rule as the vertical axis.
        h_start = int(dets[ind, 1]) - margin if int(dets[ind, 1]) > margin else int(dets[ind, 1])
        h_end = int(dets[ind, 3]) + margin if int(dets[ind, 3]) + margin < im.shape[0] else im.shape[0]
        w_start = int(dets[ind, 0]) - margin if int(dets[ind, 0]) > margin else int(dets[ind, 0])
        w_end = int(dets[ind, 2]) + margin if int(dets[ind, 2]) + margin < im.shape[1] else im.shape[1]
        # Only keep crops taller than 200 px -- presumably filters out tiny /
        # distant detections; TODO confirm intent.
        if h_end - h_start > 200:
            print(im.shape, dets[ind, :])  # debug trace kept from the original
            # `im` is RGB here; reverse the channel axis back to BGR for OpenCV.
            obj_image = im[h_start:h_end, w_start:w_end, ::-1]
            cv2.imwrite(obj_image_savepath, obj_image)

def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals.

    Runs the network on one image and, depending on the CLI flags (the
    module-level ``args``), either saves the image with detection boxes drawn,
    copies images with no above-threshold detection aside, or crops detected
    'man' objects into separate image files.

    Args:
        net: a loaded caffe.Net passed to im_detect.
        image_name: file name of the image, resolved under data/demo or the
            VOC2007 JPEGImages directory depending on --compare.
    """

    ####### ----Newly added----Set the save paths------ ############
    if args.compare_real_pred:
        test_result_savepath = os.path.join(cfg.DATA_DIR, "test_result", 'compare_real_pred_'
                                            + current_time + "_" + "".join(CLASSES[1:]))
        if not os.path.isdir(test_result_savepath):
            os.makedirs(test_result_savepath)
    elif args.save_object_im:
        object_im_savepath = os.path.join(cfg.DATA_DIR, "object_image", current_time)
        if not os.path.isdir(object_im_savepath):
            os.makedirs(object_im_savepath)
    else:
        test_result_savepath = os.path.join(cfg.DATA_DIR, "test_result", current_time + '_' + "".join(CLASSES[1:]))
        not_detect_savepath = os.path.join(test_result_savepath, "not_detect")
        if not os.path.isdir(not_detect_savepath):
            os.makedirs(not_detect_savepath)

    # Load the image
    if not args.compare_real_pred:
        im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    else:
        im_file = os.path.join(cfg.DATA_DIR, 'VOCdevkit2007/VOC2007/JPEGImages', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class
    CONF_THRESH = args.conf_thresh # the original default was 0.8
    NMS_THRESH = args.nms_ovthresh

    ####### ----Newly added----Set up the drawing canvas------ ############
    im = im[:, :, (2, 1, 0)]
    if not args.save_object_im:
        # plt.subplots returns a figure and an axes object.
        # figsize is chosen so the canvas matches the original image size
        # (figsize is in inches; the 0.01 factor assumes the default 100 DPI):
        # figsize=(image width, image height)
        fig, ax = plt.subplots(figsize=(im.shape[1]*0.01, im.shape[0]*0.01))
        ax.imshow(im)
        # Make the image fill the whole canvas so the saved file has the same
        # dimensions as the original image.
        plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)

    hrs_Num = 0  # number of classes with no detection above CONF_THRESH

    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)  # non-maximum suppression
        dets = dets[keep, :]
        ####### ----Newly added----indices of detections scoring >= CONF_THRESH------ ############
        inds = np.where(dets[:, -1] >= CONF_THRESH)[0]

        ####### ----Newly added----first export images whose scores are all below CONF_THRESH------ ############
        if len(inds) == 0:
            hrs_Num += 1
            if args.compare_real_pred:
                shutil.copy(im_file, test_result_savepath)
                continue
            elif args.save_object_im:
                continue
            else:
                pics_filename = os.path.join(not_detect_savepath, os.path.splitext(image_name)[0]
                                             + '_' + 'Not_' + cls + '_' + str(CONF_THRESH) + '.jpg')
                shutil.copy(im_file, pics_filename)

        # Crop the detected objects and save them as separate images
        if args.save_object_im:
            if cls == 'man':
                save_object_image(image_name, im, inds, dets, object_im_savepath)

        # Draw the detection boxes on the image
        if not args.save_object_im:
            vis_detections(image_name, cls, dets, ax, inds)

    # ax.set_title(('{} {} detections with '
    #               'p({} | box) >= {:.1f}').format(args.train_date[4:], CLASSES[1:], CLASSES[1:], CONF_THRESH), fontsize=14)

    ####### ----Newly added---------- ############
    # Nothing detected for any class: discard the figure and save nothing.
    if hrs_Num == len(CLASSES) - 1:
        plt.close()
        return

    if not args.save_object_im:
        plt.axis('off')  # hide axes and ticks
        #plt.tight_layout()  # compact, centered layout
        plt.draw()
        plt.savefig(os.path.join(test_result_savepath, image_name))
        plt.close()

def parse_args():
    """Build the CLI parser for the demo and return the parsed arguments."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')

    # Device selection
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0,
                        help='GPU device id to use [0]')
    parser.add_argument('--cpu', dest='cpu_mode', action='store_true',
                        help='Use CPU mode (overrides --gpu)')

    # Model / configuration files
    parser.add_argument('--net', dest='caffemodel', type=str, default=None,
                        help='model to test')
    parser.add_argument('--def', dest='prototxt', type=str, default=None,
                        help='prototxt file defining the network')
    parser.add_argument('--cfg', dest='cfg_file', type=str, default=None,
                        help='optional config file')
    parser.add_argument('--set', dest='set_cfgs', nargs=argparse.REMAINDER,
                        default=None, help='set config keys')

    # Run modes
    parser.add_argument('--compare', dest='compare_real_pred', action='store_true',
                        help='set the mode when needing compare the real boxed with predicting boxes')
    parser.add_argument('--save_object_im', dest='save_object_im', action='store_true',
                        help='set the mode when needing save the image of detected man box')

    # Thresholds
    parser.add_argument('--conf_thresh', dest='conf_thresh', type=float, default=0.8,
                        help='set the thresh of confidence')
    parser.add_argument('--nms_ovthresh', dest='nms_ovthresh', type=float, default=0.4,
                        help='set the thresh in NMS')

    return parser.parse_args()

if __name__ == '__main__':

    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    print(cfg.TEST.SCALES)
    caffemodel = args.caffemodel
    prototxt = args.prototxt

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id

    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print('\n\nLoaded network {:s}'.format(caffemodel))

    # Warmup on a dummy image
    # (BUGFIX: the original used Python-2-only `xrange`; `range(2)` behaves
    # identically on both Python 2 and 3.)
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in range(2):
        _, _ = im_detect(net, im)

    # Collect the image list: everything under data/demo, or the VOC2007
    # test split when --compare is given.
    if not args.compare_real_pred:
        im_names = os.listdir(os.path.join(cfg.DATA_DIR, 'demo'))
        im_names.sort()
    else:
        im_names = [line.strip() + '.jpg' for line in
                open(os.path.join(cfg.DATA_DIR, "VOCdevkit2007/VOC2007/ImageSets/Main/test.txt"))]
    for im_name in im_names:
        print(im_name)
        print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        print('Demo for data--{}'.format(im_name))
        demo(net, im_name)

    # Dump every recorded detection to a CSV next to the boxed images.
    # Only the default mode fills dict_list (the other modes skip drawing).
    if not args.compare_real_pred and not args.save_object_im:
        if len(dict_list) > 0:
            d1 = DataFrame(dict_list)
            d1 = d1.sort_values(by=['ImgName', 'ClsName', 'score', 'xmin', 'ymin'])
            csvsavepath1 = os.path.join(cfg.DATA_DIR, 'test_result', current_time + '_' + "".join(CLASSES[1:]),
                                        current_time + '_' + "".join(CLASSES[1:]) + '.csv')
            if not os.path.exists(csvsavepath1):
                d1.to_csv(csvsavepath1, index=False, header=True)

    print('')
    print('--CONF_THRESH is {} ---'.format(args.conf_thresh))
    print('--NMS_OV_THRESH is {} ---'.format(args.nms_ovthresh))

#plt.show()
