#!/usr/bin/env python
#-*- coding:utf-8 -*-

# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------

import _init_paths
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import numpy as np
import caffe, os, sys, cv2
import time
import random

# Detection class labels; index 0 must be the background class.
CLASSES = ('__background__', 'hat', 'man')
# Accumulator for detection records — appears unused in this file; verify before removing.
dict_list = []
# Run timestamp, e.g. '2018-04-24_12-00-00' — appears unused in this file.
current_time = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))


def vis_detections(frame, class_name, dets, inds, index):
    """Draw detected bounding boxes for the 'man' class onto *frame* in place.

    Detections of other classes (e.g. 'hat') are intentionally not drawn.

    Args:
        frame: BGR image (numpy array) to draw on, modified in place.
        class_name: detected class label from CLASSES.
        dets: (N, 5) float array of [x1, y1, x2, y2, score] detections.
        inds: indices into *dets* that passed the confidence threshold.
        index: current video frame number, used to switch colour schemes.
    """
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]
        if class_name == "man":
            print(class_name, bbox, score)
            # FIX: cv2.rectangle requires integer pixel coordinates; dets is
            # float32, which raises a TypeError on modern OpenCV builds.
            x1, y1, x2, y2 = (int(v) for v in bbox)
            # Frames up to 26*25 + 10 (~26 s at 25 fps) get special handling:
            # a person inside the 797..980 horizontal band is marked in red
            # (BGR (0, 0, 255)); everything else is blue (BGR (255, 0, 0)).
            if index <= 26 * 25 + 10 and x1 > 797 and x2 < 980:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 3)


def save_object_image(imagename, im, indexs, dets, obj_im_savepath):
    """Crop each sufficiently tall detection (with a 5 px margin) and save it.

    Args:
        imagename: source image file name; its stem is used for patch names.
        im: full image array of shape (H, W, 3).
        indexs: indices into *dets* to crop.
        dets: (N, 5) float array of [x1, y1, x2, y2, score] boxes.
        obj_im_savepath: directory the patch JPEGs are written into.
    """
    margin = 5
    for ind in indexs:
        patch_path = os.path.join(
            obj_im_savepath, imagename[:-4] + '_patch_' + str(ind) + '.jpg')
        x1, y1, x2, y2 = (int(v) for v in dets[ind, :4])
        # FIX: expand the box by the margin, clamped to the image bounds.
        # The original used "x1 - 5 if x1 > 1 else x1", so e.g. x1 == 3 gave a
        # negative start index that silently sliced from the end of the array.
        h_start = max(y1 - margin, 0)
        h_end = min(y2 + margin, im.shape[0])
        w_start = max(x1 - margin, 0)
        w_end = min(x2 + margin, im.shape[1])
        if h_end - h_start > 200:  # keep only reasonably tall crops
            print(im.shape, dets[ind, :])
            # ::-1 reverses the channel order (BGR <-> RGB) before writing.
            obj_image = im[h_start:h_end, w_start:w_end, ::-1]
            cv2.imwrite(patch_path, obj_image)

def demo(net, image, index):
    """Detect objects in *image* and draw the accepted boxes onto it in place.

    Args:
        net: loaded caffe detection network.
        image: BGR frame to run detection on; drawn on in place.
        index: current video frame number, forwarded to vis_detections.
    """
    # Detect all object classes and regress object bounds.
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, image)
    timer.toc()
    print(('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0]))

    # Visualize detections for each class.
    CONF_THRESH = 0.8  # original value was 0.8
    NMS_THRESH = 0.4

    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
        # FIX: draw on the `image` argument; the original referenced the
        # module-level global `frame` here, ignoring the parameter entirely.
        vis_detections(image, cls, dets, inds, index)


if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    cfg_from_file("../models/pvanet/cfgs/submit_1019.yml")
    print("TEST.SCALES: ", cfg.TEST.SCALES)
    caffemodel = '../output/faster_rcnn_pvanet/pvanet_frcnn_hatman_all_0.001_20180424_iter_110K_from_ls.caffemodel'
    prototxt = '../models/pvanet/example_train/test_two_classes.prototxt'

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    # CPU-only inference; uncomment the lines below to run on a GPU instead.
    caffe.set_mode_cpu()
    # caffe.set_mode_gpu()
    # caffe.set_device(0)
    # cfg.GPU_ID = 0

    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    print('\n\nLoaded network {:s}'.format(caffemodel))

    cap = cv2.VideoCapture("../吊机下有人.MOV")
    # Output video is written at half the source resolution.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH) / 2)
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT) / 2)
    # FIX: query the actual frame rate; the original asked for
    # CAP_PROP_POS_FRAMES (the current frame position), not the FPS.
    fps = cap.get(cv2.CAP_PROP_FPS)
    print("width, height, fps: ", width, height, fps)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('吊机下有人-处理-2.avi', fourcc, 25.0, (width, height))
    i = 1
    while cap.isOpened():
        ret, frame = cap.read()
        # FIX: stop cleanly at end of stream; the original never checked the
        # read flag and would pass frame=None into cv2.line and crash.
        if not ret:
            break
        # Reference line overlaid on every frame (green, BGR (0, 255, 0)).
        cv2.line(frame, (286, 799), (976, 730), (0, 255, 0), 3)
        print(i)
        # Only process frames from the 17-second mark (at 25 fps) onwards.
        if i >= 17 * 25:
            demo(net, frame, i)
            frame = cv2.resize(frame, None, None, fx=0.5, fy=0.5,
                               interpolation=cv2.INTER_LINEAR)
            out.write(frame)
            # cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        i += 1

        # Stop after the 27-second mark.
        if i > 27 * 25:
            break

    # Release everything once the job is finished.
    cap.release()
    out.release()
    # cv2.destroyAllWindows()