#!/usr/bin/env python

# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen, based on code from Ross Girshick
# --------------------------------------------------------

"""
Demo script showing detections in sample images.

See README.md for installation instructions before running.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import _init_paths
from model.config import cfg
from model.test import im_detect
from model.nms_wrapper import nms

from utils.timer import Timer
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os, cv2
import argparse

from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1

# Original Pascal VOC class list, kept for reference:
# CLASSES = ('__background__',
#            'aeroplane', 'bicycle', 'bird', 'boat',
#            'bottle', 'bus', 'car', 'cat', 'chair',
#            'cow', 'diningtable', 'dog', 'horse',
#            'motorbike', 'person', 'pottedplant',
#            'sheep', 'sofa', 'train', 'tvmonitor')

# Class labels this model was trained with: background plus two CME
# (coronal mass ejection) strength classes.  The tuple order must match
# the index order of the network's classification head.
CLASSES = ('__background__',
           'strong','poor'
           )

# Single-class variant, kept for reference:
# CLASSES = ('__background__',
#            'cme'
#            )

# Checkpoint file name per backbone; keys must match the --net choices.
NETS = {'vgg16': ('vgg16_faster_rcnn_iter_50000.ckpt',),'res101': ('res101_faster_rcnn_iter_60001.ckpt',)}
# Training imdb name(s) per dataset; keys must match the --dataset choices.
DATASETS= {'pascal_voc': ('voc_2007_trainval',),'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',)}


def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw the detections of one class whose score is at least ``thresh``.

    Parameters
    ----------
    im : ndarray
        BGR image as loaded by OpenCV.
    class_name : str
        Label drawn next to each box.
    dets : ndarray
        (N, 5) array of [x1, y1, x2, y2, score] rows.
    thresh : float
        Minimum score for a detection to be displayed.
    """
    keep = np.where(dets[:, -1] >= thresh)[0]
    if keep.size == 0:
        return

    # OpenCV loads images as BGR; matplotlib expects RGB.
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for idx in keep:
        x1, y1, x2, y2 = dets[idx, :4]
        score = dets[idx, -1]

        box_patch = plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                                  fill=False, edgecolor='red',
                                  linewidth=3.5)
        ax.add_patch(box_patch)
        ax.text(x1, y1 - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                 fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()

def detection_img(sess, net, thresh=0.):
  """Detect CME boxes in every image under data/demo, chain detections
  across consecutive frames into CME event tracks, and write an event
  catalog to cme_catalog.txt.

  Args:
    sess: TensorFlow session with the trained weights restored.
    net: network built with create_architecture("TEST", ...).
    thresh: per-class score threshold applied before NMS.
  """
  # timers
  _t = {'im_detect' : Timer(), 'misc' : Timer()}

  im_file = os.path.join(cfg.DATA_DIR, 'demo')
  im_list = os.listdir(im_file)
  im_list.sort()
  # Feature maps of the previous two CME frames (zeros for the first
  # iterations).  NOTE(review): the (1, 38, 50, 1024) shape is hard-coded
  # -- presumably it matches the backbone's conv feature map for the demo
  # image size; confirm before changing input resolution.
  feat_t1 = np.zeros([1, 38, 50, 1024], np.float32)
  feat_t2 = np.zeros([1, 38, 50, 1024], np.float32)
  # all_boxs[class][frame] -> (K, 5) detections [x1, y1, x2, y2, score].
  all_boxs = [[[] for _ in range(len(im_list))]
               for _ in range(len(CLASSES))]
  for i, im_name in enumerate(im_list):
    im = cv2.imread(os.path.join(im_file, im_name))

    _t['im_detect'].tic()
    # This project's im_detect variant also consumes/returns per-frame
    # feature maps so the network can see the two preceding frames.
    scores, boxes, feature = im_detect(sess, net, im, feat_t1, feat_t2)
    _t['im_detect'].toc()

    _t['misc'].tic()
    CONF_THRESH = 0.8  # NOTE(review): unused here; leftover from demo()
    # skip j = 0, because it's the background class
    for j in range(1, len(CLASSES)):
      inds = np.where(scores[:, j] > thresh)[0]
      cls_scores = scores[inds, j]
      cls_boxes = boxes[inds, j*4:(j+1)*4]
      cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
        .astype(np.float32, copy=False)
      keep = nms(cls_dets, cfg.TEST.NMS)
      cls_dets = cls_dets[keep, :]
      # vis_detections(im, CLASSES[j], cls_dets, thresh=CONF_THRESH)
      # keep1 = np.where(cls_dets[:, -1] >= 0.3)[0]
      # cls_dets = cls_dets[keep1, :]
      all_boxs[j][i] = cls_dets

    # Shift the feature-map history: t-2 <- t-1, t-1 <- current frame.
    feat_t1 = feat_t2
    feat_t2 = feature
  # cmeboxs[frame]: detections of both CME classes stacked together.
  cmeboxs = [[] for _ in range(len(im_list))]
  if len(CLASSES) > 2:
    # cmeboxs = np.hstack(cmeboxs[1], cmeboxs[2])
    for i in range(len(im_list)):
        # if all_boxs[1][i].size == 0 or all_boxs[1][i].size==0:
        #     if all_boxs[1][i].size == 0:
        #         cmeboxs[i] = all_boxs[2][i]
        #     else:
        #         cmeboxs[i]= all_boxs[1][i]
        # else:
        # Merge 'strong' and 'poor' detections into one candidate list
        # and visualize them (all drawn under the CLASSES[2] label).
        cmeboxs[i] = np.vstack((all_boxs[1][i], all_boxs[2][i]))
        im = cv2.imread(os.path.join(im_file, im_list[i]))
        vis_detections(im, CLASSES[2], cmeboxs[i], thresh=0.5)
  def find_start_box(start):
      """Scan frames from ``start`` for the first unconsumed box.

      The score field (column 4) doubles as a "consumed" flag: it is
      zeroed once a box has been assigned to a track.  Returns
      (frame_index, box), or (-1, None) when no start box remains.
      NOTE(review): if called with ``start >= len(im_list)`` the while
      loop falls through and implicitly returns None, which would break
      the two-value unpack at the call site; also the last frame is never
      scanned as a track start once ``start`` is advanced onto it --
      verify both are intended/unreachable.
      """
      while (start < len(im_list)):
          for i in range(len(cmeboxs[start])):
              if cmeboxs[start][i][4] != 0:
                  cmeboxs[start][i][4] = 0
                  return start, cmeboxs[start][i]
          start += 1
          if start >= len(im_list) - 1:
              return -1, None

  def find_laterbox(end, prebox):
      """Find an unconsumed box in frame ``end`` that continues ``prebox``.

      A box continues the track when it overlaps ``prebox`` horizontally
      (assumes columns are [x1, y1, x2, y2, score] -- TODO confirm) and
      its column-3 coordinate is at most 5 below prebox's, i.e. the
      region keeps growing outward between frames.  The matched box is
      marked consumed (score zeroed).  Returns the box, or a zero vector
      when no continuation exists.
      """
      for i in range(len(cmeboxs[end])):
          if cmeboxs[end][i][4] != 0:
              if ((float(cmeboxs[end][i][0]) < float(prebox[0]) < float(cmeboxs[end][i][2]) or float(prebox[0]) < float(
                      cmeboxs[end][i][0]) < float(prebox[2]) or float(prebox[0]) < float(cmeboxs[end][i][2]) < float(prebox[2]))
                      and float(cmeboxs[end][i][3]) > float(prebox[3])-5):
                  cmeboxs[end][i][4] = 0
                  return cmeboxs[end][i]
      return np.zeros(5)

  # Link boxes frame-to-frame into tracks; only tracks spanning more than
  # two frames are kept as CME events.
  cme_catalog = []
  cmecount = 0
  start = 0
  cme_catalog.append([])
  while (start < len(im_list)):
      start, startbox = find_start_box(start)
      if start == -1:
          break
      # A catalog entry is [start image name, box, box, ...].
      cme_catalog[cmecount].append(im_list[start])
      cme_catalog[cmecount].append(startbox)
      end = start + 1
      cmesum = 1
      prebox = startbox
      while (end < len(im_list)):
          laterbox = find_laterbox(end, prebox)
          if all(laterbox == np.zeros(5)):
              break
          prebox = laterbox
          cme_catalog[cmecount].append(laterbox)
          end += 1
          cmesum += 1
      if cmesum > 2:
          cmecount += 1
          cme_catalog.append([])
      else:
          # Track too short to count as an event: discard and reuse slot.
          cme_catalog[cmecount] = []

  # Write the CME event catalog to a text file.
  filename = os.path.join('/home/xiang/cme/faster-rcnn-3quanlian', 'cme_catalog.txt')
  with open(filename, 'wt') as f:
      # The trailing catalog entry is always an empty placeholder, hence
      # the range stops at len - 1.
      for i in range(len(cme_catalog) - 1):
          starttime = cme_catalog[i][0]
          # Rows are the tracked boxes with the score column dropped.
          boxlist = np.array([[float(z) for z in x[:-1]] for x in cme_catalog[i][1:]])
          # Number of linked frames after the first one.
          # NOTE(review): 'long' shadows a builtin name (Py2 legacy).
          long = len(boxlist)-1
          # kk = boxlist[:, 0]
          # NOTE(review): if boxlist columns are [x1, y1, x2, y2], taking
          # xmax from column 1 and ymin from column 2 looks like an index
          # mix-up (expected columns 2 and 1); boxlist[1:] also skips the
          # first tracked box -- confirm both are intended.
          xmin = np.min(boxlist[1:, 0])
          xmax = np.max(boxlist[1:, 1])
          ymin = np.min(boxlist[1:, 2])
          ymax = np.max(boxlist[1:, 3])
          # cls = np.min(boxlist[:, 4])
          # One record per event: start image name, track length, extents.
          f.write('{:s} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                  format(starttime, long, xmin, xmax,
                         ymin, ymax))
def demo(sess, net, image_name):
    """Detect and visualize object classes in a single demo image.

    Parameters
    ----------
    sess : tf.Session
        Session with the trained network weights restored.
    net : object
        Network built for TEST mode.
    image_name : str
        File name of an image under ``data/demo``.
    """
    # Load the demo image.
    im = cv2.imread(os.path.join(cfg.DATA_DIR, 'demo', image_name))

    # Detect all object classes and regress object bounds.
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(sess, net, im)
    timer.toc()
    print('Detection took {:.3f}s for {:d} object proposals'.format(timer.total_time, boxes.shape[0]))

    # Apply NMS per class and draw the surviving high-score detections.
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_idx, cls_name in enumerate(CLASSES[1:], start=1):
        per_cls_boxes = boxes[:, 4 * cls_idx:4 * (cls_idx + 1)]
        per_cls_scores = scores[:, cls_idx]
        dets = np.hstack((per_cls_boxes,
                          per_cls_scores[:, np.newaxis])).astype(np.float32)
        dets = dets[nms(dets, NMS_THRESH), :]
        vis_detections(im, cls_name, dets, thresh=CONF_THRESH)

def parse_args():
    """Parse command-line arguments for the demo.

    Returns
    -------
    argparse.Namespace
        Namespace with ``demo_net`` (backbone name) and ``dataset``
        (training dataset name) attributes.
    """
    parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo')
    parser.add_argument('--net', dest='demo_net',
                        help='Network to use [vgg16 res101]',
                        choices=NETS.keys(), default='res101')
    parser.add_argument('--dataset', dest='dataset',
                        help='Trained dataset [pascal_voc pascal_voc_0712]',
                        choices=DATASETS.keys(), default='pascal_voc')
    return parser.parse_args()

if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # proposals come from the RPN, not precomputed
    args = parse_args()
    os.chdir('/home/xiang/cme/faster-rcnn-3quanlian')

    # Resolve the checkpoint path for the chosen backbone and dataset.
    backbone = args.demo_net
    data_name = args.dataset
    ckpt = os.path.join('output', backbone, DATASETS[data_name][0], 'flip',
                        NETS[backbone][0])

    if not os.path.isfile(ckpt + '.meta'):
        raise IOError(('{:s} not found.\nDid you download the proper networks from '
                       'our server and place them properly?').format(ckpt + '.meta'))

    # Session config: allow op placement fallback and grow GPU memory on
    # demand rather than grabbing it all up front.
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)

    # Build the selected network in TEST mode.
    if backbone == 'vgg16':
        net = vgg16()
    elif backbone == 'res101':
        net = resnetv1(num_layers=101)
    else:
        raise NotImplementedError
    net.create_architecture("TEST", len(CLASSES),
                            tag='flip', anchor_scales=[1, 2, 3, 4],
                            anchor_ratios=[4, 8])

    # Restore the trained weights.
    saver = tf.train.Saver()
    saver.restore(sess, ckpt)

    print('Loaded network {:s}'.format(ckpt))

    print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
    print('Demo for data/demo/')
    # Detect boxes in every demo image and draw the linked CME tracks.
    detection_img(sess, net, thresh=0.5)

    plt.show()