#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 2018/1/24 21:24
@desc: 
"""
import time

import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import visualization_utils as vis_util

from crowdcounting.test.tools import rotate_image
from utils.basic import get_point_from_box
from utils.basic import is_in_polygon
from utils.detector import load_model
from utils.detector import run_detection
from utils.evaluator import convert_region_box_to_global
from utils.evaluator import merge_region_prediction
from utils.evaluator import revolve_boxes
from utils.evaluator import get_revolve_info


def get_edge_boxes_classes_scores(boxes, classes, scores, detection_graph, image):
    """Detect objects near the image edge by rotating the frame and cropping
    two opposite border strips, appending results to the caller's lists in place.

    For each rotation angle (0..150 deg, step 30) the rotated image is cropped
    into a "top" strip and a "bottom" strip; the bottom strip is flipped 180 deg
    so both crops present the edge in the same orientation to the detector.
    Detections are mapped back into the global 1280x1280 frame.

    :param boxes: list, extended in place with normalized [ymin, xmin, ymax, xmax] boxes
    :param classes: list, extended in place with class ids
    :param scores: list, extended in place with confidence scores
    :param detection_graph: frozen TF graph for the edge detector
    :param image: RGB image array (assumed 1280x1280 -- TODO confirm with caller)
    :return: None (output is via the three list parameters)
    """
    config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
    config.gpu_options.allow_growth = True
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=config) as sess:
            with tf.device("/gpu:0"):
                for angle in range(0, 180, 30):
                    # BUGFIX(review): the offset returned here was never used --
                    # it was unconditionally recomputed by get_revolve_info below.
                    _, img_buf = rotate_image(angle, image)
                    img_top = img_buf[40:400, 460:820, :]
                    img_bottom = img_buf[880:1240, 460:820, :]
                    # Rotate the bottom strip 180 degrees (flip both axes) so it
                    # matches the top strip's orientation for the detector.
                    img_bottom = np.flip(img_bottom, axis=0)
                    img_bottom = np.flip(img_bottom, axis=1)
                    img_top = np.expand_dims(img_top, axis=0).astype(np.uint8)
                    img_bottom = np.expand_dims(img_bottom, axis=0).astype(np.uint8)
                    top_boxes, top_classes, top_scores = run_detection(sess, detection_graph, img_top)
                    bottom_boxes, bottom_classes, bottom_scores = run_detection(sess, detection_graph, img_bottom)
                    # Undo the 180-degree flip on the bottom detections:
                    # [ymin, xmin, ymax, xmax] -> [1-ymax, 1-xmax, 1-ymin, 1-xmin].
                    bottom_boxes = np.vstack((1.0 - bottom_boxes[:, 2], 1.0 - bottom_boxes[:, 3],
                                              1.0 - bottom_boxes[:, 0], 1.0 - bottom_boxes[:, 1])).T
                    top_boxes, top_classes, top_scores = convert_region_box_to_global(
                        {'shape': (1280, 1280), 'crop_shape': (360, 360)},
                        top_boxes, top_classes, top_scores, '460_40')
                    bottom_boxes, bottom_classes, bottom_scores = convert_region_box_to_global(
                        {'shape': (1280, 1280), 'crop_shape': (360, 360)},
                        bottom_boxes, bottom_classes, bottom_scores, '460_880')
                    this_boxes = top_boxes + bottom_boxes
                    this_classes = top_classes + bottom_classes
                    this_scores = top_scores + bottom_scores
                    if 0 != angle:
                        # Map boxes from the rotated frame back to the original frame.
                        revolve_mat, post_shape, offset = get_revolve_info(angle, (1280, 1280))
                        this_boxes = revolve_boxes(np.asarray(this_boxes), revolve_mat, (1280, 1280), post_shape)
                        this_boxes, this_classes, this_scores = convert_region_box_to_global(
                            {'shape': (1280, 1280), 'crop_shape': post_shape},
                            this_boxes, this_classes, this_scores, '{}_{}'.format(*offset), reverse=True)
                    boxes += this_boxes
                    scores += this_scores
                    classes += this_classes


def get_center_boxes_classes_scores(boxes, classes, scores, detection_graph, image):
    """Detect objects in four overlapping 400x400 central crops of the image,
    appending the results to the caller's lists in place.

    :param boxes: list, extended in place with normalized boxes in the global frame
    :param classes: list, extended in place with class ids
    :param scores: list, extended in place with confidence scores
    :param detection_graph: frozen TF graph for the center detector
    :param image: RGB image array (assumed 1280x1280 -- TODO confirm with caller)
    :return: None (output is via the three list parameters)
    """
    session_config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
    session_config.gpu_options.allow_growth = True
    # (row, col) origins of the four central crops.
    crop_origins = ((360, 360), (520, 360), (360, 520), (520, 520))
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=session_config) as sess:
            with tf.device("/gpu:0"):
                for top, left in crop_origins:
                    crop = image[top:(top + 400), left:(left + 400), :]
                    batch = np.expand_dims(crop, axis=0).astype(np.uint8)
                    crop_boxes, crop_classes, crop_scores = run_detection(
                        sess, detection_graph, batch)
                    # Map crop-local boxes into the global 1280x1280 frame;
                    # the offset key is '<x>_<y>'.
                    crop_boxes, crop_classes, crop_scores = convert_region_box_to_global(
                        {'shape': (1280, 1280), 'crop_shape': (400, 400)},
                        crop_boxes, crop_classes, crop_scores,
                        '{}_{}'.format(left, top))
                    boxes.extend(crop_boxes)
                    scores.extend(crop_scores)
                    classes.extend(crop_classes)


def predict_image(image, rois=None, checkpoint_edge='',
                  checkpoint_center='', score=0.3, percent=0.8):
    """Run the two-stage (center + edge) detector on one image file, visualize
    the merged detections, and print the detected points that fall inside each ROI.

    :param image: path to the input image file
    :param rois: optional list of polygons (normalized [y, x] vertices) to bucket points into
    :param checkpoint_edge: path to the frozen edge-detector graph
    :param checkpoint_center: path to the frozen center-detector graph
    :param score: minimum confidence for a detection to be kept
    :param percent: overlap percentage passed to merge_region_prediction
    :return: None (results are printed / displayed)
    """
    start_time = time.time()
    detection_graph_edge = load_model(checkpoint_edge)
    detection_graph_center = load_model(checkpoint_center)
    stage_time = time.time()
    print('{} elapsed time: {:.3f}s'.format(time.ctime(),
                                            stage_time - start_time))
    start_time = stage_time

    _boxes = []
    _scores = []
    _classes = []
    img = cv2.imread(image)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # BUGFIX(review): the graphs were swapped at these call sites -- the edge
    # checkpoint was fed to the center detector and vice versa. Each detector
    # now receives the graph loaded from its own checkpoint.
    get_center_boxes_classes_scores(_boxes, _classes, _scores, detection_graph_center, img)
    get_edge_boxes_classes_scores(_boxes, _classes, _scores, detection_graph_edge, img)

    stage_time = time.time()
    print('{} elapsed time: {:.3f}s'.format(time.ctime(),
                                            stage_time - start_time))

    # NOTE(review): arguments go in as (boxes, scores, classes) but come back
    # as (boxes, classes, scores) -- verify against merge_region_prediction.
    _boxes, _classes, _scores = merge_region_prediction(
        np.array(_boxes), np.array(_scores), np.array(_classes), percent)
    _boxes = np.array(_boxes)
    _classes = np.array(_classes).astype(np.int32)
    _scores = np.array(_scores)
    # Reuse the already-loaded RGB frame instead of re-reading/re-converting
    # the file from disk; visualize_* draws on this array in place.
    new_img = img.copy()
    vis_util.visualize_boxes_and_labels_on_image_array(
        new_img, _boxes, _classes, _scores, {1: {'id': 1, 'name': 'p'}},
        use_normalized_coordinates=True,
        min_score_thresh=score,
        line_thickness=3)
    import matplotlib.pyplot as plt
    plt.imshow(new_img)
    plt.show()
    # Keep only the boxes whose score passes the threshold (replaces the old
    # filter/np.hsplit/map chain; also safe when nothing passes the threshold).
    _boxes = [list(box) for box, s in zip(_boxes, _scores) if s >= score]
    rois_point = []
    if rois is not None:
        points = get_point_from_box(_boxes)
        for idx, roi in enumerate(rois):
            rois_point.append([])
            for point in points:
                if is_in_polygon(point, roi):
                    # Output points as [x, y] (point itself is [y, x]).
                    rois_point[idx].append([point[1], point[0]])
    print(rois_point)
    stage_time = time.time()
    print('{} elapsed time: {:.3f}s'.format(time.ctime(),
                                            stage_time - start_time))


if __name__ == '__main__':
    # Demo run against a local sample frame (Windows paths).
    sample_image = 'D:\\workspace\\data\\2\\2017-07-23 14-30-24-1133-.png'
    predict_image(
        image=sample_image,
        rois=[[[0.4, 0.4], [0.4, 0.8], [0.8, 0.4], [0.8, 0.8]]],
        checkpoint_edge='D:\\workspace\\data\\frozen_inference_graph_fast_edge_105912.pb',
        checkpoint_center='D:\\workspace\\data\\frozen_inference_graph_fast_center_200000.pb',
        score=0.35)
