#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 1/3/18 5:18 PM
@desc: trying to speed up predicting
"""

import multiprocessing
import time
from functools import reduce

import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import visualization_utils as vis_util
from skimage import transform

from utils.basic import get_point_from_box
from utils.basic import is_in_polygon
from utils.detector import load_model
from utils.detector import run_detection
from utils.evaluator import convert_region_box_to_global
from utils.evaluator import merge_region_prediction
from utils.evaluator import revolve_boxes
from utils.evaluator import get_revolve_info


def cropping_image(image, crop_size, revolving_angles):
    """
    Cut fixed-size sub-images out of a large image.

    The keys of ``revolving_angles`` encode the top-left corner of each
    crop as an ``'x_y'`` string; one independent copy of the window at
    that position is taken per key.

    Parameters
    ----------
    image : ndarray
        Source image of shape (H, W, C).
    crop_size : tuple
        (height, width) of each sub-image.
    revolving_angles : dict
        Maps 'x_y' position strings to rotation angles; only the keys
        are used here.

    Returns
    -------
    dict
        Maps each 'x_y' key to a copy of the cropped window.
    """
    height, width = crop_size
    crops = {}
    for position in revolving_angles:
        left, top = (int(coord) for coord in position.split('_'))
        crops[position] = image[top:top + height, left:left + width, :].copy()
    return crops


def compress_key_image(group, angle, key, image):
    """
    Register a cropped sub-image under its rotation-angle bucket.

    Adds a batch dimension to ``image`` and appends it to the batch stored
    for ``angle`` in ``group`` (creating the bucket on first use), and
    records ``key`` so each batch row can later be mapped back to its crop
    position.

    Parameters
    ----------
    group : dict
        Mutated in place. Maps angle -> {'keys': [...], 'images': ndarray
        of shape (N, H, W, C) or None before the first image}.
    angle : int
        Rotation-angle bucket the image belongs to.
    key : str
        Crop-position identifier ('x_y') of the image.
    image : ndarray
        Sub-image of shape (H, W, C); every image in one bucket must share
        the same H, W, C so the batch can be concatenated.

    Returns
    -------
    None
        ``group`` is modified in place.
    """
    this_img = np.expand_dims(image, axis=0)
    # setdefault replaces the original `angle not in group.keys()` check and
    # the redundant follow-up `is None` test in a single idiomatic lookup.
    bucket = group.setdefault(angle, {'keys': [], 'images': None})
    if bucket['images'] is None:
        bucket['images'] = this_img
    else:
        # NOTE: O(N) re-copy per append; fine for the 16 crops used here.
        bucket['images'] = np.concatenate((bucket['images'], this_img), axis=0)
    bucket['keys'].append(key)


def detect(checkpoint, key, value, revolving_angles,
           ob_size, rotate_size, crop_size, index):
    """
    Run the frozen detector on one angle-group of crops and map the boxes
    back into the big-image coordinate frame.

    Designed to run inside a worker process: the graph is loaded and a
    fresh session is opened per call.

    Parameters
    ----------
    checkpoint : frozen_inference_graph.pb path loaded by ``load_model``
    key : int
        Rotation-angle bucket: 0 (unrotated), 90/180, or the diagonal
        bucket handled by the ``else`` branch (45 as built by the caller).
    value : dict
        {'keys': list of 'x_y' crop positions, 'images': (N, H, W, C)
        batch}; the diagonal bucket also carries 'shape' (rotated canvas
        size) — TODO confirm against `predict_image`'s group building.
    revolving_angles : dict mapping 'x_y' -> rotation angle in degrees
    ob_size, rotate_size, crop_size : sizes used to undo the rotation/crop
    index : int
        Worker index, used only to pick a CPU device.

    Returns
    -------
    tuple of (boxes, classes, scores) lists in global coordinates.
    """
    _boxes = []
    _scores = []
    _classes = []
    # CPU-only session; one logical CPU device per worker process.
    config = tf.ConfigProto(device_count={"GPU": 0, "CPU": multiprocessing.cpu_count()})
    detection_graph = load_model(checkpoint)
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=config) as sess:
            with tf.device("/cpu:{}".format(index)):
                boxes, classes, scores = run_detection(sess, detection_graph, value['images'])
    if 0 == key:
        # Unrotated crops: only translate from crop-local to global coords.
        for loc_boxes, loc_classes, loc_scores, loc_key in zip(boxes.tolist(), classes.tolist(),
                                                               scores.tolist(), value['keys']):
            new_boxes, new_classes, new_scores = convert_region_box_to_global(
                {'shape': ob_size, 'crop_shape': crop_size},
                loc_boxes, loc_classes, loc_scores, loc_key)
            _boxes += new_boxes
            _classes += new_classes
            _scores += new_scores
    elif key in [90, 180]:
        # Axis-aligned rotations: rotate the boxes back inside the crop,
        # then translate to global coords.
        for loc_boxes, loc_classes, loc_scores, loc_key in zip(boxes.tolist(), classes.tolist(),
                                                               scores.tolist(), value['keys']):
            revolve_mat, _, _ = get_revolve_info(revolving_angles[loc_key], crop_size, crop_size)
            new_boxes = revolve_boxes(np.asarray(loc_boxes), revolve_mat, crop_size)
            new_boxes, new_classes, new_scores = convert_region_box_to_global(
                {'shape': ob_size, 'crop_shape': crop_size},
                new_boxes, loc_classes, loc_scores, loc_key)
            _boxes += new_boxes
            _classes += new_classes
            _scores += new_scores
    else:
        # Diagonal rotations: first undo the bottom-centred rotate_size
        # window crop (the '{}_{}' key rebuilds its offset inside the
        # rotated canvas), then rotate back, then translate to global.
        for loc_boxes, loc_classes, loc_scores, loc_key in zip(boxes.tolist(), classes.tolist(),
                                                               scores.tolist(), value['keys']):
            new_boxes, new_classes, new_scores = convert_region_box_to_global(
                {'shape': value['shape'], 'crop_shape': rotate_size}, loc_boxes, loc_classes,
                loc_scores,
                '{}_{}'.format((value['shape'][1] - rotate_size[1]) // 2,
                               value['shape'][0] - rotate_size[0]))
            revolve_mat, _, _ = get_revolve_info(revolving_angles[loc_key], crop_size, value['shape'])
            new_boxes = revolve_boxes(np.asarray(new_boxes), revolve_mat, value['shape'])
            new_boxes, new_classes, new_scores = convert_region_box_to_global(
                {'shape': ob_size, 'crop_shape': crop_size},
                new_boxes, new_classes, new_scores, loc_key)
            _boxes += new_boxes
            _classes += new_classes
            _scores += new_scores
    return _boxes, _classes, _scores


def predict_image(image, rois=None, checkpoint='',
                  src_size=(1280, 1280),
                  ob_size=(1160, 1160),
                  rotate_size=(340, 340),
                  crop_size=(320, 320),
                  score=0.3,
                  percent=0.8):
    """
    Predict objects in the input image with the pretrained frozen model.

    The big image is cut into sub-crops (some rotated upright first), the
    crops are detected in parallel with one worker process per rotation
    angle, and the per-crop boxes are mapped back to the global frame,
    merged and drawn onto the image.

    Parameters
    ----------
    image: image who is going to be predicted,
                   can be string or ndarray, the ndarray's shape
                   should be (H, W, C), for instance. [image]:
                   image = cv2.imread(image_file)[:, :, (2, 1, 0)]
    rois: optional list of polygons; detected points are grouped per
          polygon in the second return value
    checkpoint: frozen_inference_graph.pb file
    src_size: input big image size, if the size of `image` is not equal to
              `src_size`, the image will be resized to `src_size`
    ob_size: the size of detecting window
    rotate_size: if the sub image must be rotated, crop the rotated image
                 into `rotate_size`
    crop_size: the size of the cropped sub image
    score: the minimum threshold to show the boxes of predicted objects
    percent: the minimum threshold to merge the two overlapped boxes

    Returns
    -------
    tuple of (image with boxes drawn, channels reversed back to BGR;
    list with one list of [x, y] points per roi)
    """
    start_time = time.time()
    # Rotation angle (degrees) applied to the crop whose top-left corner
    # is at 'x_y' inside the detecting window.
    revolving_angles = {'0_0': 45, '0_840': 135, '840_0': 315, '840_840': 225,
                        '0_280': 90, '0_560': 90, '840_280': 270, '840_560': 270,
                        '280_560': 180, '280_840': 180, '560_560': 180, '560_840': 180,
                        '280_0': 0, '560_0': 0, '280_280': 0, '560_280': 0}
    img = cv2.imread(image)[:, :, (2, 1, 0)] if isinstance(image, str) else image
    # NOTE(review): the size check and the 60:1220 window below are
    # hard-coded to the default src_size/ob_size (60 = (1280-1160)//2);
    # non-default sizes would need these derived from the parameters.
    img = cv2.resize(img, src_size) if not (img.shape[0] == img.shape[1] == 1280) else img
    images = cropping_image(img[60:1220, 60:1220, :], crop_size, revolving_angles)

    # Rotate each crop upright and batch crops by rotation angle so one
    # worker can push a whole angle-group through the detector at once.
    group = {}
    for key, value in images.items():
        angle = revolving_angles[key]
        this_img = value.astype(np.uint8)
        if 0 == angle:
            compress_key_image(group, 0, key, this_img)
        else:
            # skimage.transform.rotate returns floats in [0, 1]; scale
            # back to uint8.
            this_img = transform.rotate(this_img, -angle, True) * 255
            this_img = this_img.round().astype(np.uint8)
            if abs(angle) in [45, 135]:
                # Diagonal rotation enlarges the canvas; keep only the
                # bottom-centred rotate_size window.
                x_start = (this_img.shape[1] - rotate_size[1]) // 2
                this_img = this_img[(this_img.shape[0] - rotate_size[0]):, x_start:(x_start + rotate_size[1]), :]
                compress_key_image(group, 45, key, this_img)
                group[45]['shape'] = this_img.shape
            else:
                compress_key_image(group, abs(angle), key, this_img)

    stage_time = time.time()
    print('{} elapsed time: {:.3f}s'.format(time.ctime(),
                                            stage_time - start_time))
    start_time = stage_time
    # One detector process per angle-group; each worker loads its own graph.
    num_of_cpu = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=len(group))
    async_results = []
    for index, (key, value) in enumerate(group.items()):
        async_results.append(pool.apply_async(
            detect, args=(checkpoint, key, value, revolving_angles,
                          ob_size, rotate_size, crop_size, index % num_of_cpu)))
    pool.close()
    pool.join()
    # BUG FIX: the original comprehension shadowed the list with its own
    # loop variable (`for processes in processes`).
    results = [res.get() for res in async_results]

    stage_time = time.time()
    print('{} elapsed time: {:.3f}s'.format(time.ctime(),
                                            stage_time - start_time))
    start_time = stage_time
    _boxes = list(reduce(lambda a, x: a + x[0], results, []))
    _classes = list(reduce(lambda a, x: a + x[1], results, []))
    _scores = list(reduce(lambda a, x: a + x[2], results, []))
    if len(_boxes) == 0:
        # BUG FIX: the original returned `[[]] * len(rois)`, which raises
        # TypeError when rois is None (its default) and otherwise aliases
        # one shared inner list across every roi.
        return img[:, :, (2, 1, 0)], [] if rois is None else [[] for _ in rois]
    _boxes, _classes, _scores = merge_region_prediction(
        np.array(_boxes), np.array(_scores), np.array(_classes), percent)
    # Shift from the 1160x1160 detecting window back to full-image coords.
    _boxes, _classes, _scores = convert_region_box_to_global(
        {'shape': img.shape, 'crop_shape': (1160, 1160)}, _boxes, _classes, _scores, '60_60')
    _boxes = np.array(_boxes)
    _classes = np.array(_classes).astype(np.int32)
    _scores = np.array(_scores)
    # Draws in place on img.
    vis_util.visualize_boxes_and_labels_on_image_array(
        img, _boxes, _classes, _scores, {1: {'id': 1, 'name': 'fisheye'}},
        use_normalized_coordinates=True,
        min_score_thresh=score,
        line_thickness=1)
    # Keep only the boxes whose score clears the display threshold.
    comb = [entry for entry in zip(_boxes, _classes, _scores) if entry[-1] >= score]
    if comb:
        _boxes, _ = np.hsplit(np.array(comb), [1])
        _boxes = list(map(lambda x: list(x[0]), _boxes))
    else:
        # ROBUSTNESS: np.hsplit on an empty/ragged array would fail here.
        _boxes = []
    rois_point = []
    if rois is not None:
        points = get_point_from_box(_boxes)
        for idx, roi in enumerate(rois):
            rois_point.append([])
            for point in points:
                if is_in_polygon(point, roi):
                    rois_point[idx].append([point[1], point[0]])
    stage_time = time.time()
    print('{} elapsed time: {:.3f}s'.format(time.ctime(),
                                            stage_time - start_time))
    return img[:, :, (2, 1, 0)], rois_point


if __name__ == '__main__':
    # Smoke-test the pipeline on a local sample image, save the annotated
    # result and print the points found inside the region of interest.
    out_image, out_points = predict_image(
        image='D:\\workspace\\data\\2017-07-22 19-44-22-1131-.png',
        rois=[[[0.4, 0.4], [0.4, 0.8], [0.8, 0.4], [0.8, 0.8]]],
        checkpoint='D:\\workspace\\data\\frozen_inference_graph.pb',
        score=0.3)
    cv2.imwrite('D:\\workspace\\data\\test.jpg', out_image)
    print(out_points)
