#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 2018/1/26 18:10
@desc: trying to speed up predicting
"""
import os

import numpy as np
import tensorflow as tf

from .utils import visualization_utils as vis_utils
from .utils.basic import get_point_from_box
from .utils.eval import check_image_symbol
from .utils.eval import convert_region_box_to_global
from .utils.eval import load_model
from .utils.eval import merge_region_prediction
from .utils.eval import run_detection
from .utils.imaging import get_revolve_matrix
from .utils.imaging import revolve_boxes
from .utils.imaging import rotate_image
from .utils.json import data_to_json_low


def compute_bottom_box(box, center=(0.5, 0.5), bias=0.002):
    """
    Estimate a small box marking a person's foot position.

    The detection box is modelled as a circle of equal area centred on the
    box centre.  The foot point is taken on that circle in the direction of
    the image centre, and a tiny square of half-width ``bias`` around it is
    returned.

    Parameters
    ----------
    box: bounding box (x_min, y_min, x_max, y_max) in normalized coordinates
    center: image centre, default (0.5, 0.5)
    bias: half side length of the returned box, expected in (0, 0.01)

    Returns
    -------
    list of four floats: the small box around the estimated foot point.
    """
    bcx = (box[0] + box[2]) / 2
    bcy = (box[1] + box[3]) / 2
    dx = center[0] - bcx
    dy = center[1] - bcy
    dist = np.sqrt(dx ** 2 + dy ** 2)
    # Radius of the circle whose area equals the box area.
    radius = np.sqrt((box[2] - box[0]) * (box[3] - box[1]) / np.pi)
    if dist <= radius:
        # The image centre lies inside (or on) the equal-area circle.
        # NOTE(review): dividing by 5 (rather than 2, which would be the
        # midpoint of box centre and image centre) looks suspicious —
        # confirm with the original author before changing.
        px = (bcx + center[0]) / 5
        py = (bcy + center[1]) / 5
    else:
        # Step from the box centre toward the image centre by one radius.
        scale = radius / dist
        px = bcx + dx * scale
        py = bcy + dy * scale
    return [px - bias, py - bias, px + bias, py + bias]


def get_edge_boxes_classes_scores(boxes, classes, scores, config, detection_graph, image):
    """
    Detect the edge (rim) regions of a fisheye image and collect bounding
    boxes together with their classes and confidence scores.

    The image is rotated in 30-degree steps; at each step a 360x360 crop is
    taken from the top and from the bottom of the rim, each crop is run
    through the detector, and the resulting boxes are mapped back into the
    coordinates of the un-rotated image.

    Parameters
    ----------
    boxes: output list, extended in place with bounding boxes (global coords)
    classes: output list, extended in place with class ids
    scores: output list, extended in place with confidence scores
    config: tensorflow session configuration
    detection_graph: frozen model for the edge regions
    image: fisheye image; assumed to be 1280x1280 — TODO confirm

    Returns
    -------
    None -- results are accumulated into ``boxes``/``classes``/``scores``.
    """
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=config) as sess:
            with tf.device("/gpu:0"):
                # Six rotations (0..150 in 30-degree steps) are enough to cover
                # the whole rim, because each step detects both top and bottom.
                for angle in range(0, 180, 30):
                    _, img_buf = rotate_image(angle, image)
                    # 360x360 crops at the top and bottom of the rotated image.
                    img_top = img_buf[40:400, 460:820, :]
                    img_bottom = img_buf[880:1240, 460:820, :]
                    # Flip the bottom crop on both axes (a 180-degree rotation)
                    # so people near the bottom rim appear upright for the model.
                    img_bottom = np.flip(img_bottom, axis=0)
                    img_bottom = np.flip(img_bottom, axis=1)
                    # Add a batch dimension of 1; detector expects uint8 input.
                    img_top = np.expand_dims(img_top, axis=0).astype(np.uint8)
                    img_bottom = np.expand_dims(img_bottom, axis=0).astype(np.uint8)
                    top_boxes, top_classes, top_scores = run_detection(sess, detection_graph, img_top)
                    bottom_boxes, bottom_classes, bottom_scores = run_detection(sess, detection_graph, img_bottom)
                    # Undo the double flip on the bottom boxes: with normalized
                    # [ymin, xmin, ymax, xmax] boxes this maps each coordinate
                    # c to 1 - c and swaps min/max so the box stays well-formed.
                    bottom_boxes = np.vstack((1.0 - bottom_boxes[:, 2], 1.0 - bottom_boxes[:, 3],
                                              1.0 - bottom_boxes[:, 0], 1.0 - bottom_boxes[:, 1])).T
                    # Map crop-local boxes into the global 1280x1280 frame; the
                    # last argument encodes the crop offset ('x_y' — presumably
                    # column_row, matching the 460:820 / 40:400 slices above;
                    # verify against convert_region_box_to_global).
                    top_boxes, top_classes, top_scores = convert_region_box_to_global(
                        {'shape': (1280, 1280), 'crop_shape': (360, 360)},
                        top_boxes, top_classes, top_scores, '460_40')
                    bottom_boxes, bottom_classes, bottom_scores = convert_region_box_to_global(
                        {'shape': (1280, 1280), 'crop_shape': (360, 360)},
                        bottom_boxes, bottom_classes, bottom_scores, '460_880')
                    this_boxes = top_boxes + bottom_boxes
                    this_classes = top_classes + bottom_classes
                    this_scores = top_scores + bottom_scores
                    if 0 != angle:
                        # Rotate the boxes back into the un-rotated image frame,
                        # then convert out of the rotated canvas (reverse=True).
                        revolve_mat, post_shape, offset = get_revolve_matrix(angle, (1280, 1280))
                        this_boxes = revolve_boxes(np.asarray(this_boxes), revolve_mat, (1280, 1280), post_shape)
                        this_boxes, this_classes, this_scores = convert_region_box_to_global(
                            {'shape': (1280, 1280), 'crop_shape': post_shape},
                            this_boxes, this_classes, this_scores, '{}_{}'.format(*offset), reverse=True)
                    boxes.extend(this_boxes)
                    scores.extend(this_scores)
                    classes.extend(this_classes)


def get_center_boxes_classes_scores(boxes, classes, scores, config, detection_graph, image):
    """
    Detect the central region of a fisheye image and collect bounding
    boxes together with their classes and confidence scores.

    Four overlapping 400x400 crops around the image centre are detected
    independently and their boxes are mapped back into global coordinates.

    Parameters
    ----------
    boxes: output list, extended in place with bounding boxes (global coords)
    classes: output list, extended in place with class ids
    scores: output list, extended in place with confidence scores
    config: tensorflow session configuration
    detection_graph: frozen model for the central region
    image: fisheye image; assumed to be 1280x1280 — TODO confirm

    Returns
    -------
    None -- results are accumulated into ``boxes``/``classes``/``scores``.
    """
    # Top-left (row, col) origins of the four central crops.
    crop_origins = ((360, 360), (520, 360), (360, 520), (520, 520))
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=config) as sess:
            with tf.device("/gpu:0"):
                for row, col in crop_origins:
                    crop = image[row:(row + 400), col:(col + 400), :]
                    # Batch dimension of 1; detector expects uint8 input.
                    batch = np.expand_dims(crop, axis=0).astype(np.uint8)
                    crop_boxes, crop_classes, crop_scores = run_detection(
                        sess, detection_graph, batch)
                    # Offset string is '<col>_<row>' to match the slice above.
                    crop_boxes, crop_classes, crop_scores = convert_region_box_to_global(
                        {'shape': (1280, 1280), 'crop_shape': (400, 400)},
                        crop_boxes, crop_classes, crop_scores,
                        '{}_{}'.format(col, row))
                    boxes.extend(crop_boxes)
                    scores.extend(crop_scores)
                    classes.extend(crop_classes)


def predict_image(img, checkpoint_center='', checkpoint_edge='', visual_whole_person=False,
                  score=0.5, percent=0.8, need_return_image=False):
    """
    Run the full fisheye person-detection pipeline on a single image.

    Detections from the centre model and the edge model are pooled,
    thresholded by confidence, merged across overlapping regions, and
    finally converted to points.

    Parameters
    ----------
    img: input fisheye image
    checkpoint_center: path to the frozen model for the central region
    checkpoint_edge: path to the frozen model for the edge regions
    visual_whole_person: draw the full person box instead of the small foot box
    score: confidence threshold for keeping a detection
    percent: overlap-ratio threshold used when merging boxes
    need_return_image: whether to draw detections onto the returned image

    Returns
    -------
    (points, image): detected points and the image (a copy, annotated when
    ``need_return_image`` is True; the original image when nothing is kept).
    """
    detection_graph_center = load_model(checkpoint_center)
    detection_graph_edge = load_model(checkpoint_edge)
    config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
    config.gpu_options.allow_growth = True
    raw_boxes, raw_classes, raw_scores = [], [], []
    get_center_boxes_classes_scores(
        raw_boxes, raw_classes, raw_scores, config, detection_graph_center, img)
    get_edge_boxes_classes_scores(
        raw_boxes, raw_classes, raw_scores, config, detection_graph_edge, img)
    # Keep only detections at or above the confidence threshold.
    kept = [(b, c, s) for b, c, s in zip(raw_boxes, raw_classes, raw_scores)
            if s >= score]
    if not kept:
        return [], img
    boxes = [b for b, _, _ in kept]
    classes = [c for _, c, _ in kept]
    scores = [s for _, _, s in kept]
    # Merge duplicate boxes produced by the overlapping detection regions.
    boxes, classes, scores = merge_region_prediction(
        np.array(boxes), np.array(scores), np.array(classes), percent)
    classes = np.array(classes).astype(np.int32)
    scores = np.array(scores)
    image = img.copy()
    if need_return_image:
        if visual_whole_person:
            paint_boxes = np.array(boxes)
        else:
            # Paint only the small estimated foot box of each person.
            paint_boxes = np.array([compute_bottom_box(b) for b in boxes])
        vis_utils.visualize_boxes_and_labels_on_image_array(
            image, paint_boxes, classes, scores,
            use_normalized_coordinates=True,
            min_score_thresh=score,
            line_thickness=3,
            max_boxes_to_draw=200)
    return get_point_from_box(boxes), image


def predict_low_density_image(url, rois=None, score=0.5, percent=0.65, visual_whole_person=False,
                              need_return_image=False, store_id=0, types=None):
    """
    Fetch an image from a URL, run the low-density detection pipeline on
    it, and pack the result into the project's JSON format.

    Parameters
    ----------
    url: image url
    rois: roi regions forwarded to the JSON packer
    score: confidence threshold for keeping a detection
    percent: overlap-ratio threshold used when merging boxes
    visual_whole_person: draw the full person box instead of the small foot box
    need_return_image: whether the annotated image is needed
    store_id: store identifier forwarded to the JSON packer
    types: type information forwarded to the JSON packer

    Returns
    -------
    Result of ``data_to_json_low`` for this image.
    """
    image = check_image_symbol(url)
    # Frozen inference graphs shipped next to this module.
    model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'low')
    checkpoint_center = os.path.join(model_dir, 'frozen_inference_graph_fast_center_200000.pb')
    checkpoint_edge = os.path.join(model_dir, 'frozen_inference_graph_fast_edge_160998.pb')
    points, img = predict_image(
        image,
        checkpoint_center=checkpoint_center,
        checkpoint_edge=checkpoint_edge,
        score=score,
        percent=percent,
        visual_whole_person=visual_whole_person,
        need_return_image=need_return_image)
    return data_to_json_low(url, points, img, rois, store_id=store_id, types=types)
