#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 1/3/18 5:18 PM
@desc: trying to speed up predicting
"""
import argparse
import ast
import base64
import json
import logging
import time

import numpy as np
import tensorflow as tf
from object_detection.utils import visualization_utils as vis_util

from crowdcounting.test.tools import rotate_image
from utils.basic import get_point_from_box
from utils.basic import is_in_polygon
from utils.detector import load_model
from utils.detector import run_detection
from utils.evaluator import convert_region_box_to_global
from utils.evaluator import merge_region_prediction

def parse_args():
    """Parse the command-line options of the fisheye prediction script.

    Returns
    -------
    argparse.Namespace
        Namespace with ``root``, ``model_edge``, ``model_center`` and
        ``output`` attributes, each defaulting to ''.
    """
    arg_parser = argparse.ArgumentParser()
    # (flag, dest, help) triples; all options are plain strings defaulting to ''.
    options = (
        ('--image-root', 'root',
         'The directory where the image data and the annotation is stored.'
         '保存图像与标记数据的根目录, 图像与其相应标记应当在同一级目录。'),
        ('--model-edge', 'model_edge', '保存模型的目录edge'),
        ('--model-center', 'model_center', '保存模型的目录center'),
        ('--output-root', 'output',
         'the root for all the output.'
         '输出结果的根目录'),
    )
    for flag, dest, help_text in options:
        arg_parser.add_argument(flag, type=str, default='', dest=dest, help=help_text)
    return arg_parser.parse_args()


def url_to_image(url):
    """Download the image at *url* and return it as a numpy array."""
    # Imported lazily so the module loads even without scikit-image installed.
    from skimage import io as skio
    return skio.imread(url)


def _encode_image(image, database):
    """Attach the (currently disabled) base64 image payload to *database*.

    Parameters
    ----------
    image : np.ndarray or None
        The annotated image, or None when no image is available.
    database : dict
        Response dict; receives the ``pic_base64`` key (mutated in place).

    Returns
    -------
    None
    """
    # NOTE: the payload is intentionally empty to keep the JSON response
    # small.  The previous implementation encoded the full image with
    # base64.b64encode and then discarded the result (and printed its
    # length), wasting time on every frame.  To re-enable, assign
    # str(base64.b64encode(image.tobytes()), 'utf-8') when image is not None.
    database["pic_base64"] = ''


def _get_list(string):
    """Parse a dict-formatted ROI string into a list of polygon point lists.

    Parameters
    ----------
    string : str
        A Python dict literal such as
        ``"{'1': [{'x': 0.1, 'y': 0.2}, ...], '2': [...]}"``.

    Returns
    -------
    list
        One ``[x, y]`` point list per ROI, ordered by the dict key.
    """
    # ast.literal_eval only accepts Python literals; the previous eval()
    # would execute arbitrary code embedded in the incoming string.
    parsed = ast.literal_eval(string)
    ordered = sorted(parsed.items(), key=lambda item: item[0])
    return [[[point['x'], point['y']] for point in region]
            for _, region in ordered]


def point_data_to_json(url, points, image=None, rois=None, store_id=0, types=None):
    """Build the JSON response string for one processed image.

    Each detected point is reported as ``{'x': col, 'y': row}``; when *rois*
    is given, points are grouped per ROI polygon instead of globally.
    """
    if rois is None:
        # No ROIs: report every point in a single flat list.
        people = [{'x': p[1], 'y': p[0]} for p in points]
        database = {"code": "200",
                    "data": {"is_high_density": 0, "people_num": len(people),
                             "people_point": people, "url": url, "store_id": store_id,
                             "type": types}}
    else:
        # Group points by the ROI polygon that contains them; ROI keys are
        # 1-based strings to match the expected response schema.
        img_roi = {}
        for idx, polygon in enumerate(_get_list(rois)):
            inside = [{'x': p[1], 'y': p[0]}
                      for p in points if is_in_polygon(p, polygon)]
            img_roi[str(idx + 1)] = {'is_high_density': 0,
                                     'people_num': len(inside),
                                     'people_point': inside}
        database = {"code": "200",
                    "data": {"image_roi": img_roi, "url": url, "store_id": store_id, "type": types}}
    _encode_image(image, database)
    return json.dumps(database)


def get_revolve_matrix(angle, src_shape):
    """Build the homogeneous matrix mapping points of a rotated image back
    into the original (pre-rotation) image frame.

    Parameters
    ----------
    angle : float
        Rotation angle in degrees.
    src_shape : tuple
        (height, width) of the original image.

    Returns
    -------
    tuple
        (3x3 ``np.matrix`` for row-vector points ``p' = p * M``,
        (height, width) of the rotated canvas,
        per-axis ``(post - src) // 2`` offset list).
    """
    arc = angle * np.pi / 180
    cos_a, sin_a = np.cos(arc), np.sin(arc)
    # Axis-aligned bounding box of the source image after rotation.
    post_shape = (
        np.ceil(abs(src_shape[0] * cos_a) + abs(src_shape[1] * sin_a)).astype(np.int32),
        np.ceil(abs(src_shape[0] * sin_a) + abs(src_shape[1] * cos_a)).astype(np.int32),
    )
    # Move the origin to the image centre, flipping y into mathematical convention.
    to_center = np.matrix([[1, 0, 0],
                           [0, -1, 0],
                           [-0.5 * src_shape[1], 0.5 * src_shape[0], 1]])
    # Pure rotation about the origin.
    rotation = np.matrix([[cos_a, -sin_a, 0],
                          [sin_a, cos_a, 0],
                          [0, 0, 1]])
    # Move the origin back to the top-left corner of the rotated canvas.
    from_center = np.matrix([[1, 0, 0],
                             [0, -1, 0],
                             [0.5 * post_shape[1], 0.5 * post_shape[0], 1]])
    offset = [(post - src) // 2 for post, src in zip(post_shape, src_shape)]
    return to_center * rotation * from_center, post_shape, offset


def revolve_boxes(boxes, revolve_mat, src_shape, dst_shape=None):
    """Map normalized detection boxes through *revolve_mat* into the
    destination frame and return their axis-aligned bounding boxes.

    Parameters
    ----------
    boxes : np.ndarray
        Normalized boxes as rows of (ymin, xmin, ymax, xmax).
    revolve_mat : np.matrix
        3x3 homogeneous transform for row-vector points.
    src_shape : tuple
        (height, width) the input boxes are normalized against.
    dst_shape : tuple, optional
        (height, width) to normalize the output against; defaults to src_shape.

    Returns
    -------
    list
        Transformed boxes, normalized (ymin, xmin, ymax, xmax) lists.
    """
    if dst_shape is None:
        dst_shape = src_shape
    height, width = src_shape[0], src_shape[1]
    result = []
    for ymin, xmin, ymax, xmax in boxes.tolist():
        # All four corners of the box in pixel (x, y, 1) homogeneous form.
        corners = np.matrix([[xmin * width, ymin * height, 1],
                             [xmax * width, ymin * height, 1],
                             [xmin * width, ymax * height, 1],
                             [xmax * width, ymax * height, 1]])
        moved = (corners * revolve_mat).tolist()
        xs = [p[0] for p in moved]
        ys = [p[1] for p in moved]
        # Seeds reproduce the original clamping: mins never exceed the
        # destination extent and maxes never drop below zero.
        min_x = min([dst_shape[1]] + xs)
        min_y = min([dst_shape[0]] + ys)
        max_x = max([0] + xs)
        max_y = max([0] + ys)
        result.append([min_y / dst_shape[0], min_x / dst_shape[1],
                       max_y / dst_shape[0], max_x / dst_shape[1]])
    return result


def get_edge_boxes_classes_scores(boxes, classes, scores, config, detection_graph, image):
    """Detect people near the rim of a 1280x1280 fisheye frame.

    The frame is rotated in 30-degree steps; at each step a top and a bottom
    360x360 strip are cropped and run through the edge detector, and the
    resulting boxes are mapped back into the un-rotated global frame.
    Results are appended in place to *boxes*, *classes* and *scores*.
    """
    with detection_graph.as_default(), \
            tf.Session(graph=detection_graph, config=config) as session, \
            tf.device("/gpu:0"):
        for angle in range(0, 180, 30):
            _, rotated = rotate_image(angle, image)
            top_crop = rotated[40:400, 460:820, :]
            bottom_crop = rotated[880:1240, 460:820, :]
            # Flip the bottom strip on both axes so the detector sees it
            # in the same orientation as the top strip.
            bottom_crop = np.flip(np.flip(bottom_crop, axis=0), axis=1)
            top_batch = np.expand_dims(top_crop, axis=0).astype(np.uint8)
            bottom_batch = np.expand_dims(bottom_crop, axis=0).astype(np.uint8)
            top_boxes, top_classes, top_scores = run_detection(
                session, detection_graph, top_batch)
            bottom_boxes, bottom_classes, bottom_scores = run_detection(
                session, detection_graph, bottom_batch)
            # Undo the double flip on the bottom boxes (normalized coords).
            bottom_boxes = np.vstack((1.0 - bottom_boxes[:, 2], 1.0 - bottom_boxes[:, 3],
                                      1.0 - bottom_boxes[:, 0], 1.0 - bottom_boxes[:, 1])).T
            top_boxes, top_classes, top_scores = convert_region_box_to_global(
                {'shape': (1280, 1280), 'crop_shape': (360, 360)},
                top_boxes, top_classes, top_scores, '460_40')
            bottom_boxes, bottom_classes, bottom_scores = convert_region_box_to_global(
                {'shape': (1280, 1280), 'crop_shape': (360, 360)},
                bottom_boxes, bottom_classes, bottom_scores, '460_880')
            merged_boxes = top_boxes + bottom_boxes
            merged_classes = top_classes + bottom_classes
            merged_scores = top_scores + bottom_scores
            if angle != 0:
                # Rotate the boxes back into the original frame, then shift
                # by the rotated-canvas offset (reverse crop conversion).
                revolve_mat, post_shape, offset = get_revolve_matrix(angle, (1280, 1280))
                merged_boxes = revolve_boxes(
                    np.asarray(merged_boxes), revolve_mat, (1280, 1280), post_shape)
                merged_boxes, merged_classes, merged_scores = convert_region_box_to_global(
                    {'shape': (1280, 1280), 'crop_shape': post_shape},
                    merged_boxes, merged_classes, merged_scores,
                    '{}_{}'.format(*offset), reverse=True)
            boxes.extend(merged_boxes)
            scores.extend(merged_scores)
            classes.extend(merged_classes)


def get_center_boxes_classes_scores(boxes, classes, scores, config, detection_graph, image):
    """Detect people in the centre of a 1280x1280 fisheye frame.

    Four overlapping 400x400 crops cover the central region; each is run
    through the centre detector and its boxes are mapped back to global
    coordinates.  Results are appended in place to *boxes*, *classes* and
    *scores*.
    """
    # (row, col) origins of the four centre crops.
    crop_origins = ((360, 360), (520, 360), (360, 520), (520, 520))
    with detection_graph.as_default(), \
            tf.Session(graph=detection_graph, config=config) as session, \
            tf.device("/gpu:0"):
        for row, col in crop_origins:
            crop = image[row:row + 400, col:col + 400, :]
            batch = np.expand_dims(crop, axis=0).astype(np.uint8)
            crop_boxes, crop_classes, crop_scores = run_detection(
                session, detection_graph, batch)
            # Region key is '<col>_<row>' (x before y).
            crop_boxes, crop_classes, crop_scores = convert_region_box_to_global(
                {'shape': (1280, 1280), 'crop_shape': (400, 400)},
                crop_boxes, crop_classes, crop_scores, '{}_{}'.format(col, row))
            boxes.extend(crop_boxes)
            scores.extend(crop_scores)
            classes.extend(crop_classes)


def predict_image(img, checkpoint_center='', checkpoint_edge='',
                  score=0.5, percent=0.8, need_return_image=False):
    """Run the two-model fisheye detector on one image.

    Parameters
    ----------
    img : np.ndarray
        1280x1280 RGB frame (assumed by the crop geometry in the region
        helpers -- TODO confirm for other resolutions).
    checkpoint_center, checkpoint_edge : str
        Paths to the frozen centre/edge detection graphs.
    score : float
        Minimum detection confidence to keep.
    percent : float
        Overlap threshold forwarded to merge_region_prediction.
    need_return_image : bool
        When True, draw the detections on *img* and return it.

    Returns
    -------
    tuple
        (points, annotated image or None).
    """
    # TODO(review): both graphs are re-loaded on every call; callers doing
    # repeated predictions would benefit from caching the loaded graphs.
    start_time = time.time()
    detection_graph_center = load_model(checkpoint_center)
    detection_graph_edge = load_model(checkpoint_edge)
    config = tf.ConfigProto(device_count={"CPU": 4, "GPU": 1})
    config.gpu_options.allow_growth = True
    stage_time = time.time()
    # Lazy %-style args: no string formatting when INFO is disabled.
    logging.info('%s elapsed time: %.3fs', time.ctime(), stage_time - start_time)
    logging.info('loading models completed !')
    start_time = stage_time

    raw_boxes, raw_classes, raw_scores = [], [], []
    get_center_boxes_classes_scores(
        raw_boxes, raw_classes, raw_scores, config, detection_graph_center, img)
    get_edge_boxes_classes_scores(
        raw_boxes, raw_classes, raw_scores, config, detection_graph_edge, img)

    stage_time = time.time()
    logging.info('%s elapsed time: %.3fs', time.ctime(), stage_time - start_time)
    logging.info('predicting image completed !')

    # Drop low-confidence detections before merging overlapping regions.
    boxes, classes, scores = [], [], []
    for box, cls, sc in zip(raw_boxes, raw_classes, raw_scores):
        if sc >= score:
            boxes.append(box)
            classes.append(cls)
            scores.append(sc)
    merged_boxes, merged_classes, merged_scores = merge_region_prediction(
        np.array(boxes), np.array(scores), np.array(classes), percent)
    merged_boxes = np.array(merged_boxes)
    merged_classes = np.array(merged_classes).astype(np.int32)
    merged_scores = np.array(merged_scores)
    points = get_point_from_box(merged_boxes)
    if not need_return_image:
        return points, None
    # Reuse the already-converted arrays instead of re-wrapping them.
    vis_util.visualize_boxes_and_labels_on_image_array(
        img, merged_boxes, merged_classes.astype(np.uint8), merged_scores,
        {1: {'id': 1, 'name': 'p'}},
        use_normalized_coordinates=True,
        min_score_thresh=score,
        line_thickness=3)
    return points, img


def get_json(url, rois,
             checkpoint_center='/home/admins/data/fisheye/models/frozen_inference_graph_fast_center_200000.pb',
             checkpoint_edge='/home/admins/data/fisheye/models/frozen_inference_graph_fast_edge_160998.pb'):
    """Download an image, run the detector and return the response JSON.

    Parameters
    ----------
    url : str
        Image URL to fetch.
    rois : str or None
        Dict-formatted string of ROI polygons (see _get_list), or None for
        a flat point list.
    checkpoint_center, checkpoint_edge : str
        Frozen-graph paths.  Defaults keep the previously hard-coded model
        locations, so existing two-argument callers are unaffected.

    Returns
    -------
    str
        JSON document produced by point_data_to_json.
    """
    image = np.array(url_to_image(url))
    # Log instead of print so verbosity follows the logging configuration.
    logging.info('input image shape: %s', image.shape)
    points, img = predict_image(
        np.array(image),
        checkpoint_center=checkpoint_center,
        checkpoint_edge=checkpoint_edge,
        need_return_image=True)
    return point_data_to_json(url, points, img, rois)


if __name__ == '__main__':
    # Command-line arguments are parsed but currently unused by the demo
    # call below; they feed the commented-out predict_image invocation.
    PARAMS = parse_args()
    logging.basicConfig(level=logging.INFO)
    # Smoke test: run the full pipeline on a remote sample image with a
    # single four-point ROI polygon (normalized coordinates).
    print(get_json(rois="{'1': [{'x': 0.1010, 'y': 0.2010}, {'x': 0.3010, 'y': 0.4010}, {'x':0.5010, 'y': 0.6010}, {'x': 0.7010,'y':0.8010}]}",
                   url='http://ovuhjq3qc.bkt.clouddn.com/2017-11-16%2017:20:22-920-.png'))
    # predict_image(
    #     img=PARAMS.root,
    #     checkpoint_center=PARAMS.model_center,
    #     checkpoint_edge=PARAMS.model_edge,
    #     score=0.5)
