#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: liang kang
@contact: gangkanli1219@gmail.com
@time: 2018/1/26 16:08
@desc: 
"""
import os
import xml.etree.ElementTree as ET

import numpy as np
import tensorflow as tf
import cv2
from skimage import io

from .basic import get_overlap_area
from .basic import is_overlap


def load_model(checkpoint):
    """Load a frozen TensorFlow detection graph from disk.

    Parameters
    ----------
    checkpoint : str
        Path to a frozen-graph protobuf (``.pb``) file.

    Returns
    -------
    tf.Graph
        A graph with the serialized ``GraphDef`` imported under an empty
        name scope.
    """
    # silence TensorFlow's INFO/WARNING console spam
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(checkpoint, 'rb') as fid:
            graph_def.ParseFromString(fid.read())
        tf.import_graph_def(graph_def, name='')
    return graph


def run_detection(sess, detection_graph, image_np):
    """Run the detection network on one batch of images.

    Parameters
    ----------
    sess : tf.Session
        Session bound to ``detection_graph``.
    detection_graph : tf.Graph
        Graph produced by ``load_model``.
    image_np : numpy.ndarray
        4-D image batch fed to the ``image_tensor:0`` placeholder.

    Returns
    -------
    (boxes, classes, scores)
        Squeezed detection boxes, int32 class ids and confidence scores.
    """
    get = detection_graph.get_tensor_by_name
    fetches = [
        get('detection_boxes:0'),
        get('detection_scores:0'),
        get('detection_classes:0'),
        get('num_detections:0'),
    ]
    raw_boxes, raw_scores, raw_classes, _ = sess.run(
        fetches, feed_dict={get('image_tensor:0'): image_np})
    boxes = np.squeeze(raw_boxes)
    classes = np.squeeze(raw_classes).astype(np.int32)
    scores = np.squeeze(raw_scores)
    return boxes, classes, scores


def merge_region_prediction(boxes, scores, classes, percent):
    """Merge overlapping boxes, letting higher-confidence boxes win.

    Boxes are visited in descending score order; a box is discarded when
    it overlaps an already-kept box by more than *percent* of the smaller
    box's area.

    Parameters
    ----------
    boxes : numpy.ndarray
        Boxes as ``[ymin, xmin, ymax, xmax]`` rows.
    scores : numpy.ndarray
        Confidence score per box.
    classes : numpy.ndarray
        Class id per box.
    percent : float
        Overlap-ratio threshold above which boxes are merged.

    Returns
    -------
    (boxes, classes, scores) : lists of the surviving entries.
    """
    # Bug fix: the original indexed boxes[0] unconditionally and raised
    # IndexError when the detector produced no boxes at all.
    if len(boxes) == 0:
        return [], [], []
    order = np.argsort(-scores)
    boxes = boxes[order]
    scores = scores[order]
    classes = classes[order]
    _boxes = [boxes[0]]
    _scores = [scores[0]]
    _classes = [classes[0]]
    for box, score_, cls in zip(boxes, scores, classes):
        is_add = True
        for _box in _boxes:
            if not is_overlap(box, _box):
                continue
            # ratio is taken against the *smaller* of the two boxes
            src_area = min((_box[2] - _box[0]) * (_box[3] - _box[1]),
                           (box[2] - box[0]) * (box[3] - box[1]))
            area = get_overlap_area(_box, box)
            # a near-zero area counts as fully covered (also avoids
            # dividing by zero on degenerate boxes)
            if (src_area < 0.001) or ((area / src_area) > percent):
                is_add = False
                break
        if is_add:
            _boxes.append(box)
            _scores.append(score_)
            _classes.append(cls)
    return _boxes, _classes, _scores


def convert_region_box_to_global(info, boxes, classes, scores, index, reverse=False):
    """Map normalized boxes between a crop's frame and the full image.

    Parameters
    ----------
    info : dict
        Needs ``'shape'`` (full image h, w) and ``'crop_shape'`` (crop h, w).
    boxes : iterable of ``[ymin, xmin, ymax, xmax]``
        Boxes normalized to the crop (or to the full image when *reverse*).
    classes, scores : iterables parallel to *boxes*.
    index : str
        Crop offset encoded as ``"<x>_<y>"`` in pixels.
    reverse : bool
        When True, subtract the offset instead of adding it
        (global -> local instead of local -> global).

    Returns
    -------
    (boxes, classes, scores) : lists; boxes with xmax or ymax >= 1.0 are
    dropped.
    """
    src_h, src_w = info['shape'][:2]
    crop_h, crop_w = info['crop_shape'][:2]
    parts = index.split('_')
    offset_x, offset_y = int(parts[0]), int(parts[1])
    sign = -1 if reverse else 1
    out_boxes, out_scores, out_classes = [], [], []
    for box, score, cls in zip(boxes, scores, classes):
        ymin = (box[0] * crop_h + sign * offset_y) / src_h
        xmin = (box[1] * crop_w + sign * offset_x) / src_w
        ymax = (box[2] * crop_h + sign * offset_y) / src_h
        xmax = (box[3] * crop_w + sign * offset_x) / src_w
        # discard boxes whose far corner leaves the target frame
        if xmax >= 1.0 or ymax >= 1.0:
            continue
        out_boxes.append([ymin, xmin, ymax, xmax])
        out_scores.append(score)
        out_classes.append(cls)
    return out_boxes, out_classes, out_scores


def check_image_symbol(image):
    """Validate *image* and load it as a 1280x1280 BGR array.

    Parameters
    ----------
    image : str or numpy.ndarray
        Either a path/URL readable by ``skimage.io.imread`` or an
        already-loaded RGB(A) array (HWC, or CHW which gets transposed).

    Returns
    -------
    numpy.ndarray
        BGR image resized to 1280x1280.

    Raises
    ------
    TypeError
        If the image is grayscale or *image* is neither str nor ndarray.
    FileExistsError
        If the file cannot be read.  NOTE(review): FileNotFoundError would
        be the conventional type — kept as-is since callers may catch
        FileExistsError.
    """
    if isinstance(image, str):
        try:
            img = io.imread(image)
            # a 2-D array is single-channel (grayscale), which the color
            # conversions below cannot handle
            if 2 == len(img.shape):
                raise TypeError('please input a rgb image instead of gray image !')
            # drop the alpha channel if present
            if 4 == img.shape[-1]:
                img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
        except OSError:
            raise FileExistsError('could not read image !')
    elif isinstance(image, np.ndarray):
        # NOTE(review): image[:] is a view, not a copy, for ndarrays —
        # harmless here only because every later op allocates a new array
        img = image[:]
        # heuristic CHW -> HWC transpose: assumes a leading dim of 3 or 4
        # means channels-first — TODO confirm no caller passes genuine
        # 3/4-row HWC images
        if 3 == img.shape[0] or 4 == img.shape[0]:
            img = np.transpose(img, (1, 2, 0))
        if 4 == img.shape[-1]:
            img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
    else:
        raise TypeError('image type is wrong !')
    # force the fixed 1280x1280 size — presumably the model's expected
    # input resolution; verify against the detection pipeline
    if not (img.shape[0] == img.shape[1] == 1280):
        img = cv2.resize(img, (1280, 1280))
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    return img


def read_xml_as_eval_info(xml_path, label_list):
    """Parse a Pascal-VOC style annotation XML into an eval-info dict.

    Parameters
    ----------
    xml_path : str or file object
        Source accepted by ``xml.etree.ElementTree.parse``.
    label_list : sequence of str
        Class names; objects with other names are skipped.

    Returns
    -------
    dict
        ``{'shape': (height, width), 'objects': [[label_index, xmin, ymin,
        xmax, ymax], ...]}`` with coordinates normalized to [0, 1].
    """
    root = ET.parse(xml_path).getroot()
    size_node = root.find('size')
    width = int(size_node.find('width').text)
    height = int(size_node.find('height').text)
    objects = []
    for obj in root.iter('object'):
        name = obj.find('name').text
        if name not in label_list:
            continue
        bnd = obj.find('bndbox')
        coord = {tag: int(bnd.find(tag).text)
                 for tag in ('xmin', 'ymin', 'xmax', 'ymax')}
        objects.append([label_list.index(name),
                        coord['xmin'] / width,
                        coord['ymin'] / height,
                        coord['xmax'] / width,
                        coord['ymax'] / height])
    return {'shape': (height, width), 'objects': objects}


def eval_detect_result(true_boxes, true_classes, pred_boxes, pred_classes,
                       threshold=0.8, default_class=-1):
    """Align predictions with ground truth for confusion-style evaluation.

    Each prediction is matched to the same-class ground-truth box with the
    largest overlap ratio above *threshold* (ratio = intersection area /
    smaller box area).  Unmatched predictions become *default_class* in the
    ground-truth list (false positives); unmatched ground-truth boxes
    become *default_class* in the prediction list (misses).

    Parameters
    ----------
    true_boxes, true_classes : ground-truth boxes and their class ids
    pred_boxes, pred_classes : predicted boxes and their class ids
    threshold : float, minimum overlap ratio to count as a match
    default_class : label used for unmatched entries

    Returns
    -------
    (ground_true, predictions) : parallel lists of class labels.
    """
    ground_true = []
    predictions = []
    matched = []  # indices of ground-truth boxes claimed by some prediction
    for box, cls in zip(pred_boxes, pred_classes):
        best_idx = -1
        best_rate = 0.0
        for idx, (t_box, t_cls) in enumerate(zip(true_boxes, true_classes)):
            if not is_overlap(t_box, box):
                continue
            area = get_overlap_area(t_box, box)
            src_area = min((box[2] - box[0]) * (box[3] - box[1]),
                           (t_box[2] - t_box[0]) * (t_box[3] - t_box[1]))
            # Bug fix: guard degenerate (zero-area) boxes instead of
            # raising ZeroDivisionError; mirrors merge_region_prediction.
            if src_area <= 0:
                continue
            rate = area / src_area
            if (rate > threshold) and (cls == t_cls) and (rate > best_rate):
                best_rate = rate
                best_idx = idx
        # Bug fix: the original appended every *intermediate* best index,
        # so a ground-truth box later beaten by a better match was still
        # marked as detected and never counted as missed below.
        if best_idx != -1:
            matched.append(best_idx)
        ground_true.append(default_class if best_idx == -1 else cls)
        predictions.append(cls)
    # ground-truth boxes that no prediction claimed count as misses
    for idx in range(len(true_classes)):
        if idx not in matched:
            ground_true.append(true_classes[idx])
            predictions.append(default_class)
    return ground_true, predictions