# -*- coding:utf-8 -*-

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import os, sys
import tensorflow as tf
import time
import cv2
import argparse
import numpy as np
sys.path.append("./")

from data.io.image_preprocess import short_side_resize_for_inference_data
# sys.path.append("../libs/")
from libs.configs import cfgs
from libs.networks import build_whole_network
from libs.box_utils import draw_box_in_img
from help_utils import tools
from libs.label_name_dict.label_dict import LABEl_NAME_MAP


'''人为构造xml文件的格式'''
# Hand-built templates for writing PASCAL VOC style XML annotation files
# via old-style %-formatting (the string above means "manually
# constructed XML file format").

# Header template: folder / filename / path plus the image size.
# NOTE(review): <depth> is hard-coded to 1, yet translate() reads the
# channel count from the image — confirm whether 1 is intentional.
out0 ='''<annotation>
    <folder>%(folder)s</folder>
    <filename>%(name)s</filename>
    <path>%(path)s</path>
    <source>
        <database>Unknown</database>
    </source>
    <size>
        <width>%(width)d</width>
        <height>%(height)d</height>
        <depth>1</depth>
    </size>
    <segmented>0</segmented>
'''
# One <object> entry per detected box: class name plus integer pixel
# coordinates of the bounding box (floats are truncated by %d).
out1 = '''    <object>
        <name>%(class)s</name>
        <pose>Unspecified</pose>
        <truncated>0</truncated>
        <difficult>0</difficult>
        <bndbox>
            <xmin>%(xmin)d</xmin>
            <ymin>%(ymin)d</ymin>
            <xmax>%(xmax)d</xmax>
            <ymax>%(ymax)d</ymax>
        </bndbox>
    </object>
'''

# Closing tag of the annotation document.
out2 = '''</annotation>
'''


def translate(fdir, xml_dir, img_path, boxes, labels):
    """Write a PASCAL VOC style XML annotation file for one image.

    Args:
        fdir: unused; kept for backward compatibility with existing callers.
        xml_dir: directory the XML file is written into (e.g. Annotations).
        img_path: path of a single image, e.g. *.jpg.
        boxes: detected boxes for this image; each box provides at least
            (xmin, ymin, xmax, ymax) in positions 0-3.
        labels: class ids for the boxes above, mapped through LABEl_NAME_MAP.

    Returns:
        None. Side effect: writes '<image basename>.xml' into xml_dir.
    """
    image = cv2.imread(img_path)
    h, w, c = image.shape

    image_name = os.path.basename(img_path)
    # splitext handles every accepted extension (.png, .tif, ...); the old
    # str.replace('.jpg', '.xml') silently kept the wrong name for non-jpgs.
    xml_name = os.path.splitext(image_name)[0] + ".xml"

    source = {
        'name': image_name,
        'path': img_path,
        'folder': "JPEGImages",
        'width': w,
        'height': h,
        'depth': c,
    }

    # 'with' guarantees the handle is closed even if formatting raises.
    with open(os.path.join(xml_dir, xml_name), "w") as fxml:
        fxml.write(out0 % source)
        label = {}
        for i, box in enumerate(boxes):
            label['class'] = LABEl_NAME_MAP[labels[i]]
            label['xmin'] = box[0]
            label['ymin'] = box[1]
            label['xmax'] = box[2]
            label['ymax'] = box[3]
            fxml.write(out1 % label)
        fxml.write(out2)


def detect(det_net, inference_save_path, real_test_imgname_list, save_xml):
    """Run the detector over a list of images and save visualised results.

    Args:
        det_net: a DetectionNetwork built with is_training=False.
        inference_save_path: output root; 'positive'/'negative' (and, when
            save_xml is True, 'Annotations_tive') sub-dirs are created in it.
        real_test_imgname_list: list of image file paths to run inference on.
        save_xml: if True, also dump detections as VOC-style XML files.
    """
    # 1. Preprocessing graph: uint8 RGB -> float32, short-side resize,
    # mean subtraction, then add the batch dimension.
    img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3])  # is RGB. not BGR
    img_batch = tf.cast(img_plac, tf.float32)
    img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
                                                     target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
                                                     length_limitation=cfgs.IMG_MAX_LENGTH)
    img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
    img_batch = tf.expand_dims(img_batch, axis=0)  # [1, None, None, 3]

    # 2. Detection graph; inference only, so no ground-truth boxes.
    detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
        input_img_batch=img_batch,
        gtboxes_batch=None)

    init_op = tf.group(
        tf.global_variables_initializer(),
        tf.local_variables_initializer()
    )

    restorer, restore_ckpt = det_net.get_restorer()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # Output directories are loop-invariant: compute and create them once
    # instead of re-checking on every image.
    positive_path = os.path.join(inference_save_path, 'positive')
    negative_path = os.path.join(inference_save_path, 'negative')
    xml_path = os.path.join(inference_save_path, 'Annotations_tive')
    if not os.path.exists(positive_path):
        os.mkdir(positive_path)
    if not os.path.exists(negative_path):
        os.mkdir(negative_path)
    if save_xml and not os.path.exists(xml_path):
        os.mkdir(xml_path)

    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if restorer is not None:
            restorer.restore(sess, restore_ckpt)
            print('restore model')

        for i, a_img_name in enumerate(real_test_imgname_list):
            print("\n\n")
            print(a_img_name)

            raw_img = cv2.imread(a_img_name)
            start = time.time()
            resized_img, detected_boxes, detected_scores, detected_categories = \
                sess.run(
                    [img_batch, detection_boxes, detection_scores, detection_category],
                    feed_dict={img_plac: raw_img[:, :, ::-1]}  # cv2 is BGR; the graph expects RGB
                )
            end = time.time()

            print("detected_boxes:{}\ndetected_scores:{}\ndetected_categories:{}".format(
                detected_boxes.shape, detected_scores.shape, detected_categories.shape))

            # Keep only detections above the display threshold.
            show_indices = detected_scores >= cfgs.SHOW_SCORE_THRSHOLD
            show_scores = detected_scores[show_indices]
            show_boxes = detected_boxes[show_indices]
            show_categories = detected_categories[show_indices]
            final_detections = draw_box_in_img.draw_boxes_with_label_and_scores(
                np.squeeze(resized_img, 0),
                boxes=show_boxes,
                labels=show_categories,
                scores=show_scores)

            nake_name = os.path.basename(a_img_name)
            if save_xml:
                translate(fdir=None, xml_dir=xml_path, img_path=a_img_name,
                          boxes=show_boxes, labels=show_categories)

            # NOTE(review): images WITH detections are saved under 'negative'
            # and images without under 'positive' — this mirrors the original
            # logic; confirm the naming is intentional and not inverted.
            if any(show_scores):
                cv2.imwrite(os.path.join(negative_path, nake_name),
                            final_detections[:, :, ::-1])  # RGB back to BGR for cv2
            else:
                cv2.imwrite(os.path.join(positive_path, nake_name),
                            final_detections[:, :, ::-1])
            print('final_detections\ncategory:{}\nscore:{}\nbox:{}'.format(
                show_categories, show_scores, show_boxes))
            tools.view_bar('{} image cost {}s'.format(a_img_name, (end - start)),
                           i + 1, len(real_test_imgname_list))


def inference(test_dir, inference_save_path, save_xml=False):
    """Collect the image files under test_dir and run detection on them all.

    Args:
        test_dir: directory holding the test images.
        inference_save_path: directory where result images (and XML) go.
        save_xml: when True, also write VOC-style XML annotation files.
    """
    valid_exts = ('.jpg', '.png', '.jpeg', '.tif', '.tiff')
    test_imgname_list = []
    for img_name in os.listdir(test_dir):
        if img_name.endswith(valid_exts):
            test_imgname_list.append(os.path.join(test_dir, img_name))

    assert test_imgname_list, 'test_dir has no imgs there.' \
                              ' Note that, we only support img format of (.jpg, .png, and .tiff) '

    net = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
                                               is_training=False)
    detect(det_net=net,
           inference_save_path=inference_save_path,
           real_test_imgname_list=test_imgname_list,
           save_xml=save_xml)


def parse_args():
    """Parse command-line arguments for the inference script.

    Prints the usage text and exits with status 1 when the script is
    invoked without any arguments.

    Returns:
        argparse.Namespace with data_dir, GPU and save_xml attributes.
    """
    parser = argparse.ArgumentParser(description='TestImgs...U need provide the test dir')
    parser.add_argument('--data_dir', dest='data_dir', help='data path', default='demos', type=str)
    parser.add_argument('--GPU', dest='GPU', help='gpu id ', default='0', type=str)
    parser.add_argument('--save_xml', action='store_true', help='save results to *.xml')

    # Require at least one explicit argument; a bare invocation shows help.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    return parser.parse_args()

if __name__ == '__main__':

    # Script entry point: parse CLI args, pin the visible GPU, then run
    # inference over every image in args.data_dir.
    args = parse_args()
    print('Called with args:')
    print(args)
    # Restrict TensorFlow to the GPU chosen on the command line.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
    # NOTE(review): results are written back into the input directory
    # (inference_save_path == args.data_dir) — confirm this is intended,
    # since a --save_dir option was commented out in parse_args.
    inference(args.data_dir, inference_save_path=args.data_dir, save_xml=args.save_xml)
