# This kit tests the generated model: it checks the accuracy of the detected
# objects against the labeled images that were used for training the model.
import io
import os
import sys

import PIL.Image as Image
import numpy as np
import tensorflow as tf
from object_detection.utils import label_map_util
from tensorflow.python.lib.io.file_io import FileIO

import tfrecord_decoder
import visualization_utils

# Make the parent directory importable (the object_detection package lives one
# level up in the TF models research tree).
# NOTE(review): this runs *after* the imports above, so it does not help them
# resolve — confirm the script is launched from a directory where they import.
sys.path.append("..")
flags = tf.app.flags

flags.DEFINE_string("model_path", 'D:/work/workspace/test_model/helmet_fire_test_1/good/frozen_inference_graph.pb',
                    "The model used for training.")
flags.DEFINE_string('path_to_labels',
                    'D:/work/workspace/test_model/helmet_fire_test_1/helmet_fire.pbtxt',
                    "The path for label mapping .pbtxt file")
flags.DEFINE_string('tf_record_path', 'D:/work/workspace/helmet_fire.record',
                    "The path to the config file.")
flags.DEFINE_integer('epoch', 100,
                     'The number of samples to test in a single run.')
flags.DEFINE_integer('test_buffer_size', 1000,
                     'The buffer size for testing within the TF record.')
FLAGS = flags.FLAGS

# Snapshot the flag values into module-level constants used below.
PATH_TO_CKPT = FLAGS.model_path
TFRECORD_FILE = FLAGS.tf_record_path
TESTING_EPOCH = FLAGS.epoch
BUFFER_SIZE = FLAGS.test_buffer_size

# Upper bound on class ids passed to the label-map utilities.
# NOTE(review): 90 is the COCO default; the helmet/fire label map presumably
# defines far fewer classes — harmless as an upper bound, but confirm.
NUM_CLASSES = 90


def calculate_box_size(box, img_height, img_width):
    """Scale one normalized (ymin, xmin, ymax, xmax) box to pixel units.

    Returns a one-element list holding the scaled tuple; callers index the
    result with [0], so the nested shape is part of the contract.
    """
    ymin, xmin, ymax, xmax = box
    scaled = (ymin * img_height,
              xmin * img_width,
              ymax * img_height,
              xmax * img_width)
    return [scaled]


def convert_box_values(boxes, img_height, img_width):
    """Scale every normalized box in *boxes* to pixel coordinates.

    Each output element is a one-element list holding the scaled
    (ymin, xmin, ymax, xmax) tuple, mirroring calculate_box_size.
    """
    return [
        [(ymin * img_height, xmin * img_width,
          ymax * img_height, xmax * img_width)]
        for ymin, xmin, ymax, xmax in boxes
    ]


# Splits the {box: display_strings} mapping produced by the visualization
# helper into a list of pixel-scaled boxes and a parallel list of texts.
def convert_to_normalized_boxes(input_box, img_height, img_width):
    """Scale each detection box to pixel units and collect its label text.

    Returns (boxes, texts) where boxes[i] is a one-element list holding the
    scaled (ymin, xmin, ymax, xmax) tuple for the i-th detection and
    texts[i] is that detection's display-string value.
    """
    normalized_boxes = []
    texts = []
    for (ymin, xmin, ymax, xmax), display_strs in input_box.items():
        normalized_boxes.append([(ymin * img_height,
                                  xmin * img_width,
                                  ymax * img_height,
                                  xmax * img_width)])
        texts.append(display_strs)
    return normalized_boxes, texts


def convert_tensor_dataset_to_box_and_text(e_ymin, e_xmin, e_ymax, e_xmax, e_text):
    """Regroup per-coordinate ground-truth values into boxes plus texts.

    Each argument is a pair whose second item holds the per-box values
    (as produced by the sparse-tensor decode); index [1] selects them.
    The number of boxes is taken from e_ymax, matching the original
    behaviour.
    """
    count = len(e_ymax[1])
    boxes = [(e_ymin[1][idx], e_xmin[1][idx], e_ymax[1][idx], e_xmax[1][idx])
             for idx in range(count)]
    texts = [e_text[1][idx] for idx in range(count)]
    return boxes, texts


# The accuracy is measured as intersection-over-union (IoU): the overlap area
# of a detected box divided by the union area of the detection and the
# ground-truth box, taking the best-matching detection per ground-truth box.
def calculate_accuracy(groundtruth_boxes, normalized_boxes, groundtruth_text, texts):
    """Score one image's detections against its ground truth.

    Args:
        groundtruth_boxes: list of one-element lists, each holding a
            (ymin, xmin, ymax, xmax) tuple in pixel coordinates.
        normalized_boxes: detections in the same nested shape.
        groundtruth_text: byte strings with the true class labels.
        texts: per-detection display strings shaped like ["label: 97%"].

    Returns:
        The mean best IoU over all ground-truth boxes; 0 when there are no
        ground-truth boxes or a box has no same-label overlapping detection.
    """
    image_accuracy = []
    for label_box, raw_text in zip(groundtruth_boxes, groundtruth_text):
        text = raw_text.decode('utf-8')
        ymin, xmin, ymax, xmax = (float(v) for v in label_box[0])
        gt_area = (ymax - ymin) * (xmax - xmin)
        max_accuracy = -1
        for test_box, display_strs in zip(normalized_boxes, texts):
            # The display string looks like "label: 97%"; keep only the label.
            test_text = str(display_strs[0])
            test_text = test_text[:test_text.index(':')]
            if test_text != text:
                continue
            t_ymin, t_xmin, t_ymax, t_xmax = test_box[0]
            # Overlap extents; non-positive means the boxes do not intersect.
            # (Bug fix: the old sorted-coordinate != test misclassified boxes
            # that share an edge coordinate.)
            cross_width = min(xmax, t_xmax) - max(xmin, t_xmin)
            cross_height = min(ymax, t_ymax) - max(ymin, t_ymin)
            if cross_width <= 0 or cross_height <= 0:
                continue
            cross_area = cross_width * cross_height
            test_area = (t_ymax - t_ymin) * (t_xmax - t_xmin)
            # Bug fix: the union is gt + detection - intersection; the old
            # denominator (2 * gt_area - intersection) silently assumed the
            # detection had the same area as the ground-truth box.
            union_area = gt_area + test_area - cross_area
            if union_area <= 0:
                continue
            max_accuracy = max(max_accuracy, cross_area / union_area)
        image_accuracy.append(max_accuracy if max_accuracy > 0 else 0)

    return sum(image_accuracy) / len(image_accuracy) if image_accuracy else 0


def calculate_average_accuracy(total_accuracy_indices):
    """Return the mean of the accuracy values, or 0 for an empty list.

    Bug fix: the old body divided unconditionally and raised
    ZeroDivisionError on an empty input; 0 matches the convention used by
    calculate_accuracy.
    """
    if not total_accuracy_indices:
        return 0
    return sum(total_accuracy_indices) / len(total_accuracy_indices)


def process_accuracy(filename, height, width, groundtruth_boxes, groundtruth_text, box_to_display_map):
    """Compute, print and return the accuracy for a single test image."""
    height_f = float(height)
    width_f = float(width)
    detected_boxes, detected_texts = convert_to_normalized_boxes(
        box_to_display_map, height_f, width_f)
    scaled_groundtruth = convert_box_values(groundtruth_boxes, height_f, width_f)

    single_img_accuracy = calculate_accuracy(
        scaled_groundtruth, detected_boxes, groundtruth_text, detected_texts)
    # Strip the 4-character extension from the stored file name.
    name = filename.decode('utf-8')[:-4]
    print(f"File Name:{name}\tAccuracy:{single_img_accuracy}")
    return single_img_accuracy


def load_image_into_numpy_array(image):
    """Convert a PIL RGB image into an (height, width, 3) uint8 array."""
    width, height = image.size
    pixels = np.array(image.getdata(), dtype=np.uint8)
    return pixels.reshape((height, width, 3))


total_accuracy = []


def draw_boxes_on_image_array(image, groundtruth_boxes, groundtruth_text):
    """Draw the ground-truth boxes and labels in red onto *image*, in place.

    Args:
        image: (height, width, 3) uint8 numpy array; modified in place.
        groundtruth_boxes: normalized (ymin, xmin, ymax, xmax) tuples.
        groundtruth_text: byte-string labels, decoded here as UTF-8.
    """
    texts = [text.decode('utf-8') for text in groundtruth_text]
    for (ymin, xmin, ymax, xmax), label in zip(groundtruth_boxes, texts):
        image_pil = Image.fromarray(image).convert('RGB')
        # Bug fix: draw_bounding_box_on_image expects a *list* of display
        # strings; passing the bare string made it treat each character as a
        # separate label.
        visualization_utils.draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax,
                                                       color='red', thickness=4,
                                                       display_str_list=[label],
                                                       use_normalized_coordinates=True)
        np.copyto(image, np.array(image_pil))


def process_testing(tfrecord_file, buffer_size, epoch, path_to_ckpt, path_to_labels):
    """Evaluate a frozen detection graph against labeled TFRecord samples.

    Loads the frozen inference graph and the label map, then for ``epoch``
    samples drawn (shuffled) from the TFRecord: runs detection, draws both
    the predicted and the ground-truth boxes on the image, computes the
    per-image accuracy, saves the annotated JPEG into the result folder, and
    finally prints the average accuracy over the whole run.

    Args:
        tfrecord_file: path to the TFRecord with images plus ground truth.
        buffer_size: shuffle buffer size for the dataset.
        epoch: number of samples to evaluate.
        path_to_ckpt: path to the frozen_inference_graph.pb file.
        path_to_labels: path to the .pbtxt label map.
    """
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with FileIO(path_to_ckpt, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
            label_map = label_map_util.load_labelmap(path_to_labels)
            categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                                        use_display_name=True)
            category_index = label_map_util.create_category_index(categories)

    with detection_graph.as_default():
        dataset = tf.data.TFRecordDataset(tfrecord_file)
        dataset = dataset.shuffle(buffer_size=buffer_size)
        iterator = dataset.make_one_shot_iterator()
        element = iterator.get_next()
        with tf.Session(graph=detection_graph) as sess:
            # Input/output tensors of the standard TF object-detection graph.
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')

            for _ in range(epoch):
                example = sess.run(element)
                image, height, width, filename, xmin, xmax, ymin, ymax, text \
                    = tfrecord_decoder.decode_example(example)
                e_image, e_height, e_width, e_filename, e_xmin, e_xmax, e_ymin, e_ymax, e_text = \
                    sess.run([image, height, width, filename, xmin, xmax, ymin, ymax, text])

                groundtruth_boxes, groundtruth_text = \
                    convert_tensor_dataset_to_box_and_text(e_ymin, e_xmin, e_ymax, e_xmax, e_text)

                image_io = io.BytesIO(e_image)
                image_data = Image.open(image_io)
                image_byte = load_image_into_numpy_array(image_data)
                image_np_expanded = np.expand_dims(image_byte, axis=0)

                # Do the prediction and returns the label boxes.
                (boxes, scores, classes, num) = sess.run(
                    [detection_boxes, detection_scores, detection_classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})

                # Draws the predictions and returns {box: display_strings}.
                box_to_display_str_map = visualization_utils.visualize_boxes_and_labels_on_image_array(
                    image_byte,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=8)

                # Overlay the ground truth so both box sets are visible.
                draw_boxes_on_image_array(image_byte, groundtruth_boxes, groundtruth_text)

                # Calculate the accuracy.
                accuracy = process_accuracy(e_filename, e_height, e_width, groundtruth_boxes, groundtruth_text,
                                            box_to_display_str_map)
                total_accuracy.append(accuracy)
                result = Image.fromarray(image_byte)
                result_file_name = e_filename.decode('utf-8')[:-4] + '_acc_' + str(accuracy) + '.jpeg'
                # Bug fix: the old code also called io.FileIO(result_file_name, 'w')
                # here without ever closing it, leaking a file handle each
                # iteration; Image.save creates the file on its own.
                result.save(result_file_name, 'jpeg')
                os.rename(result_file_name, '.\\result\\' + result_file_name)

    accuracy_sum = sum(total_accuracy)

    print("=========================================================")
    print("Total Average Accuracy:" + str(accuracy_sum / len(total_accuracy) if accuracy_sum > 0 else 0))


def main():
    """Entry point: run the evaluation with the command-line flag values."""
    process_testing(tfrecord_file=TFRECORD_FILE,
                    buffer_size=BUFFER_SIZE,
                    epoch=TESTING_EPOCH,
                    path_to_ckpt=PATH_TO_CKPT,
                    path_to_labels=FLAGS.path_to_labels)


# Script entry point.
if __name__ == '__main__':
    main()
