#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov  4 15:05:09 2017

@author: shirhe-lyh
"""
import getopt
import os
import pathlib
import sys

import cv2
import numpy as np
import tensorflow as tf
from PIL import Image


def run(model_path, images_dir):
    """Run object detection over every ``*.jpg`` in *images_dir*.

    Args:
        model_path: Path to a frozen TensorFlow 1.x inference graph (.pb).
        images_dir: Directory scanned (non-recursively) for ``*.jpg`` files,
            processed in sorted filename order.

    Returns:
        A list of ``[filename, max_score, max_box]`` entries, one per image,
        where ``max_box`` is ``[xmin, ymin, xmax, ymax]`` in pixel
        coordinates of the highest-scoring detection above 0.9, or ``None``
        when no detection cleared that threshold.

    Side effects:
        Writes progress and per-detection lines to the module-global
        ``logfile`` (opened in the ``__main__`` block).  Boxes and scores
        are also drawn onto the in-memory image, but the annotated image is
        never saved anywhere.
    """
    # Load the frozen detection graph into memory (TF 1.x API).
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(model_path, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    # Standard input/output tensor names of the TF Object Detection API.
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # Each box represents a part of the image where a particular
    # object was detected.
    gboxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    # Each score represents the confidence for the corresponding object.
    gscores = detection_graph.get_tensor_by_name('detection_scores:0')
    gclasses = detection_graph.get_tensor_by_name('detection_classes:0')
    gnum_detections = detection_graph.get_tensor_by_name('num_detections:0')

    images_path = sorted(pathlib.Path(images_dir).glob('*.jpg'))

    results = []

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            print('start')
            for image_path in images_path:
                # Fix: the original logged each image path twice.
                print(image_path, file=logfile)
                # The model expects a batch: shape [1, None, None, 3].
                image = np.array(Image.open(image_path))
                image_np_expanded = np.expand_dims(image, axis=0)

                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [gboxes, gscores, gclasses, gnum_detections],
                    feed_dict={image_tensor: image_np_expanded})

                # Drop the batch dimension.
                boxes = np.squeeze(boxes)
                scores = np.squeeze(scores)
                height, width = image.shape[:2]

                max_score = 0.0
                max_box = None

                for i in range(boxes.shape[0]):
                    # NOTE(review): sess.run never yields None here, so the
                    # ``scores is None`` branch is effectively dead; kept for
                    # safety and to preserve the original behavior.
                    if (scores is None or
                            scores[i] > 0.9):
                        # Boxes come back normalized [ymin, xmin, ymax, xmax];
                        # convert to pixel coordinates.
                        ymin, xmin, ymax, xmax = boxes[i]
                        ymin = int(ymin * height)
                        ymax = int(ymax * height)
                        xmin = int(xmin * width)
                        xmax = int(xmax * width)

                        score = None if scores is None else scores[i]
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        # Keep the label inside the image near the box corner.
                        text_x = np.max((0, xmin - 10))
                        text_y = np.max((0, ymin - 10))
                        cv2.putText(image, 'Detection score: ' + str(score),
                                    (text_x, text_y), font, 0.4, (0, 255, 0))
                        cv2.rectangle(image, (xmin, ymin), (xmax, ymax),
                                      (0, 255, 0), 2)
                        # Track the best detection for this image.
                        if max_score < scores[i]:
                            max_box = [xmin, ymin, xmax, ymax]
                            max_score = scores[i]

                        print('{}:{}:({} {} {} {})'.format(image_path, score, xmin, ymin, xmax, ymax), file=logfile)
                _, n = os.path.split(image_path)
                results.append([n, max_score, max_box])
    return results


import pathlib
import sys
import getopt


def print_help():
    """Print the command-line usage summary to stdout.

    Fixes: the ``--result`` line was missing its short flag ``-r``, and the
    parsed ``-f/--fps`` option was not documented at all.
    """
    print('--images/-i: input images dir, only for jpg')
    print('--model/-m: model file')
    print('--result/-r: output file')
    print('--fps/-f: frames per second of the input image sequence')
    print('--help/-h: help')


def match(ref, rect):
    """Return True when the two boxes overlap (edges touching counts).

    Both arguments are ``[xmin, ymin, xmax, ymax]`` boxes.
    """
    ref_x1, ref_y1, ref_x2, ref_y2 = ref
    x1, y1, x2, y2 = rect
    # Overlap on both axes: each interval must start before the other ends.
    horizontal = ref_x1 <= x2 and ref_x2 >= x1
    vertical = ref_y1 <= y2 and ref_y2 >= y1
    return horizontal and vertical


def analysis(data, extend_num=2):
    """Turn per-image detection results into appearance events.

    Args:
        data: List of ``[filename, max_score, max_box]`` entries as produced
            by ``run()``; ``max_box`` may be None or empty when nothing was
            detected in that image.
        extend_num: Half-width of the temporal smoothing window.

    Returns:
        A list of ``"filename:score"`` strings, one for each image at which
        a smoothed detection sequence starts (rising edge).
    """

    def _smooth(values):
        # One majority-vote pass over a sliding window: 1.0 where the window
        # around i carries more than ``extend_num`` total score, else 0.0.
        ref = np.array(values)
        smoothed = []
        for i in range(len(values)):
            start = max(0, i - extend_num)
            # NOTE(review): ``end`` is excluded by the slice below, so the
            # window is asymmetric (one element short on the right).  The
            # original behaved this way; preserved deliberately.
            end = min(i + extend_num, len(values) - 1)
            smoothed.append(1.0 if np.sum(ref[start:end]) > extend_num else 0.0)
        return smoothed

    files = []
    scores = []
    locations = []
    orglocation = []
    for name, score, box in data:
        files.append(name)
        scores.append(score)
        if box is not None and len(box) > 0:
            locations.append(box)
            orglocation.append(box)
        else:
            orglocation.append([])

    # Robustness fix: with no detection boxes at all, the percentile
    # indexing below raised IndexError; there is nothing to report then.
    if not locations:
        return []

    # Reference box from the ~10th/90th percentiles of all detected box
    # coordinates; this rejects outlier detections far from the usual spot.
    locations = np.array(locations)
    count = locations.shape[0]
    xmin = np.sort(locations[:, 0])[int(count / 10)]
    ymin = np.sort(locations[:, 1])[int(count / 10)]
    xmax = np.sort(locations[:, 2])[int(count * 9 / 10)]
    ymax = np.sort(locations[:, 3])[int(count * 9 / 10)]
    ref = [xmin, ymin, xmax, ymax]

    # Keep only confident detections that overlap the reference box.
    for i in range(len(scores)):
        if not (scores[i] > 0.9 and match(ref, orglocation[i])):
            scores[i] = 0.0

    # Two identical smoothing passes, exactly as the original did (the
    # duplicated loop bodies are now one helper applied twice).
    scores = _smooth(scores)
    scores = _smooth(scores)

    # Report every rising edge: a frame whose predecessor was "off".
    results = []
    for i in range(1, len(files)):
        if scores[i - 1] < 0.5 and scores[i] > 0.5:
            results.append("{}:{}".format(files[i], scores[i]))
    return results


def write_result(result_file, data):
    """Write the detection events, one per line, to *result_file* (UTF-8)."""
    joined = '\n'.join(data)
    with open(result_file, 'w', encoding='utf8') as out_fp:
        out_fp.write(joined)


# NOTE: a ``global`` statement at module scope is a no-op; ``logfile`` is
# actually created (and opened) in the ``__main__`` block below and read as
# a module global by run().
global logfile


def read_result(filename):
    """Rebuild ``run()``-style results from a detection log file.

    Each log line is either a bare image path or
    ``path:score:(xmin ymin xmax ymax)``.  For every image the best
    (highest-score) detection is kept.

    NOTE(review): lines are split on ':', so paths containing a colon (e.g.
    Windows drive letters like ``E:/images/x.jpg``) will not parse
    correctly — confirm the log only contains colon-free paths.

    Returns:
        A list of ``[filename, best_score, location]`` entries in file
        order, where ``location`` is ``[xmin, ymin, xmax, ymax]`` as floats
        (empty list when the image had no scored line).
    """
    results = []
    with open(filename, 'r') as fp:
        imagefile = None
        score = 0.0
        location = []
        for line in fp:
            arr = line.strip().split(':')
            if imagefile is not None and imagefile != arr[0]:
                # A new image starts: flush the previous image's best hit.
                results.append([os.path.split(imagefile)[1], score, location])
                score = 0.0
                location = []
            imagefile = arr[0]
            if len(arr) > 2 and float(arr[1]) > score:
                score = float(arr[1])
                coords = arr[2].replace('(', '').replace(')', '')
                location = [float(x) for x in coords.split(' ')]
        # Bug fix: the original dropped the final image's record because it
        # only flushed on a path change inside the loop.
        if imagefile is not None:
            results.append([os.path.split(imagefile)[1], score, location])
    return results


if __name__ == '__main__':
    # Work from the script's own directory so the default relative paths
    # (model, images dir, log file) resolve regardless of the caller's cwd.
    pyfile = sys.argv[0]
    pwd, filename = os.path.split(pyfile)
    if pwd:
        # Fix: os.chdir('') raises when the script is invoked by bare name.
        os.chdir(pwd)
    # Module-global log handle; run() writes its progress here.
    logfile = open('detection.log', 'a+')
    # Fix: -h/--help takes no value, so it must not be declared with ':'/'='.
    optlist, args = getopt.getopt(
        sys.argv[1:], 'i:m:r:hf:',
        ['images=', 'model=', 'result=', 'help', 'fps='])
    images_dir = 'images'
    model_file = 'frozen_inference_graph.pb'
    result_file = 'output.txt'
    fps = 1
    for k, v in optlist:
        if k in ('-h', '--help'):
            # Fix: the original printed help and then ran a full detection.
            print_help()
            logfile.close()
            sys.exit(0)
        elif k in ('-i', '--images'):
            images_dir = v
        elif k in ('-m', '--model'):
            model_file = v
        elif k in ('-r', '--result'):
            result_file = v
        elif k in ('-f', '--fps'):
            fps = float(v)

    results = run(model_file, images_dir)
    # The smoothing window covers roughly two seconds worth of frames.
    data = analysis(results, int(fps * 2))
    write_result(result_file, data)
    logfile.close()

