import os
import argparse
import time
from functools import reduce

import cv2
import numpy as np
import tensorflow as tf
from object_detection.utils import visualization_utils as vis_util
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score

from beer.data.tools import check_xml_and_img_file
from utils.cropper import ImageDictCropper
from utils.detector import run_detection
from utils.evaluator import convert_region_box_to_global
from utils.evaluator import eval_detect_result
from utils.evaluator import merge_region_prediction
from utils.evaluator import read_xml_as_eval_info
from utils.io import get_label_from_pd_file
from utils.io import get_label_list_from_category_index
from utils.list import create_file_list


def parse_args():
    """Build the CLI parser for the prediction script and parse ``sys.argv``.

    Returns:
        argparse.Namespace with the fields ``root``, ``image_path``,
        ``checkpoint``, ``output``, ``score``, ``percent``, ``image_list``,
        ``class_num``, ``instance`` and ``label_file``.
    """
    parser = argparse.ArgumentParser(
        description='predictions parameters')
    # One (flag, dest, help, default, type) row per option, registered in
    # the original order so ``--help`` output is unchanged.
    option_specs = (
        ('--root', 'root', 'image to use', '', str),
        ('--image-path', 'image_path',
         'path of image to use in root, split by .', '', str),
        ('--checkpoint', 'checkpoint', 'path to checkpoint', '', str),
        ('--output-root', 'output', 'output root', '', str),
        ('--score', 'score', 'score threshold', 0.3, float),
        ('--percent', 'percent', 'area percent threshold', 0.8, float),
        ('--image-list', 'image_list', 'path of image to use', '', str),
        ('--class-num', 'class_num', 'number of classes', 9, int),
        ('--instance', 'instance', 'required instance', 0, int),
        ('--label-file', 'label_file', 'path of image to use', '', str),
    )
    for flag, dest, help_text, default, value_type in option_specs:
        parser.add_argument(
            flag,
            dest=dest,
            help=help_text,
            default=default,
            type=value_type)
    return parser.parse_args()


def _load_frozen_graph(checkpoint):
    """Load a frozen TF1 inference graph (``.pb`` GraphDef) from *checkpoint*."""
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(checkpoint, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
            tf.import_graph_def(od_graph_def, name='')
    return detection_graph


def predict_image(output_root, checkpoint, label_file, image_lists, score,
                  percent, class_num=None):
    """Run region-wise detection over a list of images and print metrics.

    For every ``'<img_path>&!&<xml_path>'`` entry in *image_lists* the image
    is cropped into regions, each region is passed through the frozen
    detection graph, region predictions are mapped back to global image
    coordinates and merged, and the result is compared with the XML ground
    truth.  Original and annotated images are written to *output_root*;
    weighted precision and recall are printed at the end.

    Args:
        output_root: directory receiving ``{idx}_origin.jpg`` and
            ``{idx}_predicted.jpg`` visualisations.
        checkpoint: path to a frozen TensorFlow inference graph (``.pb``).
        label_file: label map consumed by ``get_label_from_pd_file``.
        image_lists: iterable of ``'<img_path>&!&<xml_path>'`` strings.
        score: minimum detection score kept for the evaluation step.
        percent: area-percent threshold used when merging region predictions.
        class_num: number of classes in the label map.  Defaults to the
            module-level ``args.class_num`` so existing callers keep working.
    """
    if class_num is None:
        # Backward compatible: the original implementation read the CLI
        # globals directly; prefer passing class_num explicitly.
        class_num = args.class_num
    detection_graph = _load_frozen_graph(checkpoint)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    category_index = get_label_from_pd_file(label_file, class_num)
    label_list = get_label_list_from_category_index(category_index)
    ground_true = []
    predictions = []
    total = len(image_lists)  # hoisted: constant across the loop
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph, config=config) as sess:
            start_time = time.time()
            print(time.ctime())
            for idx, paths in enumerate(image_lists):
                print('predicting {} of {} images'.format(idx, total))
                # NOTE(review): only img_path is stripped; xml_path keeps any
                # surrounding whitespace — presumably the list entries never
                # have it, but confirm against create_file_list's output.
                img_path, xml_path = paths.split('&!&')
                img_path = img_path.strip()
                info = read_xml_as_eval_info(xml_path, label_list)
                cropper = ImageDictCropper(img_path)
                info['crop_shape'] = cropper.cropped_size
                cropper.update()
                images = cropper.get_images()
                _boxes = []
                _scores = []
                _classes = []
                for key, value in images.items():
                    # OpenCV loads BGR; reorder channels to RGB for the net.
                    image_np = np.array(value[:, :, (2, 1, 0)]).astype(np.uint8)
                    image_np = np.expand_dims(image_np, axis=0)
                    boxes, classes, scores = run_detection(sess, detection_graph, image_np)
                    boxes, classes, scores = convert_region_box_to_global(
                        info, boxes, classes, scores, key)
                    _boxes += boxes
                    _classes += classes
                    _scores += scores
                # NOTE the argument order: merge_region_prediction takes
                # (boxes, scores, classes) but returns (boxes, classes, scores).
                _boxes, _classes, _scores = merge_region_prediction(
                    np.array(_boxes), np.array(_scores), np.array(_classes), percent)
                _boxes = np.array(_boxes)
                _classes = np.array(_classes).astype(np.int32)
                _scores = np.array(_scores)
                image = cropper.image.copy()
                cv2.imwrite(os.path.join(output_root, '{}_origin.jpg'.format(idx)), image)
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image,
                    _boxes,
                    _classes,
                    _scores,
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=1)
                cv2.imwrite(os.path.join(output_root, '{}_predicted.jpg'.format(idx)), image)
                # Keep only detections at or above the score threshold, then
                # split the (box, class, score) rows back into columns.
                comb = filter(lambda x: x[-1] >= score, zip(_boxes, _classes, _scores))
                _boxes, _classes, _ = np.hsplit(np.array(list(comb)), [1, 2])
                _classes = list(_classes.flatten())
                _boxes = list(map(lambda x: list(x[0]), _boxes))
                # Re-read the XML: `info` may have been touched by the
                # region-to-global conversion above.
                objects_info = read_xml_as_eval_info(xml_path, label_list)['objects']
                gt_classes, gt_boxes = np.hsplit(np.array(objects_info), [1])
                if len(objects_info) == 0:
                    gt_boxes = []
                    gt_classes = []
                else:
                    # Labels are stored 0-based in the XML; category ids are
                    # 1-based.  Boxes are reordered (y, x, y, x) -> (x, y, x, y)
                    # — TODO confirm against read_xml_as_eval_info.
                    gt_classes = list(gt_classes.flatten() + 1)
                    gt_boxes = gt_boxes[:, (1, 0, 3, 2)]
                    gt_boxes = list(map(lambda x: list(x), gt_boxes))
                gt, pred = eval_detect_result(gt_boxes, gt_classes,
                                              _boxes, _classes, default_class=0)
                ground_true += gt
                predictions += pred
                print('{} elapsed time: {:.3f}s'.format(time.ctime(),
                                                        time.time() - start_time))
    print(ground_true)
    print(predictions)
    recall = recall_score(ground_true, predictions, average='weighted')
    print(recall)
    precision = precision_score(ground_true, predictions, average='weighted')
    print(precision)


def process():
    """Resolve input/output paths from the parsed CLI args and run prediction.

    ``args.image_path`` is a dot-separated chain of sub-directories that is
    joined under ``args.root`` to locate the images.
    """
    sub_dirs = args.image_path.split('.')
    image_root = os.path.join(args.root, *sub_dirs)
    image_lists, _ = create_file_list(image_root, filtering=check_xml_and_img_file, params=[[], '&!&'])
    # Fall back to the input root when no explicit output root was given.
    if args.output == '':
        output_root = args.root
    else:
        output_root = args.output
    if not os.path.exists(output_root):
        os.makedirs(output_root)
    predict_image(output_root, args.checkpoint, args.label_file,
                  image_lists, args.score, args.percent)


if __name__ == '__main__':
    # `args` is intentionally module-global: process() (and, via it,
    # predict_image) read the parsed CLI options from this name.
    args = parse_args()
    process()
