import cv2
import time
import os, os.path
import sys
import tensorflow as tf
import numpy as np
import collections
from object_detection.utils import label_map_util
import PIL.Image as Image
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont



# PIL color names used to give each detection class a distinct box color
# (indexed by class id modulo the list length).
STANDARD_COLORS = [
    'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
    'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
    'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
    'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
    'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
    'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
    'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
    'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
    'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
    'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
    'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
    'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
    'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
    'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
    'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
    'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
    'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
    'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
    'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
    'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
    'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
    'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
    'WhiteSmoke', 'Yellow', 'YellowGreen'
]


def get_all_images_list_and_masks(dirs_path_list, dirs_mask_list):
    """Collect image paths from each directory, pairing every image with
    the mask label of the directory it came from.

    Args:
        dirs_path_list: directories to scan for image files.
        dirs_mask_list: one mask label per directory (parallel list).

    Returns:
        (all_images_list, all_masks_list): flat, parallel lists — one full
        image path per entry, sorted by filename within each directory,
        with the owning directory's mask label at the same index.
    """
    all_images_list = []
    all_masks_list = []

    for dir_path, dir_mask in zip(dirs_path_list, dirs_mask_list):
        for filename in sorted(os.listdir(dir_path)):
            all_images_list.append(os.path.join(dir_path, filename))
            all_masks_list.append(dir_mask)

    return all_images_list, all_masks_list


def load_image_into_numpy_array(image):
    """Return a uint8 copy of an image array, shape preserved.

    Fix: the original unpacked the channel count but hard-coded 3 in the
    reshape, which raised for any non-3-channel input. Using the actual
    channel count is identical for RGB/BGR images and generalizes to others.

    Args:
        image: numpy array of shape (height, width, channels).

    Returns:
        A new uint8 numpy array with the same (height, width, channels) shape.
    """
    (im_height, im_width, channels) = image.shape
    return np.array(image).reshape(
        (im_height, im_width, channels)).astype(np.uint8)

def draw_bounding_box_on_image(image,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color='red',
                               thickness=4,
                               display_str_list=(),
                               use_normalized_coordinates=True):
    """Draw a rectangle plus its label strings onto a PIL image, in place.

    Args:
        image: PIL.Image to draw on (mutated in place).
        ymin, xmin, ymax, xmax: box corners; fractions of the image size
            when use_normalized_coordinates is True, otherwise pixels.
        color: PIL color name/spec for the outline and label background.
        thickness: outline width in pixels.
        display_str_list: strings drawn along the box's top edge.
        use_normalized_coordinates: see the coordinate parameters above.
    """
    draw = ImageDraw.Draw(image)
    im_width, im_height = image.size
    if use_normalized_coordinates:
        (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                  ymin * im_height, ymax * im_height)
    else:
        (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
    # Closed outline: the five points retrace the rectangle back to its start.
    draw.line([(left, top), (left, bottom), (right, bottom),(right, top), (left, top)], width=thickness, fill=color)
    try:
        font = ImageFont.truetype('arial.ttf', 12)
    except IOError:
        # Fall back to PIL's built-in bitmap font when Arial isn't installed.
        font = ImageFont.load_default()

    text_bottom = top
    # Labels are laid out left-to-right along the box's top edge: text_bottom
    # never changes and `left` advances by each string's width.
    # NOTE(review): font.getsize was removed in Pillow 10; newer Pillow needs
    # font.getbbox / draw.textbbox — confirm the pinned Pillow version.
    for display_str in display_str_list:
        text_width, text_height = font.getsize(display_str)
        margin = np.ceil(0.05 * text_height)
        # Filled backdrop behind the text so it stays legible on any image.
        draw.rectangle(
            [(left, text_bottom - text_height - 2 * margin), (left + text_width,
                                                              text_bottom)],
            fill=color)
        draw.text(
            (left + margin, text_bottom - text_height - margin),
            display_str,
            fill='black',
            font=font)
        left += text_width

# DRAW BOX###########################
def draw_bounding_box_on_image_array(image,
                                     ymin,
                                     xmin,
                                     ymax,
                                     xmax,
                                     color='red',
                                     thickness=4,
                                     display_str_list=(),
                                     use_normalized_coordinates=True):
    """Draw a labelled bounding box onto a numpy image array, in place.

    Converts the array to a PIL RGB image, delegates the drawing to
    draw_bounding_box_on_image, then copies the pixels back into `image`.
    """
    as_pil = Image.fromarray(np.uint8(image)).convert('RGB')
    draw_bounding_box_on_image(as_pil,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color,
                               thickness,
                               display_str_list,
                               use_normalized_coordinates)
    np.copyto(image, np.array(as_pil))





def use_result(img_path,
               image,
               boxes,
               classes,
               scores,
               category_index,
               instance_masks=None,
               keypoints=None,
               use_normalized_coordinates=False,
               max_boxes_to_draw=20,
               min_score_thresh=.5,
               agnostic_mode=False,
               line_thickness=8):
    """Draw detections on `image` (in place) and save crops of class-2 boxes.

    Args:
        img_path: path of the source image (used to name saved crops).
        image: numpy image (height, width, channels); drawn on in place.
        boxes: [N, 4] array of (ymin, xmin, ymax, xmax) boxes.
        classes: [N] int class ids.
        scores: [N] detection scores, or None to draw every box in black.
        category_index: class id -> {'name': ...} mapping for labels.
        instance_masks / keypoints: optional per-detection extras; collected
            into maps but not otherwise used by this function.
        use_normalized_coordinates: True when boxes are fractions of the
            image size.
        max_boxes_to_draw: cap on boxes considered (0/None means all).
        min_score_thresh: minimum score for a detection to be used.
        agnostic_mode: if True, ignore classes (single color, blank label).
        line_thickness: box outline width.

    Returns:
        None. (The caller compares the result against an int mask; that
        comparison is always False — kept as-is to preserve behavior, but
        worth revisiting.)
    """
    box_to_display_str_map = collections.defaultdict(list)
    class_str_map = []
    # Untouched copy used for cropping, so crops carry no drawn boxes/labels.
    images = image.copy()
    box_to_color_map = collections.defaultdict(str)
    box_to_instance_masks_map = {}
    box_to_keypoints_map = collections.defaultdict(list)
    # Fix: the original read im_width/im_height from module globals set by the
    # caller's loop (NameError if called standalone); derive them locally.
    im_height, im_width = image.shape[0], image.shape[1]
    if not max_boxes_to_draw:
        max_boxes_to_draw = boxes.shape[0]
    for i in range(min(max_boxes_to_draw, boxes.shape[0])):
        if scores is None or scores[i] > min_score_thresh:
            box = tuple(boxes[i].tolist())

            if instance_masks is not None:
                box_to_instance_masks_map[box] = instance_masks[i]
            if keypoints is not None:
                box_to_keypoints_map[box].extend(keypoints[i])
            if scores is None:
                box_to_color_map[box] = 'black'
            else:
                if not agnostic_mode:
                    if classes[i] in category_index.keys():
                        class_name = category_index[classes[i]]['name']
                    else:
                        class_name = 'N/A'
                    class_str = class_name
                    display_str = class_name
                else:
                    # Fix: class_str was unbound in this branch, crashing the
                    # class_str_map.append below whenever agnostic_mode=True.
                    class_str = ''
                    display_str = ''

                # Label entries per box: name, class id, score. Index 1 (the
                # class id string) is relied on by the cropping pass below.
                box_to_display_str_map[box].append(display_str)
                box_to_display_str_map[box].append(str(classes[i]))
                box_to_display_str_map[box].append(str(scores[i]))

                class_str_map.append(class_str)
                if agnostic_mode:
                    box_to_color_map[box] = 'DarkOrange'
                else:
                    box_to_color_map[box] = STANDARD_COLORS[
                        classes[i] % len(STANDARD_COLORS)]

    # Draw every selected box, with its label strings, onto `image`.
    for box, color in box_to_color_map.items():
        ymin, xmin, ymax, xmax = box

        draw_bounding_box_on_image_array(
            image,
            ymin,
            xmin,
            ymax,
            xmax,
            color=color,
            thickness=line_thickness,
            display_str_list=box_to_display_str_map[box],
            use_normalized_coordinates=use_normalized_coordinates)

    # =========================================
    # Second pass: crop class-2 detections from the clean copy and save them.
    # NOTE(review): boxes are scaled by the image size here regardless of
    # use_normalized_coordinates — only correct for normalized boxes; confirm.
    for box, color in box_to_color_map.items():
        print( box_to_display_str_map[box][1])
        ymin, xmin, ymax, xmax = box
        x1 = int((xmin - 0) * im_width)
        x2 = int((xmax + 0) * im_width)
        y1 = int((ymin - 0) * im_height)
        y2 = int((ymax + 0) * im_height)
        # Clamp the crop window to the image bounds before slicing.
        x1 = max(x1, 0)
        y1 = max(y1, 0)
        x2 = min(x2, im_width)
        y2 = min(y2, im_height)
        if box_to_display_str_map[box][1] == '2':
            print(box_to_display_str_map[box][1])
            cropImg = images[y1:y2, x1:x2]
            cv2.imwrite(os.path.join('E:\\process_sg\\ss',str(time.time())[-2:]+'_'+os.path.basename(img_path))[0:-8]+".jpg",cropImg)




# defined variable
# ===============================================

# Label map (class id -> display name), TF Object Detection API .pbtxt format.
PATH_TO_OCR_LABELS = 'E:\\process_sg\\shanggang\\label_map.pbtxt'

# Minimum detection score for a box to be used/drawn.
DETECT_TH = 0.7

# Directory of test images.
path_0 = 'E:\\process_sg\\Untitled Folder'


dirs_path_list = [path_0]

# Expected mask label for each directory above (parallel lists).
dirs_mask_list = [0]


# just worry:0   show_all_image:1
# 1 = display every processed image; 0 = display only mismatches ("worry").
show_image = 1

# Alternative checkpoint path, kept for reference:
# MODEL = '/media/ai18/4846bb03-289b-45f9-b333-5da500b78e62/shared/LiAng/temporary_model/200417/shanggang/2004162002_OD_plate_shanggang_T1_G1_DSG1_98955.pb'
MODEL = 'E:\\process_sg\\shanggang\\2004202034_OD_plate_shanggang_T1_G2_DSGHAB2_99857.pb'
# ========================================

# Build the class-id -> category lookup used when labelling detections.
seal_label_map = label_map_util.load_labelmap(PATH_TO_OCR_LABELS)
seal_categories = label_map_util.convert_label_map_to_categories(seal_label_map, max_num_classes=sys.maxsize, use_display_name=True)
SEAL_CATEGORY_INDEX = label_map_util.create_category_index(seal_categories)


# read model and create sess

# Load the frozen TF1 inference graph (.pb) into a fresh Graph.
truck_detection_graph = tf.Graph()
with truck_detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(MODEL, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

print('detecting...')
with truck_detection_graph.as_default():
    with tf.Session(graph=truck_detection_graph) as sess:
        # Standard TF Object Detection API input/output tensor names.
        image_tensor = truck_detection_graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = truck_detection_graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = truck_detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = truck_detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = truck_detection_graph.get_tensor_by_name('num_detections:0')

        # get images/masks list
        all_images_list, all_masks_list = get_all_images_list_and_masks(dirs_path_list, dirs_mask_list)

        # start test
        right_num = 0

        for img, mask in zip(all_images_list, all_masks_list):
            image = cv2.imread(img)
            # Fix: cv2.imread returns None for unreadable files; the old
            # `if not image.shape` check raised AttributeError instead of
            # skipping them (and a valid shape tuple is always truthy).
            if image is None:
                continue
            im_height, im_width, c = image.shape
            image_np = load_image_into_numpy_array(image)
            # Add the batch dimension the detector expects: (1, H, W, C).
            image_np_expanded = np.expand_dims(image_np, axis=0)

            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})

            # use result
            # NOTE(review): use_result has no return statement, so image_mask
            # is always None and never equals an int mask — right_num can
            # never increment; the accuracy print below is always 0. Confirm
            # what use_result was meant to return.
            image_mask = use_result(img, image_np,
                                    np.squeeze(boxes),
                                    np.squeeze(classes).astype(np.int32),
                                    np.squeeze(scores),
                                    SEAL_CATEGORY_INDEX,
                                    min_score_thresh=DETECT_TH,
                                    use_normalized_coordinates=True,
                                    line_thickness=8)

            if image_mask == mask:
                right_num += 1

            # Fix: paths here are Windows-style, so split('/') printed the
            # whole path; basename works for both separators.
            print('   ', os.path.basename(img), image_mask, mask)
            print()
            image_np = cv2.resize(image_np, (640, 340))
            if show_image:
                cv2.imshow('all', image_np)
                cv2.waitKey(1)
            else:
                if image_mask != mask:
                    cv2.imshow('worry', image_np)
                    cv2.waitKey(1)

        # Guard against an empty image list (ZeroDivisionError before).
        if all_masks_list:
            print('--P: ', round((right_num / len(all_masks_list)), 4))
        else:
            print('--P: no images found')

