import tensorflow as tf
from PIL import Image, ImageFont, ImageDraw
import numpy as np
import cv2
import os
from absl import app, flags
from absl.flags import FLAGS
from termcolor import colored

from models.yolonet import *
from tools.dataset import Dataset

# Colored log-prefix tags for console output.
INFO = colored('[ INFO  ]', 'blue')
ERROR = colored('[ ERROR ]', 'red')
NOTE = colored('[ NOTE ]', 'green')

# Dataset / model selection flags; paths are resolved as
# data/<dataset>/<dataset>.names, *_img_ann.npy and *_anchor.npy in main().
flags.DEFINE_string('dataset', 'coco', 'type of dataset')
flags.DEFINE_string('ckpt_weights', './log/coco/yolo_model.h5', 'path to model weight file')
flags.DEFINE_string('model_type', 'yolo_mobilev1', 'type of model')
flags.DEFINE_string('test_dir', '/dataset/COCO/2017/images/val2017/', 'dir of test images')

# Network input geometry and output head size.
flags.DEFINE_integer('img_h', 224, 'image height')
flags.DEFINE_integer('img_w', 320, 'image width')
flags.DEFINE_integer('batch_size', 1, 'batch size')
flags.DEFINE_integer('class_num', 80, 'number of classes in the model')

# alpha: MobileNet width multiplier; iou/obj thresholds drive NMS and the
# score mask in main().
flags.DEFINE_float('alpha', 0.5, 'alpha of mobilenet')
flags.DEFINE_float('iou_thresh', 0.3, 'iou thresh')
flags.DEFINE_float('obj_thresh', 0.7, 'obj thresh')

# Per-class RGB colors used when drawing detection boxes (indexed by class id).
colormap = [
    (255, 82, 0), (0, 255, 245), (0, 61, 255), (0, 255, 112), (0, 255, 133),
    (255, 0, 0), (255, 163, 0), (255, 102, 0), (194, 255, 0), (0, 143, 255),
    (51, 255, 0), (0, 82, 255), (0, 255, 41), (0, 255, 173), (10, 0, 255),
    (173, 255, 0), (0, 255, 153), (255, 92, 0), (255, 0, 255), (255, 0, 245),
    (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
    (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),
    (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
    (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128),
    (61, 230, 250), (255, 6, 51), (11, 102, 255), (255, 7, 71), (255, 9, 224),
    (9, 7, 230), (220, 220, 220), (255, 9, 92), (112, 9, 255), (8, 255, 214),
    (7, 255, 224), (255, 184, 6), (10, 255, 71), (255, 41, 10), (7, 255, 255),
    (224, 255, 8), (102, 8, 255), (255, 61, 6), (255, 194, 7), (255, 122, 8),
    (0, 255, 20), (255, 8, 41), (255, 5, 153), (6, 51, 255), (235, 12, 255),
    (160, 150, 20), (0, 163, 255), (140, 140, 140), (250, 10, 15), (20, 255, 0),
    (31, 255, 0), (255, 31, 0), (255, 224, 0), (153, 255, 0), (0, 0, 255),
    (255, 71, 0), (0, 235, 255), (0, 173, 255), (31, 0, 255), (11, 200, 200)]


def correct_box(box_xy: tf.Tensor, box_wh: tf.Tensor, input_shape: list, image_shape: list) -> tf.Tensor:
    """Rescale predicted boxes from letterboxed network space back to the
    original image scale.

    Parameters
    ----------
    box_xy : tf.Tensor
        predicted box centers, normalized, in (x, y) order
    box_wh : tf.Tensor
        predicted box sizes, normalized, in (w, h) order
    input_shape : list
        network input shape as (height, width)
    image_shape : list
        original image shape as (height, width)

    Returns
    -------
    tf.Tensor
        boxes as (y_min, x_min, y_max, x_max), scaled by image_shape
    """
    # Flip to (y, x) / (h, w) order so arithmetic lines up with the
    # (height, width) shape tensors below.
    yx = box_xy[..., ::-1]
    hw = box_wh[..., ::-1]
    in_hw = tf.cast(input_shape, tf.float32)
    img_hw = tf.cast(image_shape, tf.float32)

    # Size of the aspect-preserving resized image inside the letterbox,
    # plus the normalized padding offset and the stretch factor that undo it.
    fitted_hw = tf.round(img_hw * tf.reduce_min(in_hw / img_hw))
    pad = (in_hw - fitted_hw) / 2. / in_hw
    stretch = in_hw / fitted_hw

    yx = (yx - pad) * stretch
    hw = hw * stretch

    # Centers/sizes -> corners: (y_min, x_min, y_max, x_max).
    half = hw / 2.
    boxes = tf.concat([yx - half, yx + half], axis=-1)

    # Scale boxes back to original image shape.
    return boxes * tf.concat([img_hw, img_hw], axis=-1)


def convert_box(pred_xy, pred_wh, grid_size, anchors):
    """Decode raw network xy/wh outputs into normalized box centers and
    sizes using the feature-map grid and the anchor priors."""
    # Build a (gh, gw, 1, 2) tensor of per-cell offsets in (x, y) order.
    cell_offsets = tf.meshgrid(tf.range(grid_size[1]), tf.range(grid_size[0]))
    cell_offsets = tf.expand_dims(tf.stack(cell_offsets, axis=-1), axis=2)

    # Sigmoid confines each center to its cell; adding the cell offset and
    # dividing by the grid extent normalizes to [0, 1).
    xy = tf.sigmoid(pred_xy) + tf.cast(cell_offsets, tf.float32)
    xy = xy / tf.cast([grid_size[1], grid_size[0]], tf.float32)

    # Width/height are exponential scales applied to the anchor priors.
    wh = tf.exp(pred_wh) * anchors

    return xy, wh


def main(_argv):
    """Run YOLO detection over the images in ``FLAGS.test_dir`` and show them.

    Loads class names and anchors for ``FLAGS.dataset``, builds the network
    named by ``FLAGS.model_type``, restores ``FLAGS.ckpt_weights``, then for
    every image: preprocess, forward pass, box decoding, per-class NMS, and
    on-screen visualisation of the surviving boxes.
    """
    # Restrict TensorFlow to the first visible GPU.
    gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
    tf.config.experimental.set_visible_devices(devices=gpus[0], device_type='GPU')

    # Class names: one label per line; `with` ensures the handle is closed
    # (the original left the file open).
    name_dir = 'data/{}/{}.names'.format(FLAGS.dataset.lower(), FLAGS.dataset.lower())
    with open(name_dir, 'r') as f:
        class_names = [class_name.strip('\n') for class_name in f.readlines()]

    dataset_dir = 'data/{}/{}_img_ann.npy'.format(FLAGS.dataset.lower(), FLAGS.dataset.lower())
    anchor_dir = 'data/{}/{}_anchor.npy'.format(FLAGS.dataset.lower(), FLAGS.dataset.lower())
    ds = Dataset(dataset_dir, anchor_dir, FLAGS.batch_size, FLAGS.class_num, (FLAGS.img_h, FLAGS.img_w))

    # NOTE(review): eval() of a flag is dangerous on untrusted input; it is
    # kept because model_type must name a factory from models.yolonet.
    network = eval(FLAGS.model_type)  # type :yolo_mobilev2
    yolo_model, yolo_model_warpper = network([FLAGS.img_h, FLAGS.img_w, 3], len(ds.anchor_masks[0]),
                                             FLAGS.class_num, alpha=FLAGS.alpha)
    yolo_model_warpper.summary()
    yolo_model_warpper.load_weights(str(FLAGS.ckpt_weights))

    print(INFO, f' Load CKPT {str(FLAGS.ckpt_weights)}')

    # NOTE(review): this overwrites the checkpoint file with the full model,
    # and `keras` must be provided by the star import from models.yolonet —
    # confirm both are intended.
    keras.models.save_model(yolo_model, FLAGS.ckpt_weights)

    # Collect image paths: a single file, or every entry of the directory.
    img_dir = []
    if os.path.isfile(FLAGS.test_dir):
        img_dir.append(FLAGS.test_dir)
    else:
        for file in os.listdir(FLAGS.test_dir):
            img_dir.append(FLAGS.test_dir + '/' + file)

    np.random.shuffle(img_dir)

    for img_ in img_dir:
        img = cv2.imread(str(img_))
        img = img.astype(np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (FLAGS.img_w, FLAGS.img_h))
        orig_img = img

        """ image preprocess """
        # Map uint8 pixels into roughly [-1, 1) to match training preprocessing.
        img = img / 127.0 - 1.0
        image_shape = img.shape[0:2]

        """ load images """
        img = tf.expand_dims(img, 0)
        y_pred = yolo_model_warpper.predict(img)

        """ box list """
        _yxyx_box = []
        _yxyx_box_scores = []
        """ preprocess label """
        for l, pred_label in enumerate(y_pred):
            """ split the label """
            pred_xy = pred_label[..., 0:2]
            pred_wh = pred_label[..., 2:4]
            pred_confidence = pred_label[..., 4:5]
            pred_cls = pred_label[..., 5:]
            # Per-class score = class probability * objectness.
            box_scores = tf.sigmoid(pred_cls) * tf.sigmoid(pred_confidence)
            """ reshape box  """
            # convert_box applies sigmoid to xy internally.
            grid_size = tf.shape(pred_label)[1:3]
            pred_xy, pred_wh = convert_box(pred_xy, pred_wh, grid_size, ds.anchors[ds.anchor_masks[l]])

            boxes = correct_box(pred_xy, pred_wh, [FLAGS.img_h, FLAGS.img_w], image_shape)
            boxes = tf.reshape(boxes, (-1, 4))
            box_scores = tf.reshape(box_scores, (-1, FLAGS.class_num))
            """ append box and scores to global list """
            _yxyx_box.append(boxes)
            _yxyx_box_scores.append(box_scores)

        yxyx_box = tf.concat(_yxyx_box, axis=0)
        yxyx_box_scores = tf.concat(_yxyx_box_scores, axis=0)

        # Keep only boxes whose per-class score clears the objectness threshold.
        mask = yxyx_box_scores >= FLAGS.obj_thresh

        """ do nms for every classes"""
        _boxes = []
        _scores = []
        _classes = []

        for c in range(FLAGS.class_num):
            class_boxes = tf.boolean_mask(yxyx_box, mask[:, c])
            class_box_scores = tf.boolean_mask(yxyx_box_scores[:, c], mask[:, c])
            select = tf.image.non_max_suppression(
                class_boxes, scores=class_box_scores, max_output_size=30, iou_threshold=FLAGS.iou_thresh)
            class_boxes = tf.gather(class_boxes, select)
            class_box_scores = tf.gather(class_box_scores, select)
            _boxes.append(class_boxes)
            _scores.append(class_box_scores)
            _classes.append(tf.ones_like(class_box_scores) * c)

        boxes = tf.concat(_boxes, axis=0)
        classes = tf.concat(_classes, axis=0)
        scores = tf.concat(_scores, axis=0)

        """ draw box  """
        font = ImageFont.truetype(font='asset/FiraMono-Medium.otf',
                                  size=tf.cast(tf.floor(3e-2 * image_shape[0] + 0.5), tf.int32).numpy())

        # Box outline thickness scales with image size.
        thickness = (image_shape[0] + image_shape[1]) // 300

        """ show result """
        if len(classes) > 0:
            pil_img = Image.fromarray(orig_img)
            print('-*' * 20)
            print(f'[top\tleft\tbottom\tright\tscore\tclass]')
            for i, c in enumerate(classes):
                box = boxes[i]
                score = scores[i]
                label = '{:2d} {:.2f}'.format(int(c), score.numpy())
                draw = ImageDraw.Draw(pil_img)
                label_size = draw.textsize(label, font)
                top, left, bottom, right = box
                print(f'[{top:.1f}\t{left:.1f}\t{bottom:.1f}\t{right:.1f}\t{score:.2f}\t{int(c):2d}\t]', class_names[int(c)])
                # Clamp the box to the image bounds.
                top = max(0, tf.cast(tf.floor(top + 0.5), tf.int32))
                left = max(0, tf.cast(tf.floor(left + 0.5), tf.int32))
                bottom = min(image_shape[0], tf.cast(tf.floor(bottom + 0.5), tf.int32))
                right = min(image_shape[1], tf.cast(tf.floor(right + 0.5), tf.int32))

                # Fix: draw the label above the box only when the text fits
                # there, otherwise just inside the top edge. The original
                # compared against image_shape[0], which is never exceeded by
                # the clamped `top`, so the first branch was unreachable.
                if top - label_size[1] >= 0:
                    text_origin = tf.convert_to_tensor([left, top - label_size[1]])
                else:
                    text_origin = tf.convert_to_tensor([left, top + 1])

                # Emulate a thick outline with `thickness` nested rectangles.
                for j in range(thickness):
                    draw.rectangle(
                        [left + j, top + j, right - j, bottom - j],
                        outline=colormap[int(c)])
                draw.rectangle(
                    [tuple(text_origin), tuple(text_origin + label_size)],
                    fill=colormap[int(c)])
                draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                del draw
            pil_img.show()
            # pil_img.close()
        else:
            print(NOTE, ' no boxes detectd')

        cv2.imshow('img', orig_img)
        cv2.waitKey()


if __name__ == "__main__":
    try:
        app.run(main)
    except SystemExit:
        pass
