import os
import time
import tensorflow as tf
from absl import app, flags
from absl.flags import FLAGS
import core.utils as utils
from tensorflow.python.saved_model import tag_constants
from core.config import cfg
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from deep_sort import preprocessing, nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet

# ---- Command-line flags (absl) -------------------------------------------
# Model / input-output configuration.
flags.DEFINE_string('weights', './checkpoints/yolov4-416-tiny', 'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', True, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov4')
flags.DEFINE_string('video', './data/video/custom.mp4', 'path to input video or set to 0 for webcam')
flags.DEFINE_string('output', './output/custom-tracker-tiny.mp4', 'path to output video')
flags.DEFINE_string('output_format', 'mp4v', 'codec used in VideoWriter when saving video to file')
# On-screen count above this threshold is rendered in the warning colour.
flags.DEFINE_float('warn_num', 5, 'TRAFFIC VOLUME WARNING threshold')
# Detection post-processing thresholds.
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.45, 'score threshold')
# Display / overlay toggles.
flags.DEFINE_boolean('dont_show', False, 'dont show video output')
flags.DEFINE_boolean('info', True, 'show detailed info of tracked objects')
flags.DEFINE_boolean('count', True, 'count objects being tracked on screen')

# Silence TensorFlow C++ logging and enable incremental GPU memory
# allocation on the first visible GPU (no-op on CPU-only machines).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)


def cv2ImgAddText(img, text: str, location: tuple, text_color: tuple, text_size: int):
    """Draw (possibly non-ASCII) text onto an image with a PIL TrueType font.

    Args:
        img: an RGB ``np.ndarray`` frame, or an already-converted PIL image.
        text: string to render (supports CJK glyphs the cv2 renderer cannot).
        location: (x, y) top-left anchor of the text.
        text_color: (r, g, b) colour of the text.
        text_size: font size in points.

    Returns:
        The annotated frame as an ``np.ndarray`` in the caller's channel order.
    """
    if isinstance(img, np.ndarray):
        # Swap channels before handing the frame to PIL; together with the
        # colour-triple swap below and the inverse conversion on return,
        # the caller's channel order is preserved end-to-end.
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
    font = ImageFont.truetype("./data/font/simsun.ttc", text_size, encoding="utf-8")
    # Mirror the channel swap in the colour so the rendered text comes out
    # in the (r, g, b) the caller asked for after the final conversion.
    swapped_color = (text_color[2], text_color[1], text_color[0])
    ImageDraw.Draw(img).text(location, text, swapped_color, font=font)
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_BGR2RGB)


def main(_argv):
    """Run YOLOv4(-tiny) detection + Deep SORT tracking over a video stream.

    Reads frames from ``FLAGS.video`` (file path, or a numeric string for a
    webcam index), detects objects with the saved model at ``FLAGS.weights``,
    associates them across frames with Deep SORT, overlays traffic counts and
    per-track labels, and optionally writes the annotated video to
    ``FLAGS.output``.
    """
    # Deep SORT association parameters.
    max_cosine_distance = 0.4  # appearance gate for matching detections to tracks
    nn_budget = None           # unlimited per-track appearance-feature gallery
    nms_max_overlap = 1.0      # overlap of 1.0 effectively disables the extra NMS pass

    # Appearance-feature encoder (frozen TF1 graph) used for re-identification.
    model_filename = 'data/model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    # TF1-compat session with incremental GPU memory allocation; kept alive
    # for the frozen-graph encoder above.
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)
    # load_config is called for its model setup; the returned tuple is unused here.
    STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
    input_size = FLAGS.size
    video_path = FLAGS.video

    saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])
    infer = saved_model_loaded.signatures['serving_default']

    # A numeric string selects a webcam device index; anything else is a path.
    try:
        vid = cv2.VideoCapture(int(video_path))
    except ValueError:
        vid = cv2.VideoCapture(video_path)

    out = None
    if FLAGS.output:
        width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(vid.get(cv2.CAP_PROP_FPS))
        codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

    # Loop-invariant lookups hoisted out of the per-frame loop.
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)
    allowed_classes = list(utils.read_class_names(cfg.CUSTOM.ALLOWED_CLASSES).values())
    cmap = plt.get_cmap('tab20b')
    colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
    frame_width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))

    fps_list = []       # per-frame processing FPS, for the final average
    carSet = set()      # unique track ids ever observed for cars/buses
    personSet = set()   # unique track ids ever observed for pedestrians
    # Display labels for tracked classes (runtime strings — do not translate).
    name_dict = {'person': '行人', 'car': '车辆', 'bus': '车辆'}

    while True:
        return_value, frame = vid.read()
        if not return_value:
            print('程序运行结束！')
            break
        # Work in RGB internally; converted back to BGR before display/write.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Preprocess: square resize, [0, 1] normalisation, batch axis.
        image_data = cv2.resize(frame, (input_size, input_size))
        image_data = image_data / 255.
        image_data = image_data[np.newaxis, ...].astype(np.float32)
        start_time = time.time()

        batch_data = tf.constant(image_data)
        pred_bbox = infer(batch_data)
        # serving_default emits a single tensor: [boxes | per-class scores].
        for value in pred_bbox.values():
            boxes = value[:, :, 0:4]
            pred_conf = value[:, :, 4:]

        boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
            boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
            scores=tf.reshape(
                pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
            max_output_size_per_class=50,
            max_total_size=50,
            iou_threshold=FLAGS.iou,
            score_threshold=FLAGS.score
        )

        # Strip the batch axis and drop padded (invalid) detections.
        num_objects = valid_detections.numpy()[0]
        bboxes = boxes.numpy()[0][0:int(num_objects)]
        scores = scores.numpy()[0][0:int(num_objects)]
        classes = classes.numpy()[0][0:int(num_objects)]

        # Convert normalised boxes to pixel tlwh coordinates.
        original_h, original_w, _ = frame.shape
        bboxes = utils.format_boxes(bboxes, original_h, original_w)

        # Keep only detections whose class is in the allowed list.
        names = []
        deleted_indx = []
        for i in range(num_objects):
            class_name = class_names[int(classes[i])]
            if class_name not in allowed_classes:
                deleted_indx.append(i)
            else:
                names.append(class_name)
        names = np.array(names)
        count = len(names)
        if FLAGS.count:
            frame = cv2ImgAddText(frame,
                                  "当前交通流量: {}".format(count),
                                  location=(frame_width // 7 * 5, 35 * 0),
                                  # Green while under threshold, red when busy.
                                  text_color=(60, 255, 60) if count <= FLAGS.warn_num else (255, 60, 60),
                                  text_size=35
                                  )

        bboxes = np.delete(bboxes, deleted_indx, axis=0)
        scores = np.delete(scores, deleted_indx, axis=0)

        # Appearance features + detection objects for Deep SORT.
        features = encoder(frame, bboxes)
        detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in
                      zip(bboxes, scores, names, features)]

        boxs = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.class_name for d in detections])
        indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        tracker.predict()
        tracker.update(detections)

        info_row = 2  # screen row index for the per-track info lines below
        for track in tracker.tracks:
            # Skip tentative tracks and tracks that missed the current frame.
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            info_row += 1
            class_name = track.get_class()

            if FLAGS.count:
                # Track ids are accumulated in sets, so each vehicle/person is
                # counted once over the whole video.
                if class_name == 'car' or class_name == 'bus':
                    carSet.add(track.track_id)
                if class_name == 'person':
                    personSet.add(track.track_id)
                frame = cv2ImgAddText(frame,
                                      "车辆统计: {}".format(len(carSet)),
                                      location=(5, 35 * 0),
                                      text_color=(255, 100, 100),
                                      text_size=35
                                      )
                frame = cv2ImgAddText(frame,
                                      "路人统计: {}".format(len(personSet)),
                                      location=(5, 35 * 1),
                                      text_color=(255, 100, 100),
                                      text_size=35
                                      )

            # Stable per-track colour from the colormap.
            color = colors[int(track.track_id) % len(colors)]
            color = [c * 255 for c in color]
            # Fall back to the raw class name for classes missing from
            # name_dict (previously len(None) raised TypeError).
            label = name_dict.get(class_name, class_name)
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1] - 30)),
                          (int(bbox[0]) + (len(label) + len(str(track.track_id))) * 20,
                           int(bbox[1])),
                          color, -1)
            try:
                # Best-effort label rendering: a missing font file or glyph
                # must not abort the tracking loop.
                frame = cv2ImgAddText(frame,
                                      label + "-" + str(track.track_id),
                                      location=(int(bbox[0]), int(bbox[1] - 25)),
                                      text_color=(255, 255, 255),
                                      text_size=20
                                      )
            except Exception:
                pass
            if FLAGS.info:
                frame = cv2ImgAddText(frame,
                                      "目标详细信息:",
                                      location=(5, 35 * 2),
                                      text_color=(0, 255, 255),
                                      text_size=35
                                      )
                frame = cv2ImgAddText(frame,
                                      "目标ID: {}, 类别: {}".format(str(track.track_id),
                                                                '行人' if class_name == 'person' else '汽车'),
                                      location=(5, 35 * info_row),
                                      text_color=(0, 255, 255),
                                      text_size=35
                                      )

        fps = 1.0 / (time.time() - start_time)
        fps_list.append(fps)
        print("FPS: %.2f" % fps)
        result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        if not FLAGS.dont_show:
            cv2.imshow("Output Video", result)

        if FLAGS.output:
            out.write(result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Guard against an empty stream (no frames processed).
    if fps_list:
        print('FPS AVG: %.2f' % (np.array(fps_list).sum() / len(fps_list)))
    # Release capture/writer handles before tearing down the windows
    # (previously leaked — the output file could be left unfinalised).
    vid.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    try:
        app.run(main)
    except SystemExit:
        # absl's app.run raises SystemExit when main returns (or on --help);
        # swallow it so the script exits quietly.
        pass
