from __future__ import division, print_function, absolute_import

import _thread
import sys
import os
import warnings
from timeit import time

import cv2
import numpy as np
from PIL import Image

from deep_sort import nn_matching
from deep_sort import preprocessing
from deep_sort.detection import Detection
from deep_sort.detection_yolo import Detection_YOLO
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
from yolo import YOLO

warnings.filterwarnings('ignore')


class TrackerArgs:
    """Shared mutable control state for the inference loop.

    One instance is written by the stdin listener thread and polled by
    ``yolo_infer``; plain attribute assignment is the only synchronization.
    """

    def __init__(self):
        # conn: keep processing frames; readAgain: retry a failed read once
        # after the video source was switched.
        self.conn, self.readAgain = True, False
        # Current video source ('' = none, '0' = webcam, else path/URL).
        self.address = ''
        # Frames to skip between detections, and the last/seek frame index.
        self.skipFrame = self.frameIndex = 0


input_args = TrackerArgs()


def receive_command():
    """Listen on stdin forever and translate commands into ``input_args``.

    Intended to run in a daemon thread.  Recognized commands:
      * ``stop``             -- pause processing and clear the video address
      * ``pause``            -- pause processing, keep the current address
      * ``continue,<idx>``   -- resume from frame <idx>
      * ``skipFrame,<n>``    -- process every (n+1)-th frame
      * ``<address>,<n>``    -- switch to a new source with skip rate <n>
    """
    while True:
        command = input()
        if command == 'stop':
            input_args.conn = False
            input_args.address = ''
        elif command == 'pause':
            input_args.conn = False
        else:
            text = command.split(',')
            try:
                if text[0] == "continue":
                    input_args.frameIndex = int(text[1])
                    input_args.conn = True
                elif text[0] == "skipFrame":
                    input_args.skipFrame = int(text[1])
                else:
                    input_args.address = text[0]
                    input_args.skipFrame = int(text[1])
                    input_args.readAgain = True
                    input_args.conn = True
            except (IndexError, ValueError):
                # A malformed command (missing comma or non-numeric field)
                # must not kill the listener thread; report and keep listening.
                # stderr is used because stdout carries the detection protocol.
                print("ignored malformed command: " + command, file=sys.stderr)


def yolo_infer_worker(video_input, frame_skip, model, category_name=None):
    """Start the stdin command listener in a background thread, then run
    the (blocking) inference loop in the calling thread.

    Parameters mirror ``yolo_infer``: ``video_input`` is the initial source
    ('0' selects the webcam), ``frame_skip`` the number of frames skipped
    between detections, ``model`` the detector, ``category_name`` is passed
    through unused.
    """
    _thread.start_new_thread(receive_command, ())
    yolo_infer(video_input, frame_skip, model, category_name)

def yolo_infer(video_input, frame_skip, model, category_name):
    """Run YOLO detection (optionally Deep SORT tracking) over a video.

    Frames are read from ``video_input`` ('0' selects webcam device 0,
    anything else is treated as a file/stream path).  Every
    ``(frame_skip + 1)``-th frame is passed through ``model.detect_image``
    and one line per processed frame is written to stdout:
    ``FrameIndex:<n> <class> <x1> <y1> <x2> <y2> ...``.  The loop is steered
    at runtime through the shared ``input_args`` object (see
    ``receive_command``): it can pause, seek, change the skip rate, or
    switch to another source.  Runs until the process is terminated.

    Args:
        video_input: initial video source.
        frame_skip: initial number of frames to skip between detections.
        model: detector exposing ``detect_image(PIL.Image) ->
            (boxes, confidences, classes)``.
        category_name: unused; kept for interface compatibility.
    """
    frame_index = -1
    input_args.address = video_input
    input_args.skipFrame = frame_skip
    input_args.frameIndex = frame_index

    oldAddress = video_input
    if oldAddress == '0':
        video_capture = cv2.VideoCapture(0)
    else:
        video_capture = cv2.VideoCapture(oldAddress)

    istart = True  # first-frame flag: emit 'start' and stamp t1 exactly once
    t1 = 0.0       # wall-clock time of the first processed frame (for fps)

    # Deep SORT parameters.
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # Deep SORT appearance-feature encoder (used only when tracking=True).
    model_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'model_data/mars-small128.pb')
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    tracking = False  # detection-only by default; flip to enable Deep SORT

    while True:
        # A 'switch source' command arrived: reopen the capture and restart.
        if oldAddress != input_args.address:
            if len(input_args.address) == 0:
                oldAddress = ''
            else:
                oldAddress = input_args.address
                if oldAddress == '0':
                    video_capture = cv2.VideoCapture(0)
                else:
                    video_capture = cv2.VideoCapture(oldAddress)
                frame_index = -1
                input_args.frameIndex = -1
                istart = True
        # A 'continue,<idx>' command arrived: seek the capture.
        if frame_index != input_args.frameIndex:
            frame_index = input_args.frameIndex
            video_capture.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
        # A 'skipFrame,<n>' command arrived: update the skip rate.
        if input_args.skipFrame != frame_skip:
            frame_skip = input_args.skipFrame
        if input_args.conn:
            ret, frame = video_capture.read()  # BGR frame (e.g. 640x480x3)
            if not ret:
                if input_args.readAgain:
                    # The source was just switched; retry once before giving up.
                    input_args.readAgain = False
                    continue
                else:
                    # End of stream: report, go idle until a new command arrives.
                    input_args.conn = False
                    oldAddress = ''
                    input_args.address = ''
                    print('end')
                    print("fps=%f" % ((frame_index + 1.0) / (time.time() - t1)))
                    continue
            frame_index = frame_index + 1
            input_args.frameIndex = frame_index
            if istart:
                print('start')
                t1 = time.time()
                istart = False

            if frame_index % (frame_skip + 1) == 0:
                image = Image.fromarray(frame[..., ::-1])  # BGR -> RGB
                # BUG FIX: call the `model` parameter rather than relying on a
                # module-level `yolo` global (which only exists when the file
                # is run as __main__).
                boxes, confidence, classes = model.detect_image(image)

                if tracking:
                    features = encoder(frame, boxes)
                    detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in
                                  zip(boxes, confidence, classes, features)]
                else:
                    detections = [Detection_YOLO(bbox, confidence, cls) for bbox, confidence, cls in
                                  zip(boxes, confidence, classes)]
                # Run non-maxima suppression on the (top-left width-height) boxes.
                boxes = np.array([d.tlwh for d in detections])
                scores = np.array([d.confidence for d in detections])
                indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
                detections = [detections[i] for i in indices]

                sys.stdout.write("FrameIndex:" + str(frame_index) + ' ')

                # Emit "<class> <x1> <y1> <x2> <y2>" per surviving detection.
                for det in detections:
                    bbox = det.to_tlbr()
                    if len(classes) > 0:
                        cls = det.class_name
                        sys.stdout.write(
                            f'{str(cls)} {str(bbox[0])} {str(bbox[1])} {str(bbox[2])} {str(bbox[3])} ')
                if tracking:
                    # Advance the tracker and emit "<track_id> <x1> <y1> <x2> <y2>"
                    # for each confirmed, recently-updated track.
                    tracker.predict()
                    tracker.update(detections)
                    for track in tracker.tracks:
                        if not track.is_confirmed() or track.time_since_update > 1:
                            continue
                        bbox = track.to_tlbr()
                        sys.stdout.write(
                            f'{str(track.track_id)} {str(bbox[0])} {str(bbox[1])} {str(bbox[2])} {str(bbox[3])} ')
                sys.stdout.write('\n')
        else:
            # Paused/stopped: poll for new commands every 3 seconds.
            time.sleep(3)
    # NOTE: unreachable (the loop above never breaks); kept so the intended
    # cleanup is visible if a break is ever added.
    video_capture.release()


# Entry point: build the detector, then block on stdin for the initial
# command before starting the worker.
if __name__ == '__main__':
    print("进入 Main")  # "entered main"
    yolo = YOLO()  # detector instance, handed to yolo_infer_worker below
    print("构造Yolo完毕，等待输入")  # "YOLO built, waiting for input"
    start_command = input()
    # start_command = r"E:/Video/车辆视频/video(9).MP4,0"
    print("输出完毕，输入是：")  # "done; the input is:"
    print(start_command)
    # Drop the first character, then strip double quotes.  Presumably the
    # launcher sends something like `x"<path>,<skip>"` -- TODO confirm the
    # exact protocol with the calling process.
    start_command=start_command[1:]
    start_command=start_command.replace("\"","")
    print("替换后")  # "after replacement"
    print(start_command)
    # Expected shape after cleanup: "<video_address>,<skip_frames>".
    # NOTE(review): no guard here -- a missing comma or non-numeric skip
    # value raises IndexError/ValueError and aborts startup.
    text = start_command.split(',')
    print("拆分路径完毕")  # "path split done"
    print(text[0])
    print(text[1])
    print(int(text[1]))
    yolo_infer_worker(text[0], int(text[1]), yolo)