import cv2
import numpy as np
import onnxruntime
import torch
from yolox.data.data_augment import preproc as preprocess
from yolox.data.datasets import COCO_CLASSES
from yolox.utils import mkdir, multiclass_nms, demo_postprocess, vis
import time
from threading import Thread
from flask import Flask
import logging
import ffmpegcv

# Latest person count produced by the detection loop; served over HTTP by get_data().
population = 0
# Cooperative shutdown flag: polled by detect(), set by stop_monitoring().
stop_monitor = False
app = Flask("ScenicSpotPopulationDetector")
# Silence werkzeug's per-request access log; keep only errors.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# Severity 3 == ERROR: suppress onnxruntime info/warning output.
onnxruntime.set_default_logger_severity(3)


def start_server_thread(port=8888):
    """Serve the population HTTP endpoint on a background daemon thread.

    The thread is marked as a daemon so it never blocks interpreter exit.
    """
    server = Thread(target=start_flask, args=(port,), daemon=True)
    server.start()


def _keep_person_detections(dets):
    """Filter NMS output, keeping only the COCO 'person' class (index 0).

    ``dets`` is an (N, 6) array: x1, y1, x2, y2, score, class index.
    Returns three parallel lists: boxes, scores, class indices.
    """
    final_boxes, final_scores, final_cls_inds = dets[:, :4], dets[:, 4], dets[:, 5]
    person_boxes, person_scores, person_inds = [], [], []
    for index, cls_ind in enumerate(final_cls_inds):
        if int(cls_ind) == 0:  # COCO class 0 is "person"
            person_boxes.append(final_boxes[index])
            person_scores.append(final_scores[index])
            person_inds.append(final_cls_inds[index])
    return person_boxes, person_scores, person_inds


def detect(video_path, detection_listener, interval, frame_interval):
    """Run person detection over a video and publish the population count.

    Opens ``video_path``, runs every ``frame_interval``-th frame through the
    YOLOX-s ONNX model, counts detected persons, updates the module-level
    ``population`` (served by the Flask endpoint), and invokes
    ``detection_listener(annotated_rgb_frame, person_count)``. Sleeps
    ``interval`` seconds after each processed frame. The loop exits when the
    video ends or ``stop_monitoring()`` is called.

    Raises:
        ValueError: if a frame is not exactly 640x640 with 3 channels.
    """
    global population
    global stop_monitor

    capture = ffmpegcv.VideoCapture(video_path)

    # The providers argument selects the XuanTie (Shl) CPU-optimized kernels.
    session = onnxruntime.InferenceSession("../yolox_s.onnx", providers=["ShlExecutionProvider"])
    stop_monitor = False

    now_frame = 0

    try:
        while not stop_monitor:
            ret, frame = capture.read()
            if not ret:
                break  # end of stream or read failure

            now_frame += 1
            if now_frame % frame_interval != 0:
                continue  # subsample frames to limit inference load

            if frame.shape != (640, 640, 3):
                # The model input is fixed at 640x640; fail loudly instead of
                # silently producing meaningless detections.
                raise ValueError("Invalid video size, only videos with 640x640 are supported.")

            img, ratio = preprocess(frame, frame.shape)

            ort_inputs = {session.get_inputs()[0].name: img[None, :, :, :]}
            output = session.run(None, ort_inputs)
            predictions = demo_postprocess(output[0], frame.shape)[0]

            boxes = predictions[:, :4]
            # Per-class confidence = objectness * class probability.
            scores = predictions[:, 4:5] * predictions[:, 5:]

            # Convert (cx, cy, w, h) to corner format and undo the resize ratio.
            boxes_xyxy = np.ones_like(boxes)
            boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.
            boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.
            boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.
            boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.
            boxes_xyxy /= ratio
            dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45, score_thr=0.1)
            person_nums = 0

            origin_img = frame
            if dets is not None:
                person_boxes, person_scores, person_inds = _keep_person_detections(dets)
                origin_img = vis(origin_img, person_boxes, person_scores, person_inds,
                                 conf=0.1, class_names=COCO_CLASSES)
                person_nums = len(person_boxes)

            # Convert BGR -> RGB before handing the frame to the listener.
            origin_img = cv2.cvtColor(origin_img, cv2.COLOR_BGR2RGB)

            # Publish the count for the HTTP endpoint, then notify the caller.
            population = person_nums
            detection_listener(origin_img, person_nums)

            time.sleep(interval)
    finally:
        # Release the capture even if inference or the listener raises.
        capture.release()


def start_detect_thread(video_path, detection_listener, interval, frame_interval):
    """Run ``detect`` on a freshly started worker thread and return immediately."""
    worker = Thread(
        target=detect,
        args=(video_path, detection_listener, interval, frame_interval),
    )
    worker.start()


@app.route('/population', methods=['GET'])
def get_data():
    """HTTP GET endpoint: return the latest person count as plain text."""
    current_count = population
    return str(current_count)


def start_flask(port):
    """Serve the Flask app on ``port``; blocks until the server shuts down."""
    run_options = {"port": port}
    app.run(**run_options)


def stop_monitoring():
    """Ask the detection loop to stop: detect() polls this flag each frame."""
    global stop_monitor
    stop_monitor = True
