"""
An example that uses TensorRT's Python api to make inferences.
"""
import ctypes
import os
import random
import sys
import threading
import time

import cv2
import numpy as np
import pycuda.autoinit
import pycuda.driver as cuda
import tensorrt as trt
import torch
import torchvision
from config.getConfigData import upload_detetion_result

from yolov5.yolov5_trt_video import *
from model.drowsiness_detection import start_detect
import multiprocessing as mp

def queue_img_put(input_quene):  # read the camera once and fan frames out to several algorithms
    """Continuously capture frames from camera 0 and distribute each frame
    to every consumer queue.

    Opening the camera a single time and fanning out avoids multiple
    RTSP/camera reads that would exhaust bandwidth.

    Args:
        input_quene: list of ``multiprocessing.Queue`` objects, one per
            downstream algorithm process.

    Never returns; on any capture failure the camera is re-opened and the
    loop continues.
    """
    cam_index = 0  # fixed local capture device
    cap = cv2.VideoCapture(cam_index)
    while True:

        if not cap.isOpened():
            # NOTE(review): `logger` is only available via the star import of
            # yolov5.yolov5_trt_video — confirm that module exports it.
            logger.error('摄像头读取失败，重新获取摄像头...')
            time.sleep(1)
            cap = cv2.VideoCapture(cam_index)
            continue

        # grab()/retrieve() split: grab is cheap, so a failed grab is detected
        # without the cost of decoding the frame.
        ret = cap.grab()
        if not ret:
            # BUG FIX: the original referenced an undefined name `devid` here,
            # which raised NameError the first time grab() failed.
            print(str(cam_index) + " isOpened but can not grab")
            cap.release()
            cap = cv2.VideoCapture(cam_index)
            continue

        ret, frame = cap.retrieve()
        if not ret:
            logger.info("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
            continue

        # Fan the frame out; a queue that still holds an undelivered frame is
        # skipped (drop policy) so one slow consumer cannot stall the others.
        for queue in input_quene:
            if queue.qsize() < 1:
                queue.put(frame)



def callModel(quene):
    """Run the YOLOv5 TensorRT engine on frames pulled from *quene*.

    Args:
        quene: ``multiprocessing.Queue`` supplying BGR frames produced by
            ``queue_img_put``.
    """
    # Load the custom-layer plugin library the engine was built against;
    # CDLL registers the TensorRT plugins as a side effect.
    PLUGIN_LIBRARY = "build/libmyplugins.so"
    ctypes.CDLL(PLUGIN_LIBRARY)
    engine_file_path = "build/yolov5s.engine"

    # Class labels for this engine.
    # NOTE(review): `categories` is never referenced in this function —
    # presumably the wrapper resolves labels internally; confirm before removing.
    categories = ['call']

    # a YoLov5TRT instance
    yolov5_wrapper = YoLov5TRT(engine_file_path)

    try:
        # Blocks indefinitely, consuming frames from the queue.
        yolov5_wrapper.infer(quene)
    finally:
        # FIX: guarantee engine/CUDA cleanup even if infer() raises.
        # `destory` [sic] is the wrapper's own (misspelled) method name.
        yolov5_wrapper.destory()



if __name__ == "__main__":
    mp.set_start_method('spawn') # cuda need this.

    queue = []
    for i in range(2):
        queue.append(mp.Queue(maxsize=10))


    process_intrusion = []


    # # 为每个算法创建输入队列
    process_intrusion.append(mp.Process(target=queue_img_put, args=(queue, )))  # why need coma ???
    process_intrusion.append(mp.Process(target=callModel, args=(queue[0], )))
    process_intrusion.append(mp.Process(target=start_detect, args=(queue[1], )))


    [process.start() for process in process_intrusion]
    while True:
        time.sleep(3)







