from multiprocessing import Process, Queue
from queue import LifoQueue
from tools.camera.search_camera import setCamera
from ai_library.components.tflite_infer import TfliteRun
from ai_library.components.config import cfg
from ai_library.components.prior_box import priorsBox
from ai_library.components.utils import parsePredict, showImageZH
from plugin.web.flask_app import flask_task
from tools.log import log
import numpy as np
import cv2
import time
import argparse

# Capture image frames from the camera
def videoCapture(q_image: Queue, q_infer: Queue, q_out: Queue, img_mode=0):
    """Frame-capture loop, intended to run in its own process.

    Reads frames from the camera selected by ``img_mode`` (falling back to a
    local sample image when no camera is available or a read fails), forwards
    raw frames to the inference process via ``q_image``, draws the latest
    detection results received on ``q_infer`` onto the frame, and publishes
    the annotated frame on ``q_out``.

    Args:
        q_image: outgoing raw frames as ``bytearray``; a ``False`` sentinel is
            pushed when the user presses ESC so the inference process exits.
        q_infer: incoming ``[boxes, classes, scores]`` detection results.
        q_out: outgoing annotated frames as ``bytearray`` (e.g. for a web view).
        img_mode: camera selector passed to ``setCamera()`` — 'rtsp' for a
            network camera, a numeric id for a USB camera, 'auto' to search,
            or empty to loop over the local sample image.
    """
    predictions = []

    # Reserved for the (currently disabled) RGB serial-control thread below.
    q_data = LifoQueue(1)

    # rgb_control_thread = RGBControlSerialThread(q_data)
    # rgb_control_thread.start()

    cap = setCamera(img_mode)  # configure the camera source

    try:
        while True:
            # NOTE(review): setCamera() appears to return "" when no camera is
            # found — a truthiness check covers both "" and None.
            if cap:
                ret, frame = cap.read()
                if not ret:
                    # Camera stopped delivering frames: release the dead
                    # handle (the original leaked it) and fall back to the
                    # sample image for all subsequent iterations.
                    cap.release()
                    cap = None
                    frame = cv2.imread("./imgs/sample.jpg")
                    print("read video frame failed.")
            else:
                frame = cv2.imread("./imgs/sample.jpg")

            frame = cv2.resize(frame, (cfg["cam_width"], cfg["cam_height"]))

            # Hand the raw frame to the inference process; drop it when the
            # consumer is still busy with the previous one.
            if not q_image.full():
                q_image.put(bytearray(frame))

            # Pick up the newest detection result, if any has arrived.
            if not q_infer.empty():
                predictions = q_infer.get()

            # Draw the most recent detections (possibly from an older frame)
            # onto the current frame.
            if predictions:
                boxes, classes, scores = predictions
                for prior_index in range(len(classes)):
                    frame = showImageZH(frame, boxes, classes, scores,
                                        cfg["cam_height"], cfg["cam_width"],
                                        prior_index, cfg['labels_list'])

            # Publish the annotated frame; drop when the consumer is behind.
            if not q_out.full():
                q_out.put(bytearray(frame))

            # ESC exits; the False sentinel tells the inference process to
            # shut down as well.
            c = cv2.waitKey(5) & 0xff
            if c == 27:
                q_image.put(False)
                break
    except KeyboardInterrupt:
        print("exit video_capture!")
    finally:
        # Release the camera on every exit path. The original released it only
        # on KeyboardInterrupt and would raise AttributeError there whenever
        # cap had been reset to "" after a failed read.
        if cap:
            cap.release()

def bzwInfer(q_image: Queue, q_infer: Queue):
    """TFLite inference loop, intended to run in its own process.

    Pulls raw frames from ``q_image``, preprocesses them to the model's input
    format, runs detection, and publishes ``[boxes, classes, scores]`` on
    ``q_infer``. Exits when it receives the ``False`` sentinel pushed by the
    capture process.

    Args:
        q_image: incoming raw frames as ``bytearray`` (BGR, cam_height x
            cam_width x 3), or ``False`` to request shutdown.
        q_infer: outgoing detection results as ``[boxes, classes, scores]``.
    """
    tflite_run = TfliteRun(model_path=cfg["model_path"])
    priors, _ = priorsBox(cfg, image_sizes=cfg["input_size"])
    priors = priors.astype(np.float16)
    try:
        while True:
            # Blocking get replaces the original empty()-poll, which spun a
            # full CPU core while waiting for the next frame.
            image = q_image.get()
            if image is False:  # shutdown sentinel from videoCapture
                break
            image = np.array(image).reshape(cfg["cam_height"], cfg["cam_width"], 3)

            # Preprocess: model expects a 1x240x320x3 RGB tensor normalized
            # to [-0.5, 0.5].
            input_data = cv2.resize(image, (320, 240))
            input_data = np.float32(input_data.copy())
            input_data = cv2.cvtColor(input_data, cv2.COLOR_BGR2RGB)
            input_data = input_data / 255.0 - 0.5
            input_data = input_data[np.newaxis, ...]

            predictions = tflite_run.inference(input_data)
            boxes, classes, scores = parsePredict(predictions, priors, cfg)

            # Drop the result rather than block when the consumer is behind.
            if not q_infer.full():
                q_infer.put([boxes, classes, scores])

    except KeyboardInterrupt:
        log.info("--模型推理出错--")

if __name__ == '__main__':
    # Command-line interface: --img_mode selects the camera source
    # (see the help text below for the accepted values).
    parser = argparse.ArgumentParser(description='--img_mode 设置模式 \
                                                 参数为\'rtsp\'时设置为网络摄像头模式 \
                                                 参数为数字(摄像头id)时设置为USB摄像头 \
                                                 参数为\'auto\'时自动搜索摄像头\
                                                 参数为空时识别本地图片 ')
    parser.add_argument('--img_mode', type=str, default='rtsp')
    args = parser.parse_args()
    print("img_mode==", args.img_mode)

    # Single-slot queues: capture -> inference (raw frames), inference ->
    # capture (detection results), capture -> web view (annotated frames).
    raw_frame_queue = Queue(maxsize=1)
    detection_queue = Queue(maxsize=1)
    web_view_queue = Queue(maxsize=1)

    capture_proc = Process(target=videoCapture,
                           args=(raw_frame_queue, detection_queue,
                                 web_view_queue, args.img_mode))
    infer_proc = Process(target=bzwInfer,
                         args=(raw_frame_queue, detection_queue))

    for proc in (capture_proc, infer_proc):
        proc.start()

    # flaskRun(host="127.0.0.1", port="8080", webViewIpcFrameQueue=web_view_queue)

    # Wait for the capture process to finish, then tear both processes down.
    capture_proc.join()

    for proc in (capture_proc, infer_proc):
        proc.terminate()
