import os, sys
import numpy as np
import cv2
import time
import multiprocessing as mp
import threading

from inference import Inference
from utilities import *
from sound import *
import ops

# Inter-process channels: raw frames to the YOLO worker, detection
# results back, plus a one-shot queue for the model's class-name map.
queue_img = mp.Queue()
queue_yolo_result = mp.Queue()
queue_names = mp.Queue()

# Timestamp of the last audio announcement (used to throttle playback).
t_play = time.time()

# Minimum detection confidence to draw/announce a result.
confidence_threshold = 0.65
# Minimum seconds between two audio announcements.
interval = 1.5


def process_yolo_working(queue_names, queue_img, queue_yolo_result):
    """Inference worker-process loop.

    Loads the NCNN model, publishes its class-name mapping once on
    ``queue_names``, then forever pulls frames from ``queue_img``, runs
    inference (timing each call), and pushes results onto
    ``queue_yolo_result``. Runs until the process is terminated.
    """
    engine = Inference(f"{os.getcwd()}/weights/yolo11n_ncnn_model")

    # Hand the class-id -> label mapping back to the parent exactly once.
    queue_names.put(engine.names)

    while True:
        frame = queue_img.get()

        timer = ops.Profile()
        with timer:
            detections = engine.inference(frame)

        print("run time:")
        print(timer.dt)
        print("")

        queue_yolo_result.put(detections)


def get_sorted_results(results):
    """Sort detections by confidence and de-duplicate by class name.

    Parameters
    ----------
    results : sequence of (cls, name, confidence, bbox) tuples.

    Returns
    -------
    numpy structured array with fields ("cls", "name", "confidence",
    "bbox"), sorted by confidence descending, keeping only the
    highest-confidence entry per name and dropping "person" entirely.
    """
    dtype = [("cls", float), ("name", "U18"), ("confidence", float), ("bbox", object)]
    arr = np.array(results, dtype=dtype)

    # Sort by the confidence field, highest first.
    sorted_arr = arr[np.argsort(arr["confidence"])[::-1]]

    # De-duplicate on the name field. Accumulate in a plain list — growing
    # an ndarray with np.append inside the loop is O(n^2).
    seen = set()
    unique = []
    for item in sorted_arr:
        name = item["name"]
        if name not in seen and name != "person":
            seen.add(name)
            unique.append(tuple(item))

    return np.array(unique, dtype=dtype)


def play_sound(results):
    """Announce the single best detection as English audio.

    Sorts and de-duplicates *results*, then plays the label of the first
    (highest-confidence) entry that exceeds ``confidence_threshold``.
    At most one sound is played per call.
    """
    # No `global` statement needed: confidence_threshold is only read.
    for _cls, label, conf, _bbox in get_sorted_results(results):
        if conf > confidence_threshold:
            play_en(label)
            break


def show_camera():
    """Main (parent-process) loop: capture frames from the default camera,
    feed them to the YOLO worker process, draw returned detections, and
    periodically trigger audio announcements.

    Blocks forever; the camera is re-opened after read failures or
    exceptions.
    """
    global interval
    result = None
    is_play_sound = False
    # NOTE(review): this Event is never used anywhere in the function.
    event = threading.Event()

    # Run inference in a separate process so it never blocks capture/display.
    process_yolo = mp.Process(
        target=process_yolo_working, args=(queue_names, queue_img, queue_yolo_result)
    )
    process_yolo.start()

    # The worker publishes the class-id -> label mapping once at startup.
    names = queue_names.get()

    # Outer loop: reconnect the camera whenever the inner loop exits
    # (read failure) or an exception is raised.
    while True:
        try:
            cap = cv2.VideoCapture(0, cv2.CAP_V4L2)
            # cap.set(cv2.CAP_PROP_FPS, 30)

            unique_sorted_results = []
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                im_show = frame

                # Feed the worker only when it has drained the previous
                # frame, so it always sees the freshest image.
                if queue_img.empty():
                    queue_img.put(frame)

                # Pick up the latest finished detection, if any; a new
                # result also arms the audio announcement.
                if not queue_yolo_result.empty():
                    result = queue_yolo_result.get()
                    is_play_sound = True

                # `result` persists across frames, so the last detections
                # keep being drawn until a newer result replaces them.
                if result is not None:
                    # Flatten the raw result into (cls, label, conf, bbox)
                    # tuples for sorting/announcement.
                    results = []
                    for i, data in enumerate(result.data):
                        cls, conf, bbox = result.cls[i], result.conf[i], result.data[i]
                        # im_show = cv2.rectangle(origin, bbox, cls, conf, names)
                        label = names[int(cls)]
                        color = colors(int(cls), True)
                        results.append((cls, label, conf, np.round(bbox, 0)))
                        # im_show = box_label(frame, bbox, label, conf, color)
                    # cv2.putText(im_show, f"{base_name}", (10, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (50, 50, 220), 1)
                    if len(results):
                        unique_sorted_results = get_sorted_results(results)
                        print(unique_sorted_results)
                        # Draw every confident detection; overlay the top
                        # (highest-confidence) label in large text.
                        for i, unique_sorted_result in enumerate(unique_sorted_results):
                            cls, label, conf, bbox = unique_sorted_result
                            if conf > confidence_threshold:
                                label = names[int(cls)]
                                color = colors(int(cls), True)
                                im_show = box_label(frame, bbox, label, conf, color)
                                if i == 0:
                                    cv2.putText(
                                        im_show,
                                        f"{label}",
                                        (20, 70),
                                        0,
                                        2.5,
                                        (20, 20, 250),
                                        thickness=2,
                                        lineType=cv2.LINE_AA,
                                    )

                # Throttle audio: at most one announcement per `interval`
                # seconds, and only when a new detection has arrived.
                global t_play
                if time.time() - t_play > interval:
                    if is_play_sound:
                        is_play_sound = False
                        t_play = time.time()

                        thread_play_sound = threading.Thread(
                            target=play_sound, args=[results], daemon=True
                        )
                        thread_play_sound.start()

                cv2.imshow("detect", im_show)
                cv2.waitKey(1)

            # Release the camera resource before reconnecting.
            cap.release()
        except Exception as e:
            # NOTE(review): this broad catch keeps the loop alive, but `cap`
            # is leaked if the exception fires before cap.release().
            print(e)

        time.sleep(1)


if __name__ == "__main__":
    # Entry point: runs the capture/display loop here and spawns the
    # YOLO inference worker process internally.
    show_camera()
