# YOLOv4-tiny object detection
import cv2
import numpy as np
from python.utils import *


class Detector(PipeNode):
    """Pipeline stage that runs YOLOv4-tiny object detection on a frame.

    Uses OpenCV's DNN module to build a detection model from the frame's
    ``.cfg`` / ``.weights`` files and appends one ``DetBox`` per detection
    to ``frame.det_box_vec``.
    """

    def __call__(self, frame: PipeFrame) -> None:
        """Detect objects in ``frame.img`` and record them on the frame.

        Requires ``frame.img``, ``frame.cfg_path`` (network structure),
        ``frame.weights_path`` (trained weights) and ``frame.classes_path``
        (class-name list, one name per line). Returns None either way.
        """
        app_logger.info("detecting...")

        # Guard clause: skip the frame when any required input is missing.
        if (
            frame.img is None
            or frame.cfg_path is None
            or frame.weights_path is None
            or frame.classes_path is None
        ):
            app_logger.info(
                "frame.img is None or frame.cfg_path is None or frame.weights_path is None or frame.classes_path is None"
            )
            return

        # Build the network from the .cfg structure file and .weights file.
        # NOTE(review): rebuilding the model on every frame is expensive;
        # consider caching it on self so it is constructed only once.
        net = cv2.dnn.readNet(frame.cfg_path, frame.weights_path)
        # Wrap the raw network in a high-level detection model.
        model = cv2.dnn_DetectionModel(net)

        # size: input images are rescaled to 416x416 (a larger size improves
        #   accuracy but slows detection).
        # scale: maps pixel values from OpenCV's [0, 255] range to the
        #   network's expected [0, 1] range.
        model.setInputParams(size=(416, 416), scale=1 / 255)

        # Load the class-name list, one stripped name per line.
        # NOTE(review): currently unused here; the commented-out Render code
        # looks class names up by id, so the list is kept for that stage.
        with open(frame.classes_path) as file_obj:
            classes = [line.strip() for line in file_obj]

        # detect() returns (class ids, confidences, boxes) where each box is
        # (x, y, w, h); 0.5 is the minimum confidence and 0.3 the
        # non-maximum-suppression threshold.
        class_ids, scores, bboxes = model.detect(frame.img, 0.5, 0.3)

        # flatten() tolerates both the flat and the (N, 1)-shaped arrays that
        # different OpenCV versions return.
        for class_id, score, bbox in zip(
            np.array(class_ids).flatten(), np.array(scores).flatten(), bboxes
        ):
            # Convert (x, y, w, h) to corner coordinates for DetBox.
            frame.det_box_vec.append(
                DetBox(
                    bbox[0],
                    bbox[1],
                    bbox[0] + bbox[2],
                    bbox[1] + bbox[3],
                    score,
                    class_id,
                )
            )


class Render(PipeNode):
    """Pipeline stage reserved for drawing detection results onto the frame.

    The box-drawing logic (rectangle, corner accents, class label and
    confidence text) is not enabled yet; for now this stage only logs that
    it ran and leaves the frame untouched.
    """

    def __call__(self, frame: PipeFrame):
        """Log the render step; rendering itself is not yet implemented."""
        app_logger.info("Render...")


class OutPut(PipeNode):
    """Final pipeline stage; placeholder for emitting the processed frame.

    Intended for post-processing/output work (e.g. serialization); currently
    it only logs that it ran and leaves the frame untouched.
    """

    def __call__(self, frame: PipeFrame):
        """Log the output step; no output handling is implemented yet."""
        app_logger.info("output...")
