#include <algorithm>
#include <cstdio>
#include <ctime>
#include <iostream>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>

using namespace cv;
using namespace cv::dnn;
using namespace std;

// Class labels, index-matched to the per-class score channels of the model output.
const char* classNames[] = { "reflective_clothes", "other_clothes", "hat", "person" };
const int numClasses = sizeof(classNames) / sizeof(classNames[0]);

int main(int argc, char** argv)
{
    clock_t  Begin, End;
    
    
    // 加载YOLOv5目标检测模型
    Net net = readNet("final.onnx");
    //net.setPreferableBackend(DNN_BACKEND_CUDA); // 使用CUDA加速
    //net.setPreferableTarget(DNN_TARGET_CUDA);

    //调用摄像头
    VideoCapture cap(0);

    // 创建窗口以显示摄像头捕捉的图像
    //namedWindow("Detection Results", WINDOW_NORMAL);


    while (1)
    {
        
        Mat frame,frame1;
        // 加载测试图片并记录其大小
        //frame = imread(".\\2.jpg");

        // 从摄像头捕捉一帧图像
        cap.read(frame);
        int height = frame.rows;
        int width = frame.cols;

        // 对测试图片进行预处理，转换为模型所需的输入格式
        Mat inputBlob = blobFromImage(frame, 1.0 / 255.0, Size(640, 640), Scalar(0, 0, 0), true, false, CV_32F);
        //Mat inputBlob1 = blobFromImage(frame, 1.0 / 255.0, Size(640, 640), Scalar(0, 0, 0), true, false, CV_32F);
        /*Mat cv::dnn::blobFromImage(InputArrayimage, double scalefactor = 1.0, constSize & size = Size(), constScalar & mean = Scalar(), bool swapRB = false, bool crop = false, int ddepth = CV_32F);
        */
        
        //cout << "tick=" << double(End1 - Begin1) << endl;
        Begin = clock();
        net.setInput(inputBlob);
        //net.setInput(inputBlob1);

        // 执行模型推理，获取检测结果
        Mat output = net.forward();
        vector<Mat> detections;
        for (int i = 0; i < output.size[0]; i++)
        {
            Mat detection(output.size[1], output.size[2], CV_32F, output.ptr<float>(i));
            detections.push_back(detection);
        }
        End = clock();
        cout << "tick=" << double(End - Begin) << endl;
     
        // 设置过滤阈值
        float threshold = 0.85;
        // 对每个检测结果进行可视化
        for (int i = 0; i < detections.size(); i++)
        {
            Mat detection = detections[i];
            for (int j = 0; j < detection.rows; j++)
            {
                float* data = detection.ptr<float>(j);
                float* class_scores = data + 5; // class scores are located in data[5..8]
                auto max_it = std::max_element(class_scores, class_scores + 4); // find iterator to maximum score
                int classId = std::distance(class_scores, max_it); // calculate index of maximum score
                //int classId = (int)(data[5]);
                float score = data[4] * data[5 + classId];
                if (score > threshold)
                {
                    printf("%s\t%f\n", classNames[classId], score);
                    //printf("%f\t%f\t%f\t%f\n", data[0], data[1], data[2], data[3]);
                    //int left = (int)(((data[0] - (data[2] / 2)) / 640) * width);
                    //int top = (int)(((data[1] - (data[3] / 2)) / 640) * height);
                    //int width1 = (int)(((data[2]) / 640) * width);
                    //int height1 = (int)(((data[3]) / 640) * height);
                    //printf("%d\t%d\t%d\t%d\n", left, top, width1, height1);
                    //printf("%f %f %f %f ", left, top, right, bottom);
                    //Rect box(left, top, width1, height1);

                    //String label = classNames[classId];
                    //putText(frame, label, Point(left, top - 5), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0), 1);
                    //rectangle(frame, box, Scalar(0, 255, 0), 1);
                }
            }
        }

        // 将可视化后的图像保存到文件或显示在屏幕上
        //imwrite("C:\\Users\\jinri\\Desktop\\c++\\ConsoleApplication3\\result1.jpg", frame);
        //imshow("Detection Results", frame);
      
        //cout << "tick=" << double(End3 - Begin3) << endl;
        //waitKey(1);
    }
    // 释放VideoCapture对象
    cap.release();

    // 销毁窗口
    //destroyWindow("Detection Results");


    return 0;
}