// C system headers
#include <getopt.h>
// C++ standard library
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <string>
#include <vector>
// Third-party: OpenCV
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;

// RAII stopwatch: records the construction time and, on destruction,
// prints the elapsed wall-clock time in microseconds to stdout.
// Usage: create a TIMER at the top of the scope to be measured.
class TIMER{
public:
    TIMER(){
        my_start = std::chrono::high_resolution_clock::now();
    }
    ~TIMER(){
        my_end = std::chrono::high_resolution_clock::now();
        // duration_cast on the time_point difference replaces the original's
        // manual epoch-count subtraction — same result, stays inside the
        // chrono type system.
        const auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(my_end - my_start).count();
        double during = static_cast<double>(ns) * 0.001; // ns -> us
        std::cout << "time:" << during << "us" << std::endl;
    }
    // A stopwatch has identity, not value — copying one is meaningless.
    TIMER(const TIMER&) = delete;
    TIMER& operator=(const TIMER&) = delete;
private:
    std::chrono::time_point<std::chrono::high_resolution_clock> my_start;
    std::chrono::time_point<std::chrono::high_resolution_clock> my_end;
};

// One post-NMS detection result: winning class, its score, the box in
// image coordinates, and the colour used when drawing the overlay.
struct Detection
{
    int class_id{0};        // index into the global `classes` table
    string className{};     // human-readable label, classes[class_id]
    float confidence{0.0};  // winning class score (above modelScoreThreshold)
    Scalar color{};         // per-detection BGR drawing colour
    Rect box{};             // detection rectangle in image coordinates
};

// The 80 COCO class names, indexed by the model's class id
// (the score vector in each output row follows this order).
vector<string> classes{"person", "bicycle", "car",
                       "motorcycle", "airplane", "bus", "train", "truck", "boat",
                       "traffic light", "fire hydrant", "stop sign", "parking meter",
                       "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
                       "elephant", "bear", "zebra", "giraffe", "backpack",
                       "umbrella", "handbag", "tie", "suitcase", "frisbee",
                       "skis", "snowboard", "sports ball", "kite", "baseball bat",
                       "baseball glove", "skateboard", "surfboard", "tennis racket",
                       "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl",
                       "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
                       "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
                       "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
                       "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
                       "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
                       "hair drier", "toothbrush"};

// NOTE(review): modelConfidenceThreshold is not referenced anywhere in this
// file's visible code — confirm whether it is dead or used elsewhere.
float modelConfidenceThreshold {0.25};
float modelScoreThreshold      {0.45}; // per-box class-score cutoff (also passed to NMSBoxes)
float modelNMSThreshold        {0.50}; // IoU threshold for non-maximum suppression


int main(void)
{

    dnn::Net net; // 声明网络

    string ModelPath = "../yolov8np.onnx"; // onnx模型路径
    Size ModelSize = Size(640, 480);       // 模型默认输入图片大小

    string ImageName = "../image.jpg";

    Mat frame = imread(ImageName); // 读取输入图片
    resize(frame, frame, ModelSize);

    net = dnn::readNetFromONNX(ModelPath); // 网络设置
    net.setPreferableBackend(dnn::DNN_BACKEND_OPENCV);
    net.setPreferableTarget(dnn::DNN_TARGET_CPU);

    TIMER t;

    Mat blob;
    dnn::blobFromImage(frame, blob, 1.0 / 255.0, ModelSize, Scalar(), true, false);

    net.setInput(blob);

    vector<Mat> outputs;
    net.forward(outputs, net.getUnconnectedOutLayersNames());

    int rows = outputs[0].size[2];
    int dimensions = outputs[0].size[1];

    outputs[0] = outputs[0].reshape(1, dimensions);
    transpose(outputs[0], outputs[0]); // 图像转置

    float *data = (float *)outputs[0].data;

    float x_factor = frame.cols / ModelSize.width;
    float y_factor = frame.rows / ModelSize.height;

    vector<int> class_ids;
    vector<float> confidences;
    vector<Rect> boxes;

    for (int i = 0; i < rows; ++i)
    {
        float *classes_scorces = data + 4;
        Mat scores(1,classes.size(),CV_32FC1,classes_scorces);
        Point class_id;
        double maxClassScore;

        minMaxLoc(scores,0,&maxClassScore,0,&class_id);     //获取最大最小值及位置
        if(maxClassScore > modelScoreThreshold){
            confidences.push_back(maxClassScore);
            class_ids.push_back(class_id.x);

            float x = data[0];
            float y = data[1];
            float w = data[2];
            float h = data[3];

            int left = int((x-0.5*w)*x_factor);
            int top = int((y-0.5*h)*y_factor);
            int width = int(w*x_factor);
            int height = int(h*y_factor);

            boxes.push_back(Rect(left,top,width,height));
        }
        data += dimensions;
    }

    vector<int> nms_result;
    dnn::NMSBoxes(boxes,confidences,modelScoreThreshold,modelNMSThreshold,nms_result);

    vector<Detection> detections{};
    for(int i = 0; i < nms_result.size(); ++i){
        int idx = nms_result[i];

        Detection result;
        result.class_id = class_ids[idx];
        result.confidence = confidences[idx];
        random_device rd;
        mt19937 gen(rd());
        uniform_int_distribution<int> dis(100, 255);
        result.color = cv::Scalar(dis(gen),dis(gen),dis(gen));
        result.className = classes[result.class_id];
        result.box = boxes[idx];

        detections.push_back(result);
    }

    vector<Detection> output = detections;

    for(int i = 0; i < output.size(); ++i){
        Detection detection = output[i];

        Rect box = detection.box;
        Scalar color = detection.color;

        // Detection box
        rectangle(frame, box, color, 2);

        // Detection box text
        string classString = detection.className + ' ' + to_string(detection.confidence).substr(0, 4);
        Size textSize = getTextSize(classString, FONT_HERSHEY_DUPLEX, 1, 2, 0);
        Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);

        rectangle(frame, textBox, color, FILLED);
        putText(frame, classString, Point(box.x + 5, box.y - 10), FONT_HERSHEY_DUPLEX, 1, Scalar(0, 0, 0), 2, 0);
    }
    // imshow("Inference", frame);
    // waitKey(-1);
    return 0;
}