#include <algorithm>
#include <array>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>

#include "onnxruntime_cxx_api.h"
#include "cpu_provider_factory.h"

int main(int argc, char **argv)
{
    float x_factor = 0.0;
    float y_factor = 0.0;

    cv::RNG rng;
    std::vector<cv::Scalar> color_tables;
    for (int i = 0; i < 5; i++)
    {
        int a = rng.uniform(0, 255);
        int b = rng.uniform(0, 255);
        int c = rng.uniform(0, 255);
        color_tables.push_back(cv::Scalar(a, b, c));
    }

    cv::Mat frame = cv::imread("D:/bird_test/mytest.jpg");
    // 创建InferSession, 查询支持硬件设备
    // GPU Mode, 0 - gpu device id
    std::string onnxpath = "D:/projects/yolov6n_face.onnx";
    std::wstring modelPath = std::wstring(onnxpath.begin(), onnxpath.end());
    Ort::SessionOptions session_options;
    Ort::Env env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "face-landmark-onnx");

    session_options.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);
    std::cout << "onnxruntime inference try to use GPU Device" << std::endl;
    // OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0);
    OrtSessionOptionsAppendExecutionProvider_CPU(session_options, 0);
    Ort::Session session_(env, onnxpath.c_str(), session_options);

    // get input and output info
    int input_nodes_num = session_.GetInputCount();
    int output_nodes_num = session_.GetOutputCount();
    std::vector<std::string> input_node_names;
    std::vector<std::string> output_node_names;
    Ort::AllocatorWithDefaultOptions allocator;
    int input_h = 0;
    int input_w = 0;

    // query input data format
    for (int i = 0; i < input_nodes_num; i++)
    {
        auto input_name = session_.GetInputNameAllocated(i, allocator);
        input_node_names.push_back(input_name.get());
        auto inputShapeInfo = session_.GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
        int ch = inputShapeInfo[1];
        input_h = inputShapeInfo[2];
        input_w = inputShapeInfo[3];
        std::cout << "input format: " << ch << "x" << input_h << "x" << input_w << std::endl;
    }

    // query output data format
    int out_h = 0; // 16
    int out_w = 0; // 8400
    for (int i = 0; i < output_nodes_num; i++)
    {
        auto output_name = session_.GetOutputNameAllocated(i, allocator);
        output_node_names.push_back(output_name.get());
        auto outShapeInfo = session_.GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
        out_h = outShapeInfo[1];
        out_w = outShapeInfo[2];
        std::cout << "output format: " << out_h << "x" << out_w << std::endl;
    }

    // 图象预处理 - 格式化操作
    int64 start = cv::getTickCount();
    int w = frame.cols;
    int h = frame.rows;
    int _max = std::max(h, w);
    cv::Mat image = cv::Mat::zeros(cv::Size(_max, _max), CV_8UC3);
    cv::Rect roi(0, 0, w, h);
    frame.copyTo(image(roi));
    x_factor = image.cols / static_cast<float>(640);
    y_factor = image.rows / static_cast<float>(640);
    cv::Mat m1 = cv::Mat::zeros(cv::Size(2, 5), CV_32FC1);
    for (int i = 0; i < 5; i++)
    {
        m1.at<float>(i, 0) = x_factor;
        m1.at<float>(i, 1) = y_factor;
    }

    cv::Mat blob = cv::dnn::blobFromImage(image, 1.0 / 255.0, cv::Size(input_w, input_h), cv::Scalar(0, 0, 0), true, false);
    size_t tpixels = input_h * input_w * 3;
    std::array<int64_t, 4> input_shape_info{1, 3, input_h, input_w};

    // set input data and inference
    auto allocator_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    Ort::Value input_tensor_ = Ort::Value::CreateTensor<float>(allocator_info, blob.ptr<float>(), tpixels, input_shape_info.data(), input_shape_info.size());
    const std::array<const char *, 1> inputNames = {input_node_names[0].c_str()};
    const std::array<const char *, 1> outNames = {output_node_names[0].c_str()};

    std::vector<Ort::Value> ort_outputs;
    try
    {
        ort_outputs = session_.Run(Ort::RunOptions{nullptr}, inputNames.data(), &input_tensor_, 1, outNames.data(), outNames.size());
    }
    catch (std::exception e)
    {
        std::cout << e.what() << std::endl;
    }
    // 56x84
    const float *pdata = ort_outputs[0].GetTensorMutableData<float>();

    // 后处理, 1x8400x16,  box , 80- min/max
    std::vector<cv::Rect> boxes;
    std::vector<cv::Mat> multiple_kypts;
    std::vector<float> confidences;
    cv::Mat det_output(out_h, out_w, CV_32F, (float *)pdata);

    for (int i = 0; i < det_output.rows; i++)
    {
        double score = det_output.at<float>(i, 15);
        double conf = det_output.at<float>(i, 14);
        // 置信度 0～1之间
        if (score > 0.5 && conf > 0.5)
        {
            float cx = det_output.at<float>(i, 0);
            float cy = det_output.at<float>(i, 1);
            float ow = det_output.at<float>(i, 2);
            float oh = det_output.at<float>(i, 3);
            int x = static_cast<int>((cx - 0.5 * ow) * x_factor);
            int y = static_cast<int>((cy - 0.5 * oh) * y_factor);
            int width = static_cast<int>(ow * x_factor);
            int height = static_cast<int>(oh * y_factor);
            cv::Rect box;
            box.x = x;
            box.y = y;
            box.width = width;
            box.height = height;

            boxes.push_back(box);
            confidences.push_back(score);
            cv::Mat pts = det_output.row(i).colRange(4, 14);
            multiple_kypts.push_back(pts);
        }
    }

    // NMS
    std::vector<int> indexes;
    cv::dnn::NMSBoxes(boxes, confidences, 0.25, 0.45, indexes);

    // show boxes with objects
    // frame = cv::Mat::zeros(frame.size(), frame.type());
    for (size_t i = 0; i < indexes.size(); i++)
    {
        int idx = indexes[i];
        cv::rectangle(frame, boxes[idx], cv::Scalar(0, 0, 255), 2, 8, 0);
        putText(frame, "face", boxes[idx].tl(), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(255, 0, 0), 1, 8);
        cv::Mat one_kypts = multiple_kypts[idx];
        std::cout << one_kypts << std::endl;
        cv::Mat m2 = one_kypts.reshape(0, 5);
        cv::Mat kpts; // 5x2
        cv::multiply(m2, m1, kpts);

        for (int row = 0; row < kpts.rows; row++)
        {
            int x = static_cast<int>(kpts.at<float>(row, 0));
            int y = static_cast<int>(kpts.at<float>(row, 1));
            cv::circle(frame, cv::Size(x, y), 3, cv::Scalar(255, 0, 255), 4, 8, 0);
        }
    }
    // 计算FPS render it
    float t = (cv::getTickCount() - start) / static_cast<float>(cv::getTickFrequency());
    putText(frame, cv::format("FPS: %.2f", 1.0 / t), cv::Point(20, 40), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 0, 0), 2, 8);

    cv::imshow("ONNXRUNTIME1.13 + Face Landmark 推理演示", frame);
    cv::waitKey(0);

    // relase resource
    session_options.release();
    session_.release();
    return 0;
}