#include "onnx_caller.h"

OnnxCaller* OnnxCaller::m_instance = nullptr;


// Prints a tensor's name, shape and element data type to stdout.
void printTensorInfo(const Ort::Value& tensor, const std::string& name) {
    // Query shape and element type from the tensor's type/shape info.
    auto info = tensor.GetTensorTypeAndShapeInfo();
    const std::vector<int64_t> dims = info.GetShape();

    // Render the shape as a comma-separated list, e.g. [1, 3, 640, 640].
    std::cout << "Tensor: " << name << ", Shape: [";
    const char* sep = "";
    for (int64_t d : dims) {
        std::cout << sep << d;
        sep = ", ";
    }
    std::cout << "], Data Type: ";

    // Map the ONNX element-type enum to a human-readable label.
    const char* type_label = "unknown";
    switch (info.GetElementType()) {
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:  type_label = "float";  break;
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE: type_label = "double"; break;
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:  type_label = "int32";  break;
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:  type_label = "int64";  break;
        case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:  type_label = "uint8";  break;
        default:                                                          break;
    }
    std::cout << type_label << std::endl;
}


// Constructs the singleton and eagerly loads the ONNX model.
// NOTE(review): the return code of __load_Onnx() is ignored here, so a
// failed model load is not surfaced to the caller.
OnnxCaller::OnnxCaller()
{
    __load_Onnx();
}


int OnnxCaller::__load_Onnx()
{
    std::cout << "OnnxCaller::__load_Onnx()" << std::endl;
    m_pOrtEnv = std::make_unique<Ort::Env>(ORT_LOGGING_LEVEL_WARNING, "ONNXRuntime");
    //Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "ONNXRuntime");
    Ort::SessionOptions session_options;
    m_pOrtSession = std::make_unique<Ort::Session>(*m_pOrtEnv, "/home/marvsmart/workspace/shoe_keypoint_infer/yolo_nas_pose_n_yes.onnx", session_options);
    //m_ortSession = Ort::Session(env, "/home/marvsmart/workspace/shoe_keypoint_infer/yolo_nas_pose_n_yes.onnx", session_options);
    std::cout << "ONNX Runtime installed and configured successfully!" << std::endl;

    return 0;
}


// Returns the process-wide OnnxCaller instance.
// The original lazy "check-then-new" was not thread-safe (two threads could
// both see nullptr and construct twice) and the instance was never deleted.
// A function-local static (C++11 "magic static") gives thread-safe one-time
// construction and cleanup at program exit, with the same pointer interface.
OnnxCaller* OnnxCaller::getInstance()
{
    static OnnxCaller instance;
    m_instance = &instance;  // keep the static member coherent for any code inspecting it
    return m_instance;
}

// Runs pose inference on `image`, draws the detected bounding box and two
// keypoints onto it in place, and returns a result vector (currently always
// empty; {} also signals "no confident detection" or an inference error).
std::vector<double> OnnxCaller::ProcessImage(cv::Mat &image)
{
    std::cout << "OnnxCaller::ProcessImage()" << std::endl;

    // --- Preprocess: resize to the model's fixed 640x640 input. ---
    cv::Mat image_resized;
    cv::resize(image, image_resized, cv::Size(640, 640));

    // Convert HWC -> CHW by splitting the channels and stacking them vertically.
    std::vector<cv::Mat> channels(3);
    cv::split(image_resized, channels);
    cv::Mat image_chw;
    cv::vconcat(channels, image_chw);

    // Reshape to BCHW (1, 3, 640, 640). NOTE(review): this assumes the input
    // image is 3-channel 8-bit (CV_8UC3), since the tensor below is uint8.
    std::vector<int64_t> input_shape = {1, 3, 640, 640};
    std::vector<int> shape_int(input_shape.begin(), input_shape.end());
    cv::Mat image_bchw = image_chw.reshape(1, shape_int.size(), shape_int.data());

    // Wrap the (non-owning) pixel buffer in an ORT tensor; image_bchw must
    // stay alive until Run() returns.
    Ort::MemoryInfo memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
    Ort::Value input_tensor = Ort::Value::CreateTensor<uint8_t>(
        memory_info, image_bchw.data, image_bchw.total(),
        input_shape.data(), input_shape.size());

    // --- Collect input/output names. ---
    // Keep the AllocatedStringPtr owners alive for the duration of Run();
    // the original code called .release(), leaking one string per name on
    // every invocation of this method.
    Ort::AllocatorWithDefaultOptions allocator;
    std::vector<Ort::AllocatedStringPtr> name_holders;
    std::vector<const char*> input_names;
    std::vector<const char*> output_names;
    for (size_t i = 0; i < m_pOrtSession->GetInputCount(); i++) {
        name_holders.push_back(m_pOrtSession->GetInputNameAllocated(i, allocator));
        input_names.push_back(name_holders.back().get());
    }
    for (size_t i = 0; i < m_pOrtSession->GetOutputCount(); i++) {
        name_holders.push_back(m_pOrtSession->GetOutputNameAllocated(i, allocator));
        output_names.push_back(name_holders.back().get());
    }

    // --- Inference. ---
    Ort::RunOptions run_options;
    std::vector<Ort::Value> output_tensors;
    try {
        output_tensors = m_pOrtSession->Run(run_options, input_names.data(), &input_tensor, 1,
                                            output_names.data(), output_names.size());
    } catch (const Ort::Exception& e) {
        std::cerr << "ONNX Runtime error: " << e.what() << std::endl;
        // The original fell through here and indexed the empty vector (UB).
        return {};
    }

    // Expected outputs: num_detections, boxes, scores, joints.
    if (output_tensors.size() < 4) {
        std::cerr << "Unexpected number of model outputs: " << output_tensors.size() << std::endl;
        return {};
    }

    int64_t* num_detections = output_tensors[0].GetTensorMutableData<int64_t>();
    float* batch_boxes = output_tensors[1].GetTensorMutableData<float>();
    float* batch_scores = output_tensors[2].GetTensorMutableData<float>();
    float* batch_joints = output_tensors[3].GetTensorMutableData<float>();

    // NOTE(review): num_detections[0] is used below as an *index* into the
    // score/box/joint arrays. If it is actually the detection COUNT, valid
    // entries are [0, count) and this reads one past the last detection —
    // confirm against the exported model's output semantics.
    int num_idx = num_detections[0];
    std::cout << "score: " << batch_scores[num_idx] << std::endl;
    constexpr float kScoreThreshold = 0.4f;  // minimum confidence to keep a detection
    if (batch_scores[num_idx] < kScoreThreshold)
    {
        return {};
    }

    // Map coordinates from 640x640 model space back to the original image.
    double height_scale = double(image.rows) / 640;
    double width_scale = double(image.cols) / 640;

    // Bounding box stored as (x1, y1, x2, y2) per detection.
    int x1 = int(batch_boxes[num_idx * 4 + 0] * width_scale);
    int y1 = int(batch_boxes[num_idx * 4 + 1] * height_scale);
    int x2 = int(batch_boxes[num_idx * 4 + 2] * width_scale);
    int y2 = int(batch_boxes[num_idx * 4 + 3] * height_scale);
    cv::rectangle(image,
                  cv::Point(x1, y1), cv::Point(x2, y2),
                  cv::Scalar(0, 255, 0), 2);

    // Two keypoints, laid out as (x, y, conf) triplets per detection
    // (stride 6 = 2 keypoints x 3 values).
    cv::circle(image,
               cv::Point(int(batch_joints[num_idx * 6 + 0] * width_scale),
                         int(batch_joints[num_idx * 6 + 1] * height_scale)),
               3, cv::Scalar(0, 0, 255), 3);
    cv::circle(image,
               cv::Point(int(batch_joints[num_idx * 6 + 3] * width_scale),
                         int(batch_joints[num_idx * 6 + 4] * height_scale)),
               3, cv::Scalar(0, 0, 255), 3);

    return {};
}

