#include "dualdetect.h"
#include "errordialogmanager.h"
#include "source/aiimageutils.h"
#include "source/logger.h"
#include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>
#include <opencv2/opencv.hpp>

/// Construct the detector: set default thresholds, zero the RKNN context and
/// build the post-processing stage from a label file.
/// @param labelNameTxTPath_ path to the class-label text file
/// @param objClassNum_      number of object classes the model outputs
DualDetect::DualDetect(std::string labelNameTxTPath_, int objClassNum_)
{
    input_num = 0;
    objThresh = 0.25;
    nmsThresh = 0.45;
    modelDataSize = 0;
    modelLoaded = false;
    memset(&rknn_app_ctx, 0, sizeof(rknn_app_context_t));

    // Record the configuration FIRST: the original code passed the member
    // `objClassNum` to AiPostProcess before it was ever assigned from the
    // constructor parameter, and only stored the label path on failure.
    labelNameTxTPath = labelNameTxTPath_;
    objClassNum = objClassNum_;

    aiPostProcess = new AiPostProcess(objNumbMaxSize, nmsThresh, objThresh, objClassNum);

    if (aiPostProcess->initPostProcess(labelNameTxTPath_, objClassNum_) != 0) {
        LogError << "Failed to initialize post-process with labels from: " << labelNameTxTPath;
        delete aiPostProcess;
        // Null the pointer so the null-checks in detect()/getCocoClsToName()
        // actually work; the original left a dangling pointer here.
        aiPostProcess = nullptr;
    }
}

/// Release all RKNN resources (zero-copy buffers, context), the attribute
/// arrays malloc'ed by loadModel(), the model blob, and the post-processor.
DualDetect::~DualDetect()
{
    LogDebug  << "Attempting to rknn_destroy DualDetect";
    if (modelLoaded) {
        rknn_destroy_mem(rknn_app_ctx.rknn_ctx, input_mems[0]);
        // Free one buffer per actual model output; the original hard-coded 9
        // and would over- or under-free for models with a different count.
        for (uint32_t i = 0; i < rknn_app_ctx.io_num.n_output; ++i) {
            rknn_destroy_mem(rknn_app_ctx.rknn_ctx, output_mems[i]);
        }
        rknn_destroy(rknn_app_ctx.rknn_ctx);

        // These were malloc'ed in loadModel() and were never freed before.
        if (rknn_app_ctx.input_attrs) {
            free(rknn_app_ctx.input_attrs);
            rknn_app_ctx.input_attrs = nullptr;
        }
        if (rknn_app_ctx.output_attrs) {
            free(rknn_app_ctx.output_attrs);
            rknn_app_ctx.output_attrs = nullptr;
        }

        if (modelData) {
            free(modelData);
            modelData = nullptr;
        }
        modelLoaded = false;
    }

    // The post-processor is created in the constructor independently of any
    // model being loaded; the original leaked it when modelLoaded was false.
    delete aiPostProcess;
    aiPostProcess = nullptr;
}

bool DualDetect::loadModel(const QString &modelPath)
{

     // qInfo() << "rknn_run Successful!12 : " << aiPostProcess->objClassNum;
    LogDebug << "Attempting to DualDetect::loadModel " << modelPath;
    rknn_context ctx = 0;

    FILE *fp = fopen(modelPath.toStdString().c_str(), "rb");
    if (fp == nullptr) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "文件不存在....");
        LogWarning << "Open file" << modelPath << "failed.";
        return false;
    }

    fseek(fp, 0, SEEK_END);
    modelDataSize = ftell(fp);
    fseek(fp, 0, SEEK_SET);

    //载入模型到内存
    modelData = (unsigned char*)malloc(modelDataSize);
    if (modelData == nullptr) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "内存分配错误");
        LogWarning << "Malloc model data failed.";
        fclose(fp);
        return false;
    }
    fread(modelData, 1, modelDataSize, fp);
    fclose(fp);
    LogDebug << "read loadModel Successfully modelDataSize:" << modelDataSize;



    //初始化模型
    int ret = rknn_init(&ctx, modelData, modelDataSize, 0, nullptr);
    if (ret < 0) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型无法初始化检查文件有效性....");
        LogWarning << "rknn_init error :" << ret;
        free(modelData);
        return false;
    }

    //获取SDK版本号

    ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &version, sizeof(rknn_sdk_version));
    if (ret < 0) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型无法初始化，失败，检查文件有效性....");
        LogWarning << "rknn_init error ret: " << ret;
        return -1;
    }
    LogDebug << "sdk version: "<< version.api_version << " driver version: "<< version.drv_version;

    rknn_input_output_num ioNum;
    ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &ioNum, sizeof(ioNum));
    if (ret != 0) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型输入输出存在问题....");
        LogWarning << "rknn_query RKNN_QUERY_IN_OUT_NUM error" << ret;
        return -1;
    }
    LogDebug << "model input num: "<< ioNum.n_input << " output num: "<< ioNum.n_output;

    rknn_tensor_attr inputAttrs[ioNum.n_input];
    memset(inputAttrs, 0, sizeof(inputAttrs));
    for (int i = 0; i < ioNum.n_input; i++) {
        inputAttrs[i].index = i;
        ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(inputAttrs[i]), sizeof(rknn_tensor_attr));
        if (ret != 0) {
            ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "无法读取输入输出....");
            LogWarning << "rknn_query RKNN_QUERY_INPUT_ATTR error" << ret;
            return false;
        }
        dump_tensor_attr(&(inputAttrs[i]));
    }


    rknn_tensor_attr outputAttrs[ioNum.n_output];
    memset(outputAttrs, 0, sizeof(outputAttrs));
    for (int i = 0; i < ioNum.n_output; i++) {
        outputAttrs[i].index = i;
        ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(outputAttrs[i]), sizeof(rknn_tensor_attr));
        if (ret != 0) {
            ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型输出存在问题....");
            LogWarning << "rknn_query RKNN_QUERY_OUTPUT_ATTR error" << ret;
            return false;
        }
        dump_tensor_attr(&(outputAttrs[i]));
    }

    rknn_tensor_attr output_attrs[ioNum.n_output];
    memset(output_attrs, 0, sizeof(output_attrs));
    for (int i = 0; i < ioNum.n_output; i++) {
        output_attrs[i].index = i;
        ret                   = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
        //dump_tensor_attr(&(output_attrs[i]));
    }

    rknn_app_ctx.rknn_ctx = ctx;

    //TODO
    if (output_attrs[0].qnt_type == RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC && output_attrs[0].type == RKNN_TENSOR_INT8)
    {
        rknn_app_ctx.is_quant = true;
    }
    else
    {
        rknn_app_ctx.is_quant = false;
    }

    rknn_app_ctx.io_num = ioNum;
    rknn_app_ctx.input_attrs = (rknn_tensor_attr *)malloc(ioNum.n_input * sizeof(rknn_tensor_attr));
    memcpy(rknn_app_ctx.input_attrs, inputAttrs, ioNum.n_input * sizeof(rknn_tensor_attr));
    rknn_app_ctx.output_attrs = (rknn_tensor_attr *)malloc(ioNum.n_output * sizeof(rknn_tensor_attr));
    memcpy(rknn_app_ctx.output_attrs, output_attrs, ioNum.n_output * sizeof(rknn_tensor_attr));


    rknn_custom_string custom_string;
    ret = rknn_query(ctx, RKNN_QUERY_CUSTOM_STRING, &custom_string, sizeof(custom_string));
    if (ret != RKNN_SUCC) {
        printf("rknn_query fail! ret=%d\n", ret);
        return -1;
    }
    printf("custom string: %s\n", custom_string.string);

    rknn_tensor_type   input_type   = RKNN_TENSOR_UINT8;
    rknn_tensor_format input_layout = RKNN_TENSOR_NHWC;

    rknn_app_ctx.rknn_ctx = ctx;

    //TODO
    if (rknn_app_ctx.output_attrs[0].qnt_type == RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC && rknn_app_ctx.output_attrs[0].type == RKNN_TENSOR_INT8)
    {
        rknn_app_ctx.is_quant = true;
    }
    else
    {
        rknn_app_ctx.is_quant = false;
    }

    rknn_app_ctx.input_attrs[0].type = RKNN_TENSOR_UINT8;
    rknn_app_ctx.input_attrs[0].fmt = RKNN_TENSOR_NHWC;
    input_mems[0] = rknn_create_mem(ctx, rknn_app_ctx.input_attrs[0].size_with_stride);
    if (input_mems[0] == nullptr) {
        LogError << "rknn_create_mem fail for output tensor ";
        ErrorDialogManager::instance().showNonBlockingError("分配输出内存错误", "无法为输出张量分配内存。");
        return false;
    }

    if (rknn_app_ctx.input_attrs[0].fmt == RKNN_TENSOR_NCHW)
    {
        printf("model is NCHW input fmt\n");
        rknn_app_ctx.model_channel = rknn_app_ctx.input_attrs[0].dims[1];
        rknn_app_ctx.model_height = rknn_app_ctx.input_attrs[0].dims[2];
        rknn_app_ctx.model_width = rknn_app_ctx.input_attrs[0].dims[3];
    }
    else
    {
        printf("model is NHWC input fmt\n");
        rknn_app_ctx.model_height = rknn_app_ctx.input_attrs[0].dims[1];
        rknn_app_ctx.model_width = rknn_app_ctx.input_attrs[0].dims[2];
        rknn_app_ctx.model_channel = rknn_app_ctx.input_attrs[0].dims[3];
    }


    rknn_app_ctx.input_attrs[0].type = input_type;
    rknn_app_ctx.input_attrs[0].fmt = input_layout;
    input_mems[0] = rknn_create_mem(ctx, rknn_app_ctx.input_attrs[0].size_with_stride);


    for (uint32_t i = 0; i < rknn_app_ctx.io_num.n_output; ++i) {
        // default output type is depend on model, this require float32 to compute top5
        // allocate float32 output tensor
        int output_size = rknn_app_ctx.output_attrs[i].size_with_stride;
        output_mems[i]  = rknn_create_mem(ctx, output_size);
    }
     //qInfo() << "rknn_run Successful! : " << aiPostProcess->objClassNum;

    ret = rknn_set_io_mem(ctx, input_mems[0], &rknn_app_ctx.input_attrs[0]);
    if (ret < 0) {
        printf("rknn_set_io_mem fail! ret=%d\n", ret);
        return -1;
    }

    for (uint32_t i = 0; i < rknn_app_ctx.io_num.n_output; ++i) {
        // default output type is depend on model, this require float32 to compute top5
        rknn_app_ctx.output_attrs[i].type = RKNN_TENSOR_INT8;
        // set output memory and attribute
        ret = rknn_set_io_mem(ctx, output_mems[i], &rknn_app_ctx.output_attrs[i]);
        if (ret < 0) {
            printf("rknn_set_io_mem fail! ret=%d\n", ret);
            return -1;
        }
    }

    modelLoaded = true;
    LogDebug << "loadModel Successed ";
    //qInfo() << "rknn_run Successful! : " << aiPostProcess->objClassNum;
    return true;
}


/// Log one tensor attribute (shape, strides, quantization info) for debugging.
void DualDetect::dump_tensor_attr(const rknn_tensor_attr* attr) const
{
    // Render the dimension list as "d0, d1, ..."; empty when n_dims == 0.
    std::string shape_str;
    for (uint32_t d = 0; d < attr->n_dims; ++d) {
        if (d > 0) {
            shape_str += ", ";
        }
        shape_str += std::to_string(attr->dims[d]);
    }

    LogDebug << "  index=" << attr->index
             << ", name=" << attr->name
             << ", n_dims=" << attr->n_dims
             << ", dims=[" << shape_str.c_str() << "]"
             << ", n_elems=" << attr->n_elems
             << ", size=" << attr->size
             << ", w_stride=" << attr->w_stride
             << ", size_with_stride=" << attr->size_with_stride
             << ", fmt=" << get_format_string(attr->fmt)
             << ", type=" << get_type_string(attr->type)
             << ", qnt_type=" << get_qnt_type_string(attr->qnt_type)
             << ", zp=" << attr->zp
             << ", scale=" << attr->scale;
}

/// Scale `image` to fit inside target_width x target_height while preserving
/// aspect ratio, centre it on a bg_color canvas, and record the padding and
/// scale factor in `letter_box` for later coordinate back-projection.
cv::Mat DualDetect::letterbox_image(const cv::Mat &image, int target_width, int target_height, const cv::Scalar &bg_color, letterbox_t &letter_box) {
    const int src_w = image.cols;
    const int src_h = image.rows;
    const float src_ratio = static_cast<float>(src_w) / static_cast<float>(src_h);
    const float dst_ratio = static_cast<float>(target_width) / static_cast<float>(target_height);

    // Pick the limiting dimension: if the target is wider than the source
    // aspect ratio, height constrains the scale, otherwise width does.
    int scaled_w = 0;
    int scaled_h = 0;
    if (dst_ratio > src_ratio) {
        scaled_h = target_height;
        scaled_w = static_cast<int>(src_ratio * target_height);
    } else {
        scaled_w = target_width;
        scaled_h = static_cast<int>(target_width / src_ratio);
    }

    cv::Mat scaled;
    cv::resize(image, scaled, cv::Size(scaled_w, scaled_h));

    letter_box.x_pad = (target_width - scaled_w) / 2;
    letter_box.y_pad = (target_height - scaled_h) / 2;
    letter_box.scale = static_cast<float>(scaled_w) / src_w;

    // Paint the background then blit the scaled image into the centre.
    cv::Mat canvas(target_height, target_width, image.type(), bg_color);
    scaled.copyTo(canvas(cv::Rect(letter_box.x_pad, letter_box.y_pad, scaled_w, scaled_h)));

    return canvas;
}

/// Crop the top-left region of `image` so both dimensions become multiples of
/// 16 (required by downstream alignment). The returned Mat is a view sharing
/// pixel data with the input, not a deep copy.
cv::Mat DualDetect::cropToMultipleOf16(const cv::Mat& image) {
    // Truncate each dimension down to the nearest multiple of 16.
    const int cropped_w = (image.cols / 16) * 16;
    const int cropped_h = (image.rows / 16) * 16;

    cv::Mat cropped = image(cv::Rect(0, 0, cropped_w, cropped_h));
    printf("Cropped dimensions: %dx%d\n", cropped_w, cropped_h);
    return cropped;
}

/// Crop both frames to 16-aligned dimensions, wrap them in image_buffer_t
/// descriptors and letterbox-convert them into freshly malloc'ed buffers of
/// the model's input size. On success the caller owns dst_detect.virt_addr
/// and dst_base.virt_addr and must free() them.
/// @return true on success, false on failure.
/// Fixes vs. the original: this bool function returned -1 (converts to TRUE)
/// on failure and 0 (FALSE) on success, inverting every caller's check; the
/// already-allocated dst_base buffer leaked when a later step failed; the
/// dead "#if 0" variant (which also pointed virt_addr at a stack Mat's data)
/// has been removed.
bool DualDetect::preprocess(const cv::Mat &baseImage,const cv::Mat &detecImage,
                            image_buffer_t &dst_detect, image_buffer_t &dst_base)
{
    cv::Mat testimage,baseimage;
    image_buffer_t detect_image;
    image_buffer_t base_image;
    memset(&detect_image, 0, sizeof(image_buffer_t));
    memset(&base_image, 0, sizeof(image_buffer_t));

    // Align both frames to 16-pixel multiples before conversion.
    cv::Mat baseImage_ = cropToMultipleOf16(baseImage);
    cv::Mat detecImage_ = cropToMultipleOf16(detecImage);

    // NOTE(review): frames are treated as RGB888 without a BGR->RGB swap;
    // confirm the capture source really delivers RGB byte order.
    baseimage = baseImage_ ;
    testimage = detecImage_ ;

    // Spot-check a few pixels for debugging.
    LogDebug << "base_image[20]" << (int)baseimage.data[20];
    LogDebug << "base_image[2000]" << (int)baseimage.data[2000];
    LogDebug << "base_image[20000]" << (int)baseimage.data[20000];

    detect_image.width  = testimage.cols;
    detect_image.height = testimage.rows;
    detect_image.format = IMAGE_FORMAT_RGB888;
    detect_image.virt_addr = (unsigned char*)testimage.data;

    base_image.width  = baseimage.cols;
    base_image.height = baseimage.rows;
    base_image.format = IMAGE_FORMAT_RGB888;
    base_image.virt_addr = (unsigned char*)baseimage.data;

    // Destination buffers sized to the model input (3 bytes/pixel RGB888).
    memset(&dst_detect, 0, sizeof(image_buffer_t));
    memset(&dst_base, 0, sizeof(image_buffer_t));

    dst_detect.width = rknn_app_ctx.model_width;
    dst_detect.height = rknn_app_ctx.model_height;
    dst_detect.format = IMAGE_FORMAT_RGB888;
    dst_detect.size = get_image_size(&dst_detect);

    dst_base.width = rknn_app_ctx.model_width;
    dst_base.height = rknn_app_ctx.model_height;
    dst_base.format = IMAGE_FORMAT_RGB888;
    dst_base.size = get_image_size(&dst_base);

    dst_base.virt_addr = (unsigned char *)malloc(dst_base.size);
    if (dst_base.virt_addr == NULL)
    {
        LogError << "malloc buffer size: " << dst_base.size << " fail!";
        return false;
    }

    dst_detect.virt_addr = (unsigned char *)malloc(dst_detect.size);
    if (dst_detect.virt_addr == NULL)
    {
        LogError << "malloc buffer size: " << dst_detect.size << " fail!";
        // Don't leak the buffer already allocated for dst_base.
        free(dst_base.virt_addr);
        dst_base.virt_addr = NULL;
        return false;
    }

    // Release both destination buffers on a conversion failure.
    auto releaseBuffers = [&]() {
        free(dst_base.virt_addr);
        dst_base.virt_addr = NULL;
        free(dst_detect.virt_addr);
        dst_detect.virt_addr = NULL;
    };

    LogDebug << "base_image.fd:" << base_image.fd;
    int ret = convert_image_with_letterbox(&base_image, &dst_base, &letter_box, bg_color);
    if (ret < 0)
    {
        LogWarning << "convert_image_with_letterbox fail! ret=" << ret;
        releaseBuffers();
        return false;
    }
    // Reset so the second conversion records its own padding/scale.
    memset(&letter_box, 0, sizeof(letterbox_t));
    ret = convert_image_with_letterbox(&detect_image, &dst_detect, &letter_box, bg_color);
    if (ret < 0)
    {
        LogWarning << "convert_image_with_letterbox fail! ret=" << ret;
        releaseBuffers();
        return false;
    }

    LogDebug << "AI preprocess successful !";
    return true;
}

/// Letterbox `inputImage` into a weight x weight square padded with grey 127,
/// writing the result to `outputImage` and returning the pad/scale metadata
/// needed to map detections back to the original image coordinates.
/// @param weight side length (pixels) of the square model input
letterbox_t DualDetect::preprocessImage(const cv::Mat& inputImage, cv::Mat& outputImage, int weight) {
    int originalWidth = inputImage.cols;
    int originalHeight = inputImage.rows;
    // Uniform scale so the whole image fits inside the square.
    float scale = std::min(static_cast<float>(weight) / originalWidth, static_cast<float>(weight) / originalHeight);

    int newWidth = static_cast<int>(originalWidth * scale);
    int newHeight = static_cast<int>(originalHeight * scale);

    int x_pad = (weight - newWidth) / 2;
    int y_pad = (weight - newHeight) / 2;

    // 调整图像大小
    cv::Mat resizedImage;
    cv::resize(inputImage, resizedImage, cv::Size(newWidth, newHeight));

    // Fill the padding with 127 in EVERY channel. The original used
    // cv::Mat::ones(...) * 127, but for a multi-channel type ones() is
    // Scalar(1,0,0,...), so only the first channel got 127 and the padding
    // came out as (127,0,0) instead of neutral grey.
    cv::Mat paddedImage(weight, weight, inputImage.type(), cv::Scalar::all(127));

    // 将调整大小后的图像复制到填充图像的中心位置
    resizedImage.copyTo(paddedImage(cv::Rect(x_pad, y_pad, newWidth, newHeight)));

    // 将结果赋值给输出图像
    outputImage = paddedImage;

    // 返回填充和缩放信息
    letterbox_t letterBox;
    letterBox.x_pad = x_pad;
    letterBox.y_pad = y_pad;
    letterBox.scale = scale;

    return letterBox;
}


/// Run one detection pass: letterbox the frame into the model's zero-copy
/// input buffer, execute the network, and post-process the raw outputs into
/// `od_results`.
/// @param baseImage  reference frame — currently only validated for emptiness,
///                   not fed to the network (single-input path)
/// @param detecImage frame actually run through the model
/// @param od_results out-parameter filled with detection boxes/classes/scores
/// @return 0 on success, -1 on failure.
/// The dead "#else" branch of the original (which contained pasted garbage
/// tokens and referenced undeclared variables) has been removed.
int DualDetect::detect(const cv::Mat &baseImage,const cv::Mat &detecImage,object_detect_result_list *od_results)
{
    memset(od_results, 0x00, sizeof(*od_results));
    if(!aiPostProcess)
    {
        LogWarning << "aiPostProcess is Null";
        return -1;
    }
    if (baseImage.empty() || detecImage.empty())
    {
        LogWarning << "AI detetc input image is NUll";
        return -1;
    }

    LogTrack <<"rknn_inputs_set go " ;

    int ret = 0;
    {
        // Serialize access to the shared RKNN context and zero-copy buffers.
        std::lock_guard<std::mutex> lock(mutexRKNN_);
        cv::Mat resized_image;
        // NHWC layout assumed: dims = {N, H, W, C}, so dims[2] is the width
        // of the (square) model input.
        int weight = rknn_app_ctx.input_attrs[0].dims[2];

        letterbox_t letterBox = preprocessImage(detecImage, resized_image, weight);
        resized_image.convertTo(resized_image, CV_8UC3);
        // Copy W * H * C bytes straight into the model's input buffer.
        memcpy(input_mems[0]->virt_addr, resized_image.data,
               weight * rknn_app_ctx.input_attrs[0].dims[1] * rknn_app_ctx.input_attrs[0].dims[3]);

        LogTrack <<"rknn_run" ;
        ret = rknn_run(rknn_app_ctx.rknn_ctx, nullptr);
        if (ret < 0)
        {
            LogWarning << "rknn_run fail! ret=" << ret;
            return -1;
        }
        // Decode the raw INT8 outputs into boxes, applying the letterbox
        // inverse transform and the current confidence/NMS thresholds.
        aiPostProcess->postProcess(&rknn_app_ctx, output_mems, &letterBox, objThresh, nmsThresh, od_results);
    }
    LogTrack <<"rknn_outputs_get Successful!" ;

    return ret;
}

/// Update the detection confidence threshold and propagate it to the
/// post-processing stage when one exists.
void DualDetect::setobjThresh(float objThresh_)
{
    // First log line shows the PREVIOUS value, second shows the new one.
    LogDebug << "DualDetect AI objThresh:" << objThresh;

    objThresh = objThresh_;
    if (aiPostProcess != nullptr) {
        aiPostProcess->setboxThresh(objThresh);
    }

    LogDebug << "AI objThresh:" << objThresh;
}

/// Update the non-maximum-suppression IoU threshold and propagate it to the
/// post-processing stage when one exists.
void DualDetect::setnmsThresh(float nmsThresh_)
{
    nmsThresh = nmsThresh_;
    if (aiPostProcess != nullptr) {
        aiPostProcess->setnmsThresh(nmsThresh);
    }
    LogDebug << "AI setnmsThresh:" << nmsThresh;
}

/// Map a class id to its human-readable label.
/// @param cls_id class index; passed BY REFERENCE and clamped in place to the
///               valid range [0, objClassNum) — callers observe the clamp.
/// @return label string, or "null" when no post-processor is available.
char *DualDetect::getCocoClsToName(int &cls_id)
{
    if(cls_id >= objClassNum)
        cls_id = objClassNum - 1;
    // The original only clamped the upper bound; a negative id would index
    // out of bounds inside cocoClsToName.
    if(cls_id < 0)
        cls_id = 0;
    if(!aiPostProcess)
    {
        LogWarning << "getCocoClsToName Fail";
        // Cast needed: a string literal is const char[]; the legacy char*
        // return type forces this (callers must not modify the result).
        return (char *)"null";
    }
    return aiPostProcess->cocoClsToName(cls_id);
}
/*

// 过滤框函数
std::tuple<std::vector<DualDetect::Box>, std::vector<int>, std::vector<float> > DualDetect::filter_boxes(const std::vector<Box> &boxes, const std::vector<float> &box_confidences, const std::vector<std::vector<float> > &box_class_probs)
{
    std::vector<float> class_max_score;
    std::vector<int> classes;
    std::vector<float> scores;

    for (size_t i = 0; i < box_class_probs.size(); ++i) {
        auto max_it = std::max_element(box_class_probs[i].begin(), box_class_probs[i].end());
        class_max_score.push_back(*max_it);
        classes.push_back(std::distance(box_class_probs[i].begin(), max_it));
    }

    std::vector<int> _class_pos;
    for (size_t i = 0; i < class_max_score.size(); ++i) {
        if (class_max_score[i] * box_confidences[i] >= objThresh) {
            _class_pos.push_back(i);
            scores.push_back(class_max_score[i] * box_confidences[i]);
        }
    }

    std::vector<Box> filtered_boxes;
    std::vector<int> filtered_classes;

    for (auto idx : _class_pos) {
        filtered_boxes.push_back(boxes[idx]);
        filtered_classes.push_back(classes[idx]);
    }

    return {filtered_boxes, filtered_classes, scores};
}

// 非极大值抑制函数
std::vector<int> DualDetect::nms_boxes(const std::vector<Box>& boxes, const std::vector<float>& scores) {
    std::vector<int> keep;
    std::vector<int> order(scores.size());
    std::iota(order.begin(), order.end(), 0);
    std::sort(order.begin(), order.end(), [&scores](int a, int b) { return scores[a] > scores[b]; });

    while (!order.empty()) {
        int i = order.front();
        keep.push_back(i);
        std::vector<int> remaining;

        for (size_t j = 1; j < order.size(); ++j) {
            int k = order[j];
            float xx1 = std::max(boxes[i].x, boxes[k].x);
            float yy1 = std::max(boxes[i].y, boxes[k].y);
            float xx2 = std::min(boxes[i].x + boxes[i].w, boxes[k].x + boxes[k].w);
            float yy2 = std::min(boxes[i].y + boxes[i].h, boxes[k].y + boxes[k].h);

            float w = std::max(0.0f, xx2 - xx1 + 0.00001f);
            float h = std::max(0.0f, yy2 - yy1 + 0.00001f);
            float inter = w * h;
            float ovr = inter / (boxes[i].w * boxes[i].h + boxes[k].w * boxes[k].h - inter);

            if (ovr <= nmsThresh) {
                remaining.push_back(k);
            }
        }
        order = remaining;
    }
    return keep;
}

// DFL 函数
std::vector<cv::Mat> DualDetect::dfl(const std::vector<cv::Mat>& positions) {
    // 实现 DFL 逻辑（类似于 Python 代码中使用的 torch 操作）
    std::vector<cv::Mat> results;
    for (const auto& position : positions) {
        // 添加 DFL 实现细节...
    }
    return results;
}

std::vector<cv::Mat> DualDetect::box_process(const cv::Mat& position) {
    int grid_h = position.size[2];
    int grid_w = position.size[3];

    cv::Mat col, row;
    cv::meshgrid(cv::range(0, grid_w), cv::range(0, grid_h), col, row);
    cv::Mat grid = cv::Mat(2, 1, CV_32F);
    cv::hconcat(col.reshape(1, 1), row.reshape(1, 1), grid);

    cv::Mat stride = (cv::Mat_<float>(2, 1) << IMG_SIZE[1] / grid_h, IMG_SIZE[0] / grid_w);
    auto positions_dfl = dfl(position);

    // Implement box processing logic...
    // ...

    std::vector<cv::Mat> xyxy;
    return xyxy;
}

std::tuple<std::vector<Box>, std::vector<int>, std::vector<float>> DualDetect::post_process(const std::vector<cv::Mat>& input_data) {
    std::vector<std::vector<Box>> boxes;
    std::vector<std::vector<float>> scores, classes_conf;

    int defualt_branch = 3;
    int pair_per_branch = input_data.size() / defualt_branch;

    for (int i = 0; i < defualt_branch; ++i) {
        boxes.push_back(box_process(input_data[pair_per_branch * i]));
        classes_conf.push_back(input_data[pair_per_branch * i + 1]);
        scores.push_back(cv::Mat::ones(input_data[pair_per_branch * i + 1].size[1], CV_32F));
    }

    // Implement flattening and concatenation logic...

    std::tie(boxes, classes, scores) = filter_boxes(boxes, scores, classes_conf);

    // NMS 逻辑
    std::vector<Box> nboxes;
    std::vector<int> nclasses;
    std::vector<float> nscores;

    for (const auto& c : unique(classes)) {
        auto inds = std::find(classes.begin(), classes.end(), c);
        auto b = boxes[inds];
        auto s = scores[inds];
        auto keep = nms_boxes(b, s);

        if (!keep.empty()) {
            nboxes.insert(nboxes.end(), b[keep.begin(), keep.end()]);
            nclasses.insert(nclasses.end(), c[keep.begin(), keep.end()]);
            nscores.insert(nscores.end(), s[keep.begin(), keep.end()]);
        }
    }

    if (nclasses.empty() && nscores.empty()) {
        return std::make_tuple(std::vector<Box>(), std::vector<int>(), std::vector<float>());
    }

    boxes = nboxes;
    classes = nclasses;
    scores = nscores;

    return std::make_tuple(boxes, classes, scores);
}*/
