#include "resdiff.h"
#include "source/logger.h"
#include "ImageProcessor.h"
#include "errordialogmanager.h"
#include "source/rknntool.h"

// resDiff: RKNN-based difference/anomaly-detection model wrapper.
// All real setup happens lazily in loadModel() / setembedding();
// the constructor intentionally does nothing.
resDiff::resDiff() = default;

bool resDiff::unloadModel()
{
    rknn_destroy_mem(ctx, input_mems[0]);
    rknn_destroy_mem(ctx, input_mems[1]);
    rknn_destroy_mem(ctx, output_mems[0]);
    rknn_destroy_mem(ctx, output_mems[1]);
    rknn_destroy(ctx);
    ctx = 0;
    return 0;
}

bool resDiff::loadModel()
{
    QMutexLocker locker(&resDiffRknn);

    if(ctx)
    {
        LogWarning << "The model has been opened and is currently being closed";
        unloadModel();
    }
    // 打开模型文件
    FILE *fp = fopen(modelPath.toStdString().c_str(), "rb");
    if (fp == nullptr) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "文件不存在....");
        LogWarning << "Open file" << modelPath << "failed.";
        return false;
    }

    // 读取模型文件大小
    fseek(fp, 0, SEEK_END);
    modelDataSize = ftell(fp);
    fseek(fp, 0, SEEK_SET);

    // 将模型载入内存
    modelData = (unsigned char*)malloc(modelDataSize);
    if (modelData == nullptr) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "内存分配错误");
        LogWarning << "Malloc model data failed.";
        fclose(fp);
        return false;
    }
    fread(modelData, 1, modelDataSize, fp);
    fclose(fp);
    LogDebug << "read loadModel Successfully modelDataSize:" << modelDataSize;

    // 初始化模型
    int ret = rknn_init(&ctx, modelData, modelDataSize, 0, nullptr);
    if (ret < 0) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型无法初始化检查文件有效性....");
        LogWarning << "rknn_init error :" << ret;
        free(modelData);
        return false;
    }

    ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &ioNum, sizeof(ioNum));
    if (ret != 0) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型输入输出存在问题....");
        LogWarning << "rknn_query RKNN_QUERY_IN_OUT_NUM error" << ret;
        return false;
    }
    LogDebug << "model input num: " << ioNum.n_input << " output num: " << ioNum.n_output;

    memset(inputAttrs, 0, sizeof(inputAttrs));
    input_num = ioNum.n_input;
    for (int i = 0; i < ioNum.n_input; i++) {
        inputAttrs[i].index = i;
        ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(inputAttrs[i]), sizeof(rknn_tensor_attr));
        if (ret != 0) {
            ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "无法读取输入输出....");
            LogWarning << "rknn_query RKNN_QUERY_INPUT_ATTR error" << ret;
            return false;
        }
        dump_tensor_attr(&(inputAttrs[i]));
    }
    // 获取输出张量属性
    memset(outputAttrs, 0, sizeof(outputAttrs));
    for (int i = 0; i < ioNum.n_output; i++) {
        outputAttrs[i].index = i;
        ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(outputAttrs[i]), sizeof(rknn_tensor_attr));
        if (ret != 0) {
            ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型输出存在问题....");
            LogWarning << "rknn_query RKNN_QUERY_OUTPUT_ATTR error" << ret;
            return false;
        }
        dump_tensor_attr(&(outputAttrs[i]));
    }


    for (uint32_t i = 0; i < ioNum.n_output; ++i)
    {
        int output_size = outputAttrs[i].n_elems * 2;
        output_mems[i] = rknn_create_mem(ctx, output_size);
        if (output_mems[i] == nullptr) {
            LogError << "rknn_create_mem fail for output tensor " << i;
            ErrorDialogManager::instance().showNonBlockingError("分配输出内存错误", "无法为输出张量分配内存。");
            return false;
        }
    }

    rknn_custom_string custom_string;
    ret = rknn_query(ctx, RKNN_QUERY_CUSTOM_STRING, &custom_string, sizeof(custom_string));
    if (ret != RKNN_SUCC)
    {
        LogError << "rknn_query fail! ret=" << ret;
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "读取自定义失败....");
        return false;
    }

    inputAttrs[0].type = RKNN_TENSOR_FLOAT16;
    inputAttrs[1].type = RKNN_TENSOR_FLOAT16;
    //   // default fmt is NHWC, npu only support NHWC in zero copy mode
    inputAttrs[0].fmt = RKNN_TENSOR_NHWC;
    inputAttrs[1].fmt = RKNN_TENSOR_NHWC;

    input_mems[0] = rknn_create_mem(ctx, inputAttrs[0].size_with_stride);
    input_mems[1] = rknn_create_mem(ctx, inputAttrs[1].size_with_stride);
    if (input_mems[0] == nullptr || input_mems[1] == nullptr) {
        LogError << "rknn_create_mem fail for output tensor ";
        ErrorDialogManager::instance().showNonBlockingError("分配输出内存错误", "无法为输出张量分配内存。");
        return false;
    }
    ret = rknn_set_io_mem(ctx, input_mems[0], &inputAttrs[0]);
    if (ret < 0)
    {
        LogWarning <<  "rknn_set_io_mem1 fail! ret=" << ret;
        ErrorDialogManager::instance().showNonBlockingError("设置输入内存错误", "无法设置输入张量内存。");
        return false;
    }

    ret = rknn_set_io_mem(ctx, input_mems[1], &inputAttrs[1]);
    if (ret < 0)
    {
        LogError << "rknn_set_io_mem fail for input tensor 1, ret=" << ret;
        ErrorDialogManager::instance().showNonBlockingError("设置输入内存错误", "无法设置输入张量内存。");
        return false;
    }

    for (uint32_t i = 0; i < ioNum.n_output; ++i)
    {
        // 默认输出类型取决于模型，这里需要 float32 来计算 top5
        outputAttrs[i].type = RKNN_TENSOR_FLOAT16;
        // 设置输出内存和属性
        ret = rknn_set_io_mem(ctx, output_mems[i], &outputAttrs[i]);
        if (ret < 0)
        {
            LogError << "rknn_set_io_mem fail for output tensor " << i << ", ret=" << ret;
            ErrorDialogManager::instance().showNonBlockingError("设置输出内存错误", "无法设置输出张量内存。");
            return false;
        }
    }

    LogDebug << "Load resDiff Successful";

    isInit = 1;
    return true;
}


// Runs one inference of the difference model on `Image`.
//
// Preconditions: loadModel() AND setembedding() must both have succeeded
// (isInit == 2); the reference embedding lives in input slot 1, so only
// the image is copied into input slot 0 here.
//
// Parameters:
//   Image       - source image; resized to input_size x input_size and
//                 converted to fp16 (CV_16FC3) before being copied into the
//                 zero-copy input buffer.
//   pred_score_ - out: scalar anomaly score read from output_mems[1].
//   elapsed     - out: rknn_run wall time in nanoseconds.
//   scale_      - out: 240 / pred_score_, the factor used to map the fp16
//                 anomaly map to 8-bit.
//
// Returns the anomaly map as a CV_8U Mat of input_size x input_size, or an
// empty Mat on any failure (not initialised, size mismatch, rknn_run error).
cv::Mat resDiff::detect(cv::Mat &Image,float &pred_score_,qint64 &elapsed,float &scale_)
{

    //cv::cvtColor(Image, Image, cv::COLOR_BGR2RGB);
    LogTrack << "Attempting to PatchCoreAi detect";
    if(isInit != 2)
    {
        LogTrack << "fail to PatchCoreAi detect isInit :"<< isInit;
        return cv::Mat() ;
    }
    cv::Mat resized_image;
    cv::resize(Image, resized_image, cv::Size(input_size, input_size), 0, 0, cv::INTER_LINEAR);
    cv::Mat fp16_image(resized_image.size(), CV_16FC3);

    // Raw value conversion to fp16 — no scaling/normalisation applied here;
    // presumably the model expects 0..255-range inputs. TODO confirm.
    resized_image.convertTo(fp16_image, CV_16FC3);

    QMutexLocker locker(&resDiffRknn);

    // Guard against a buffer-size mismatch before the memcpy below.
    // NOTE(review): the check compares size_with_stride but the copy uses
    // inputAttrs[0].size — these can differ when the tensor is padded;
    // verify they are equal for this model.
    if(inputAttrs[0].size_with_stride != fp16_image.total() * fp16_image.elemSize())
    {
        LogWarning << "inputAttrs[0].size_with_stride != fp16_image.total() * fp16_image.elemSize()" ;
        return cv::Mat();
    }


    // Zero-copy: write the fp16 image directly into input slot 0.
    memcpy(input_mems[0]->virt_addr, fp16_image.data, inputAttrs[0].size);
    LogTrack << "Attempting to Prknn_run input_size:"<< input_size ;
    QElapsedTimer timer;
    timer.start();

    int ret = rknn_run(ctx, NULL);
    if (ret < 0)
    {
        LogWarning << "rknn run error: " << ret;
        return cv::Mat();
    }
    elapsed = timer.nsecsElapsed(); // execution time in nanoseconds
    LogTrackq << "rknn_run 函数执行时间:" << elapsed << "纳秒";

    // Output slot 1 holds a single fp16 scalar: the anomaly score.
    fp16_t *fp16_ptr = reinterpret_cast<fp16_t *>(output_mems[1]->virt_addr);
    cv::Mat fp16_mat(1, 1, CV_16FC1, fp16_ptr);
    cv::Mat float_mat;
    fp16_mat.convertTo(float_mat, CV_32F);
    // Read the converted float value.
    pred_score_ = float_mat.at<float>(0, 0);
    LogTrack << "Patch Core pred_score :" << pred_score_;
    // NOTE(review): divides by the score with no zero/near-zero guard —
    // a zero score yields an infinite scale. Confirm the model can never
    // emit 0 here.
    scale_ = 240 / pred_score_;
    // Output slot 0 holds the fp16 anomaly map (input_size x input_size).
    cv::Mat outImage(input_size, input_size, CV_16FC1, output_mems[0]->virt_addr);
    cv::Mat outImage_;
    outImage.convertTo(outImage_, CV_8U , scale_); // scale pixel values into the 0-255 range

    // double minVal, maxVal;
    // //cv::minMaxLoc(image, &minVal, &maxVal);
    // cv::minMaxLoc(outImage_, &minVal, &maxVal);
    // LogDebug << "outImage_ : "<< maxVal;
    // //计算外框1

    // cv::Mat binaryImage;
    // cv::threshold(outImage_, binaryImage, objThresh * (255.0 / 100.0), 255 , cv::THRESH_BINARY);
    // std::vector<std::vector<cv::Point>> contours;
    // cv::findContours(binaryImage, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    // float scaleX = static_cast<float>(srcSize.width) / input_size;
    // float scaleY = static_cast<float>(srcSize.height) / input_size;
    // od_results->count = 0;
    // for (size_t i = 0; i < contours.size() && od_results->count < OBJ_NUMB_MAX_SIZE; ++i) {
    //     cv::Rect boundingRect = cv::boundingRect(contours[i]);
    //     image_rect_t rect;
    //     rect.left = static_cast<int>(boundingRect.x * scaleX);
    //     rect.top = static_cast<int>(boundingRect.y * scaleY);
    //     rect.right = static_cast<int>((boundingRect.x + boundingRect.width) * scaleX);
    //     rect.bottom = static_cast<int>((boundingRect.y + boundingRect.height) * scaleY);

    //     object_detect_result result;
    //     result.box = rect;
    //     result.prop = pred_score_; // 可以根据需求设置置信度
    //     result.cls_id = 0; // 类别ID, 可以根据需求设置

    //     od_results->results[od_results->count++] = result;
    // }

    // 找到最小值和最大值
    // double min_val, max_val;
    // cv::minMaxLoc(float_image, &min_val, &max_val);

    // // 映射到 [0, 255]
    // cv::Mat normalized_image;
    // float_image.convertTo(normalized_image, CV_8U, 255.0 / (max_val - min_val), -min_val * 255.0 / (max_val - min_val));

    // // 保存图像
    // cv::imwrite(filename, normalized_image);

    return outImage_;
}


// Computes the reference embedding for `baseimg` with the feature-extractor
// model (extractPath) and writes it into input slot 1 of the main model, so
// subsequent detect() calls compare against this baseline.
//
// Returns 0 on success, -1 on failure.
// BUG FIX: several error paths previously did `return false` (i.e. 0),
// which callers of this int-returning API would read as SUCCESS. All
// failure paths now return -1 and release every resource acquired here.
// On success raises isInit to 2 (the state detect() requires).
int resDiff::setembedding(cv::Mat baseimg)
{
    QMutexLocker locker(&resDiffRknn);
    if(!isInit)
    {
        ErrorDialogManager::instance().showNonBlockingError("设置基准图失败", "请先初始化模型");
        LogWarning <<  "fail to resDiff::setembedding please Init First";
        return -1;
    }
    cv::Mat img = baseimg;
    if(img.empty())
    {
        ErrorDialogManager::instance().showNonBlockingError("设置基准图失败", "未设置基准图，请先设置");
        LogDebug << "esDiff::setembedding fail baseImage is empty";
        return -1;
    }

    rknn_context Extractctx = 0;
    rknn_tensor_mem* inputMems[1]  = { nullptr };
    rknn_tensor_mem* outputMems[1] = { nullptr };
    // Releases everything acquired below; safe to call at any stage.
    // (The original code leaked the context and the model buffer on every
    // error path, and never freed modelExtracData even on success.)
    auto cleanup = [&]() {
        if (inputMems[0])  { rknn_destroy_mem(Extractctx, inputMems[0]);  inputMems[0]  = nullptr; }
        if (outputMems[0]) { rknn_destroy_mem(Extractctx, outputMems[0]); outputMems[0] = nullptr; }
        if (Extractctx)    { rknn_destroy(Extractctx); Extractctx = 0; }
        if (modelExtracData) { free(modelExtracData); modelExtracData = nullptr; }
    };

    FILE *fp = fopen(extractPath.toStdString().c_str(), "rb");
    if (fp == nullptr) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "文件不存在....");
        // BUG FIX: logged modelPath although extractPath was opened.
        LogWarning << "Open file" << extractPath << "failed.";
        return -1;
    }

    // Determine the extractor model file size.
    fseek(fp, 0, SEEK_END);
    modelExtracDataSize = ftell(fp);
    fseek(fp, 0, SEEK_SET);

    // Free any buffer from a previous call so repeated calls do not leak.
    if (modelExtracData) {
        free(modelExtracData);
        modelExtracData = nullptr;
    }

    // Read the extractor model into memory.
    modelExtracData = (unsigned char*)malloc(modelExtracDataSize);
    if (modelExtracData == nullptr) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "内存分配错误");
        LogWarning << "Malloc model data failed.";
        fclose(fp);
        return -1;
    }
    // BUG FIX: fread result was ignored; reject a short read.
    if (fread(modelExtracData, 1, modelExtracDataSize, fp) != (size_t)modelExtracDataSize) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型文件读取不完整....");
        LogWarning << "Read model data failed.";
        fclose(fp);
        cleanup();
        return -1;
    }
    fclose(fp);
    LogDebug << "read loadModel Successfully modelExtracDataSize:" << modelExtracDataSize;

    // Initialise the extractor context.
    int ret = rknn_init(&Extractctx, modelExtracData, modelExtracDataSize, 0, nullptr);
    if (ret < 0) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型无法初始化检查文件有效性....");
        LogWarning << "rknn_init error :" << ret;
        Extractctx = 0;
        cleanup();
        return -1;
    }

    rknn_input_output_num extractIoNum;
    ret = rknn_query(Extractctx, RKNN_QUERY_IN_OUT_NUM, &extractIoNum, sizeof(extractIoNum));
    if (ret != 0) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型输入输出存在问题....");
        LogWarning << "rknn_query RKNN_QUERY_IN_OUT_NUM error" << ret;
        cleanup();
        return -1;
    }
    LogDebug << "model input num: " << extractIoNum.n_input << " output num: " << extractIoNum.n_output;

    // The attribute arrays below hold exactly one entry; reject models with
    // more I/O instead of overrunning them (the original looped to
    // n_input/n_output over 1-element arrays).
    if (extractIoNum.n_input > 1 || extractIoNum.n_output > 1) {
        ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型输入输出数量不匹配....");
        LogWarning << "unexpected extractor io num, input:" << extractIoNum.n_input
                   << " output:" << extractIoNum.n_output;
        cleanup();
        return -1;
    }

    rknn_tensor_attr extractinputAttrs[1];
    rknn_tensor_attr extractoutputAttrs[1];
    memset(extractinputAttrs, 0, sizeof(extractinputAttrs));
    for (uint32_t i = 0; i < extractIoNum.n_input; i++) {
        extractinputAttrs[i].index = i;
        ret = rknn_query(Extractctx, RKNN_QUERY_INPUT_ATTR, &(extractinputAttrs[i]), sizeof(rknn_tensor_attr));
        if (ret != 0) {
            ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "无法读取输入输出....");
            LogWarning << "rknn_query RKNN_QUERY_INPUT_ATTR error" << ret;
            cleanup();
            return -1;
        }
        dump_tensor_attr(&(extractinputAttrs[i]));
    }
    // Query output tensor attributes.
    memset(extractoutputAttrs, 0, sizeof(extractoutputAttrs));
    for (uint32_t i = 0; i < extractIoNum.n_output; i++) {
        extractoutputAttrs[i].index = i;
        ret = rknn_query(Extractctx, RKNN_QUERY_OUTPUT_ATTR, &(extractoutputAttrs[i]), sizeof(rknn_tensor_attr));
        if (ret != 0) {
            ErrorDialogManager::instance().showNonBlockingError("载入模型错误", "模型输出存在问题....");
            LogWarning << "rknn_query RKNN_QUERY_OUTPUT_ATTR error" << ret;
            cleanup();
            return -1;
        }
        dump_tensor_attr(&(extractoutputAttrs[i]));
    }

    // Prepare the reference image: 224x224 fp16, matching the extractor input.
    cv::Mat resized_image;
    cv::resize(img, resized_image, cv::Size(224, 224), 0, 0, cv::INTER_LINEAR);
    resized_image.convertTo(resized_image, CV_16FC3);

    extractinputAttrs[0].type = RKNN_TENSOR_FLOAT16;
    // default fmt is NHWC, npu only support NHWC in zero copy mode
    extractinputAttrs[0].fmt = RKNN_TENSOR_NHWC;

    // BUG FIX: the original allocated `size` bytes but copied
    // `size_with_stride` bytes into the buffer — a heap overflow whenever
    // the tensor is padded. Allocate the strided size and copy the packed
    // image size, mirroring the pattern used in loadModel()/detect().
    inputMems[0] = rknn_create_mem(Extractctx, extractinputAttrs[0].size_with_stride);
    if (inputMems[0] == nullptr) {
        LogError << "rknn_create_mem fail for extractor input tensor";
        ErrorDialogManager::instance().showNonBlockingError("分配输入内存错误", "无法为输入张量分配内存。");
        cleanup();
        return -1;
    }
    memcpy(inputMems[0]->virt_addr, resized_image.data, extractinputAttrs[0].size);

    int output_size = extractoutputAttrs[0].size;
    outputMems[0] = rknn_create_mem(Extractctx, output_size);
    if (outputMems[0] == nullptr) {
        LogError << "rknn_create_mem fail for extractor output tensor";
        ErrorDialogManager::instance().showNonBlockingError("分配输出内存错误", "无法为输出张量分配内存。");
        cleanup();
        return -1;
    }

    ret = rknn_set_io_mem(Extractctx, inputMems[0], &extractinputAttrs[0]);
    if (ret < 0) {
        LogError << "rknn_set_io_mem fail! ret=" << ret;
        cleanup();
        return -1;
    }

    ret = rknn_set_io_mem(Extractctx, outputMems[0], &extractoutputAttrs[0]);
    if (ret < 0) {
        LogError << "rknn_set_io_mem fail! ret=" << ret;
        cleanup();
        return -1;
    }

    ret = rknn_run(Extractctx, NULL);
    if (ret < 0)
    {
        LogWarning << "rknn run error: " << ret;
        cleanup();
        return -1;
    }
    LogDebug << "resDiff::setembedding is successful";

    // Copy the embedding into input slot 1 of the main model.
    // NOTE(review): assumes extractor output size == inputAttrs[1].size of
    // the main model — verify the two models agree.
    memcpy(input_mems[1]->virt_addr, outputMems[0]->virt_addr, output_size);
    LogDebug << "resDiff::setembedding memcpy is successful";

    // Release the temporary extractor context, its tensor memory, and the
    // model buffer (previously leaked).
    cleanup();

    isInit = 2;
    return 0;
}


