#include "pcdual.h"
#include "source/rknntool.h"

#include <cstdint>   // uint16_t / uint32_t (float_to_fp16)
#include <cstdio>    // fopen / fread / ftell
#include <cstring>   // memcpy / memset

#include <QDebug>
#include <QElapsedTimer>
#include <QMutexLocker>

#include <rknn_api.h>

PCDual::PCDual(QObject *parent): QObject(parent), ctx(0), modelData(nullptr), modelDataSize(0), canRun(false) {}

// Destructor: tears down the RKNN context, IO buffers and model blob via
// unloadModel(), which is a no-op when no model is loaded.
PCDual::~PCDual() {
    unloadModel();
}

int PCDual::loadModel(const QString &modelPath, QVariantMap &modelDetail)
{
    QMutexLocker locker(&mutex);
    qInfo() << "Loading model:" << modelPath;

    // Unload if already loaded
    if (ctx) unloadModel();

    FILE *fp = fopen(modelPath.toStdString().c_str(), "rb");
    if (!fp) {
        errorString = "无法打开模型文件: 文件不存在";
        return -1;
    }

    // Load model data
    fseek(fp, 0, SEEK_END);
    modelDataSize = ftell(fp);
    rewind(fp);
    modelData = (unsigned char*)malloc(modelDataSize);
    if (!modelData) {
        fclose(fp);
        errorString = "模型加载失败: 内存分配错误";
        return -1;
    }
    fread(modelData, 1, modelDataSize, fp);
    fclose(fp);

    // Initialize RKNN model
    int ret = rknn_init(&ctx, modelData, modelDataSize, 0, nullptr);
    if (ret < 0) {
        free(modelData);
        errorString = "模型初始化失败: 请检查模型文件的有效性";
        return -1;
    }

    // Query model details
    if (queryModelInfo(modelDetail) < 0) {
        free(modelData);
        return -1;
    }

    canRun = true;
    return 0;
}

int PCDual::unloadModel()
{
    QMutexLocker locker(&mutex);
    if (!ctx) return 0;

    for (int i = 0; i < 2; ++i) {
        if (input_mems[i]) rknn_destroy_mem(ctx, input_mems[i]);
    }
    if (output_mems[0]) rknn_destroy_mem(ctx, output_mems[0]);
    rknn_destroy(ctx);

    free(modelData);
    ctx = 0;
    modelData = nullptr;
    canRun = false;

    qInfo() << "Model successfully unloaded.";
    return 0;
}

/**
 * Query SDK and tensor attributes for the freshly-initialized context,
 * allocate the two input / two output zero-copy buffers, and bind them to
 * the model. Fills @p modelDetail with the input tensor types and model size.
 *
 * Called by loadModel() with `mutex` already held; on failure the caller is
 * responsible for destroying any buffers already created and the context.
 *
 * @return 0 on success, -1 on any RKNN query/allocation/bind failure.
 */
int PCDual::queryModelInfo(QVariantMap &modelDetail)
{
    // Query SDK version (kept in `version` for diagnostics).
    int ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &version, sizeof(version));
    if (ret < 0) return -1;

    // Query the two input tensor attributes.
    for (int i = 0; i < 2; ++i) {
        memset(&inputAttrs[i], 0, sizeof(rknn_tensor_attr));
        inputAttrs[i].index = i;
        if (rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &inputAttrs[i], sizeof(rknn_tensor_attr)) < 0) return -1;
    }

    // Query the two output tensor attributes.
    for (int i = 0; i < 2; ++i) {
        memset(&outputAttrs[i], 0, sizeof(rknn_tensor_attr));
        outputAttrs[i].index = i;
        if (rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &outputAttrs[i], sizeof(rknn_tensor_attr)) < 0) return -1;
    }

    // Allocate zero-copy IO memory sized from the queried attributes.
    for (int i = 0; i < 2; ++i) {
        input_mems[i] = rknn_create_mem(ctx, inputAttrs[i].size);
        if (!input_mems[i]) return -1;
    }
    dump_tensor_attr(&(inputAttrs[0]));
    dump_tensor_attr(&(inputAttrs[1]));

    for (int i = 0; i < 2; ++i) {
        output_mems[i] = rknn_create_mem(ctx, outputAttrs[i].size);
        if (!output_mems[i]) return -1;
    }
    dump_tensor_attr(&(outputAttrs[0]));
    dump_tensor_attr(&(outputAttrs[1]));

    // Bind the buffers to the model. The original code ignored these return
    // values; a failed bind would let rknn_run() operate on unbound memory,
    // so treat any failure as fatal.
    for (int i = 0; i < 2; ++i) {
        if (rknn_set_io_mem(ctx, input_mems[i], &inputAttrs[i]) < 0) return -1;
        if (rknn_set_io_mem(ctx, output_mems[i], &outputAttrs[i]) < 0) return -1;
    }

    modelDetail["typeA"] = get_type_string(inputAttrs[0].type);
    modelDetail["typeB"] = get_type_string(inputAttrs[1].type);
    modelDetail["size"] = static_cast<quint64>(modelDataSize);
    return 0;
}

/// Map an rknn tensor-type enum value to a short human-readable label.
/// Any type outside the three the UI cares about reports as "Unknown".
const char* PCDual::get_type_string(int type) {
    if (type == RKNN_TENSOR_FLOAT16) return "FP16";
    if (type == RKNN_TENSOR_FLOAT32) return "FP32";
    if (type == RKNN_TENSOR_INT8)    return "INT8";
    return "Unknown";
}

/**
 * Convert an IEEE-754 single-precision float to half precision (binary16).
 *
 * Mantissa bits are truncated (no rounding). Values whose half-precision
 * exponent would be <= 0 flush to signed zero (no subnormal support);
 * values whose exponent would be >= 31 saturate to signed infinity, and
 * NaN payloads are not preserved.
 *
 * @param value 32-bit float to convert.
 * @return The 16 raw bits of the half-precision result.
 */
uint16_t float_to_fp16(float value)
{
    // memcpy instead of reinterpret_cast: type-punning through a pointer
    // cast violates strict aliasing (UB); memcpy compiles to the same code.
    uint32_t bits;
    memcpy(&bits, &value, sizeof(bits));

    const uint32_t sign = (bits >> 31) & 0x1;
    // Re-bias the exponent from 127 (fp32) to 15 (fp16). This MUST be
    // signed: the original unsigned arithmetic wrapped around for small
    // exponents, so tiny inputs — including 0.0f — fell into the ">= 31"
    // branch and were emitted as infinity (0x7C00) instead of zero.
    int32_t exponent = static_cast<int32_t>((bits >> 23) & 0xFF) - 127 + 15;
    uint32_t mantissa = (bits & 0x7FFFFF) >> 13;

    if (exponent <= 0) {
        // Underflow (and zero): flush to signed zero.
        exponent = 0;
        mantissa = 0;
    } else if (exponent >= 31) {
        // Overflow (and Inf/NaN input): saturate to signed infinity.
        exponent = 31;
        mantissa = 0;
    }

    return static_cast<uint16_t>((sign << 15) |
                                 (static_cast<uint32_t>(exponent) << 10) |
                                 mantissa);
}

/**
 * Run dual-input inference on the loaded model.
 *
 * Both inputs arrive as int8 buffers and are widened to FP16 before being
 * copied into the model's zero-copy input buffers (the size checks and
 * /2 divisions below assume 2 bytes per input element — i.e. FP16 inputs).
 *
 * @param inputA raw int8 buffer for input 0; must hold inputAttrs[0].size/2
 *               elements. NOTE(review): this is NOT validated here, unlike
 *               inputB — the caller must guarantee the size.
 * @param inputB int8 vector for input 1; validated against inputAttrs[1].size.
 * @param core   out: scalar read from output 1, also used to normalize
 *               output 0 into 8-bit range.
 * @return 8-bit single-channel image built from output 0, or an empty
 *         cv::Mat on any failure (model not loaded, size mismatch, run error).
 */
cv::Mat PCDual::detect(void* inputA, const QVector<int8_t> &inputB , float &core)
{
    QMutexLocker locker(&mutex);
    if (!canRun) {
        qWarning() << "Model not loaded, cannot process.";
        return cv::Mat();
    }

    // attr.size is in bytes; with FP16 elements, element count * 2 must match.
    if (inputB.size()*2 != inputAttrs[1].size) {
        qWarning() << "Input size mismatch.";
        return cv::Mat();
    }

    // Input A: wrap the caller's buffer without copying, widen int8 -> FP16,
    // then copy the converted data into the bound input memory.
    auto inputAInt8 = reinterpret_cast<int8_t*>(inputA);
    cv::Mat inputAImage(1, inputAttrs[0].size /2 , CV_8SC1, inputAInt8);
    cv::Mat inputAImageFloat;
    inputAImage.convertTo(inputAImageFloat, CV_16FC1);
    memcpy(input_mems[0]->virt_addr, inputAImageFloat.data, inputAttrs[0].size);

    // Input B: same int8 -> FP16 widening. The const_cast is read-only use:
    // the Mat is only a convertTo source and is never written through.
    cv::Mat inputBImage(1, inputB.size(), CV_8SC1, const_cast<int8_t*>(inputB.data()));
    cv::Mat inputBImageFloat;
    inputBImage.convertTo(inputBImageFloat, CV_16FC1);
    memcpy(input_mems[1]->virt_addr, inputBImageFloat.data, inputAttrs[1].size);

    QElapsedTimer timer;
    timer.start();
    if (rknn_run(ctx, nullptr) < 0) return cv::Mat();
    qInfo() << "PCDual::process:" << timer.nsecsElapsed() << "ns";

    // Output 1 is treated as a single FP16 scalar; go via FP32 to read it
    // as a float (cv::Mat cannot index FP16 directly).
    cv::Mat intFP16(1, 1, CV_16FC1, output_mems[1]->virt_addr);
    cv::Mat intInt;
    intFP16.convertTo(intInt, CV_32FC1); // Convert FP16 to FP32
    core = intInt.at<float>(0, 0); // Access the first element as float
    // Output 0: wrapped as an H x W FP16 image taken from dims[2]/dims[3]
    // (assumes NCHW layout — TODO confirm against the model's output attrs).
    cv::Mat outputImage(outputAttrs[0].dims[2], outputAttrs[0].dims[3], CV_16FC1, output_mems[0]->virt_addr);
    cv::Mat outPut;
    // Scale to 8-bit using `core` as the dynamic range.
    // NOTE(review): if core == 0 the scale factor is inf — verify the model
    // guarantees a non-zero scalar on output 1.
    outputImage.convertTo(outPut,CV_8UC1 , 255 / core);

    qInfo() << "outputImage size : " << outPut.type() << outPut.cols << " X " << outPut.rows;
    // convertTo allocated fresh storage for outPut, so the returned Mat does
    // not alias output_mems and stays valid after the next inference.
    // (No clone needed, despite what an earlier comment claimed.)
    return outPut;
}


