#include "yolov8.h"

#include <sys/time.h>

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <mutex>
#include <random>
#include <string>
#include <utility>
#include <vector>

#include "postprocess.h"
#include "preprocess.h"
#include "utils/engine_helper.h"
#include "utils/logging.h"

// Maximum length of a class-name string in the result structures.
#define OBJ_NAME_MAX_SIZE 64
// Maximum number of detections kept per frame.
#define OBJ_NUMB_MAX_SIZE 128
// Number of classes assumed by the post-processing stage.
// NOTE(review): g_classes below lists only 6 labels while this is 80 —
// confirm which value the deployed model/post_process actually uses.
#define OBJ_CLASS_NUM 80
// Default IoU threshold for non-maximum suppression.
#define NMS_THRESH 0.45
// Default confidence threshold for keeping a detection box.
#define BOX_THRESH 0.25

// Number of NPU cores on the RK3588 SoC; used for round-robin core binding.
const int RK3588 = 3;

// Convert a timeval to microseconds (as a double).
// Multiplies in floating point so `tv_sec * 1000000` cannot overflow a
// 32-bit integer intermediate on platforms with 32-bit time_t/long.
// NOTE(review): a leading double-underscore name is reserved in C++;
// kept unchanged because callers in this file depend on it.
double __get_us(struct timeval t)
{
    return t.tv_sec * 1000000.0 + t.tv_usec;
}

// Round-robin NPU core selector shared by all model instances.
// Successive calls return 0, 1, 2, 0, 1, ... so each new context is
// bound to a different RK3588 NPU core. Thread-safe via a local mutex.
int get_core_num()
{
    static std::mutex counter_mutex;
    static int next_core = 0;

    std::lock_guard<std::mutex> guard(counter_mutex);
    const int assigned = next_core % RK3588;
    ++next_core;
    return assigned;
}

// Class-name labels for this custom (gesture) model.
// NOTE(review): 6 labels here but OBJ_CLASS_NUM is 80 — these look
// inconsistent; confirm which one post_process indexes with.
static std::vector<std::string> g_classes = {
    "fist", "stop", "palm", "rock", "one", "no_gesture"};

// Store the model path; model loading is deferred to Init() so the caller
// can choose whether to share weights with another context.
// The parameter is an rvalue reference, so move it instead of copying
// (the original copied, defeating the purpose of taking &&).
Yolov8Custom::Yolov8Custom(std::string &&model_path) : model_path_(std::move(model_path))
{
    // Init();  // intentionally not called here — see Init(ctx, copy_weight)
}

// Destructor: release the rknn context and the malloc'd tensor-attribute
// copies owned by app_ctx_ (see Releaserknn()).
Yolov8Custom::~Yolov8Custom()
{
    Releaserknn();
}

/**
 * Initialize the detector: load the model file, create (or duplicate) an
 * rknn context, bind it to an NPU core, query tensor attributes and
 * pre-allocate the input/output descriptors reused by Inference().
 *
 * @param ctx_in      existing context whose weights are shared when
 *                    copy_weight is true
 * @param copy_weight true  -> duplicate ctx_in via rknn_dup_context
 *                    false -> load the model file into a fresh context
 * @return NN_SUCCESS on success, a specific nn_error_e code otherwise
 */
nn_error_e Yolov8Custom::Init(rknn_context *ctx_in, bool copy_weight)
{
    int model_len = 0; // size of the model file in bytes
    int ret = 0;
    auto model = load_model(model_path_.c_str(), &model_len);

    if (model == nullptr)
    {
        // %s needs a C string; passing std::string to a printf-style
        // logger is undefined behavior (original bug).
        NN_LOG_ERROR("load model file %s fail!", model_path_.c_str());
        return NN_LOAD_MODEL_FAIL;
    }

    if (copy_weight)
    {
        NN_LOG_INFO("copy weight from other context");
        // Share the weights of an already-initialized context.
        ret = rknn_dup_context(ctx_in, &rknn_ctx_);
        // A duplicated context does not need the raw model bytes; the
        // original code leaked `model` on this path.
        free(model);
        if (ret != RKNN_SUCC)
        {
            NN_LOG_ERROR("rknn_dup_context fail! ret=%d", ret);
            return NN_RKNN_INIT_FAIL;
        }
    }
    else
    {
        NN_LOG_INFO("load model and init new context");
        // Reuse the outer `ret` (the original declared a shadowing one).
        ret = rknn_init(&rknn_ctx_, model, model_len, 0, NULL);
        free(model);
        if (ret != RKNN_SUCC)
        {
            NN_LOG_ERROR("rknn_init fail! ret=%d", ret);
            return NN_RKNN_INIT_FAIL;
        }
    }

    // Bind this context to one NPU core, round-robin across instances.
    // Initialized to AUTO so core_mask can never be read uninitialized.
    rknn_core_mask core_mask = RKNN_NPU_CORE_AUTO;
    switch (get_core_num())
    {
    case 0:
        core_mask = RKNN_NPU_CORE_0;
        break;
    case 1:
        core_mask = RKNN_NPU_CORE_1;
        break;
    case 2:
        core_mask = RKNN_NPU_CORE_2;
        break;
    default:
        break;
    }

    ret = rknn_set_core_mask(rknn_ctx_, core_mask);
    if (ret < 0)
    {
        NN_LOG_ERROR("rknn_set_core_mask fail! ret=%d", ret);
        return NN_RKNN_SET_CORE_MASK_FAIL;
    }

    NN_LOG_INFO("rknn_init success!");

    // Query and log SDK/driver versions.
    rknn_sdk_version version;
    ret = rknn_query(rknn_ctx_, RKNN_QUERY_SDK_VERSION, &version, sizeof(rknn_sdk_version));
    if (ret < 0)
    {
        NN_LOG_ERROR("rknn_query fail! ret=%d", ret);
        return NN_RKNN_QUERY_FAIL;
    }
    NN_LOG_INFO("RKNN API version: %s", version.api_version);
    NN_LOG_INFO("RKNN Driver version: %s", version.drv_version);

    // Number of input and output tensors.
    rknn_input_output_num io_num;
    ret = rknn_query(rknn_ctx_, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
    if (ret != RKNN_SUCC)
    {
        NN_LOG_ERROR("rknn_query fail! ret=%d", ret);
        return NN_RKNN_QUERY_FAIL;
    }
    NN_LOG_INFO("model input num: %d, output num: %d", io_num.n_input, io_num.n_output);

    // Input tensor attributes (std::vector replaces the non-standard VLA;
    // elements are value-initialized, i.e. zeroed, so no memset needed).
    NN_LOG_INFO("input tensors:");
    std::vector<rknn_tensor_attr> input_attrs(io_num.n_input);
    for (uint32_t i = 0; i < io_num.n_input; i++)
    {
        input_attrs[i].index = i;
        ret = rknn_query(rknn_ctx_, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret != RKNN_SUCC)
        {
            NN_LOG_ERROR("rknn_query fail! ret=%d", ret);
            return NN_RKNN_QUERY_FAIL;
        }
        print_tensor_attr(&(input_attrs[i]));
    }

    // Output tensor attributes.
    NN_LOG_INFO("output tensors:");
    std::vector<rknn_tensor_attr> output_attrs(io_num.n_output);
    model_type_ = ModelType::DETECTION;
    for (uint32_t i = 0; i < io_num.n_output; i++)
    {
        output_attrs[i].index = i;
        ret = rknn_query(rknn_ctx_, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
        if (ret != RKNN_SUCC)
        {
            NN_LOG_ERROR("rknn_query fail! ret=%d", ret);
            return NN_RKNN_QUERY_FAIL;
        }
        print_tensor_attr(&(output_attrs[i]));
    }

    // Fill the application context consumed by post_process().
    app_ctx_.rknn_ctx = rknn_ctx_;
    // Affine-quantized int8 outputs can be post-processed without
    // converting to float.
    app_ctx_.is_quant = (output_attrs[0].qnt_type == RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC &&
                         output_attrs[0].type == RKNN_TENSOR_INT8);

    app_ctx_.io_num = io_num;
    // app_ctx_ owns malloc'd copies of the attrs; freed in Releaserknn().
    app_ctx_.input_attrs =
        (rknn_tensor_attr *)malloc(io_num.n_input * sizeof(rknn_tensor_attr));
    memcpy(app_ctx_.input_attrs, input_attrs.data(),
           io_num.n_input * sizeof(rknn_tensor_attr));

    app_ctx_.output_attrs =
        (rknn_tensor_attr *)malloc(io_num.n_output * sizeof(rknn_tensor_attr));
    memcpy(app_ctx_.output_attrs, output_attrs.data(),
           io_num.n_output * sizeof(rknn_tensor_attr));

    // Model geometry depends on the input tensor layout.
    if (input_attrs[0].fmt == RKNN_TENSOR_NCHW)
    {
        NN_LOG_INFO("model is NCHW input fmt");
        app_ctx_.model_channel = input_attrs[0].dims[1];
        app_ctx_.model_height = input_attrs[0].dims[2];
        app_ctx_.model_width = input_attrs[0].dims[3];
    }
    else
    {
        NN_LOG_INFO("model is NHWC input fmt");
        app_ctx_.model_height = input_attrs[0].dims[1];
        app_ctx_.model_width = input_attrs[0].dims[2];
        app_ctx_.model_channel = input_attrs[0].dims[3];
    }
    NN_LOG_INFO("model input height=%d, width=%d, channel=%d",
                app_ctx_.model_height, app_ctx_.model_width,
                app_ctx_.model_channel);

    // Pre-allocate the input/output descriptors reused on every Inference().
    inputs_ = std::make_unique<rknn_input[]>(app_ctx_.io_num.n_input);
    outputs_ = std::make_unique<rknn_output[]>(app_ctx_.io_num.n_output);
    inputs_[0].index = 0;
    inputs_[0].type = RKNN_TENSOR_UINT8;
    inputs_[0].fmt = RKNN_TENSOR_NHWC;
    inputs_[0].size =
        app_ctx_.model_width * app_ctx_.model_height * app_ctx_.model_channel;
    inputs_[0].buf = nullptr; // filled per frame in Inference()

    return NN_SUCCESS;
}

/**
 * Run one detection pass on a pre-processed (letterboxed) image buffer.
 *
 * @param image_buf  pointer to image data sized
 *                   model_width * model_height * model_channel bytes
 * @param od_results output list; zeroed here, then filled by post_process()
 * @param letter_box letterbox transform used during preprocessing, needed
 *                   to map boxes back to the original image coordinates
 * @return NN_SUCCESS on success, a specific nn_error_e code otherwise
 */
nn_error_e Yolov8Custom::Inference(void *image_buf, object_detect_result_list *od_results,
                                   letterbox_t letter_box)
{
    // Time the whole set / run / get / post-process pipeline.
    struct timeval start_time, stop_time;
    gettimeofday(&start_time, NULL);

    inputs_[0].buf = image_buf;
    int ret = rknn_inputs_set(app_ctx_.rknn_ctx, app_ctx_.io_num.n_input,
                              inputs_.get());
    if (ret < 0)
    {
        // Original format string lacked %d, so the error code was never
        // printed (and misnamed the API as rknn_input_set).
        NN_LOG_ERROR("rknn_inputs_set failed! error code = %d", ret);
        // NOTE(review): an input-set failure is reported with the
        // output-get error code — use a dedicated code if the enum has one.
        return NN_RKNN_OUTPUT_GET_FAIL;
    }

    ret = rknn_run(app_ctx_.rknn_ctx, nullptr);
    if (ret != RKNN_SUCC)
    {
        NN_LOG_ERROR("rknn_run failed, error code = %d", ret);
        return NN_RKNN_RUNTIME_ERROR;
    }

    // Quantized models keep int8 outputs; request float only when needed.
    for (int i = 0; i < app_ctx_.io_num.n_output; ++i)
    {
        outputs_[i].index = i;
        outputs_[i].want_float = (!app_ctx_.is_quant);
    }

    // Guard the shared outputs_ buffers against concurrent calls; the
    // lock_guard also releases the mutex on the early-return path below.
    std::lock_guard<std::mutex> lock(outputs_lock_);

    ret = rknn_outputs_get(app_ctx_.rknn_ctx, app_ctx_.io_num.n_output,
                           outputs_.get(), nullptr);
    if (ret != RKNN_SUCC)
    {
        NN_LOG_ERROR("rknn_outputs_get failed, error code = %d", ret);
        return NN_RKNN_OUTPUT_ATTR_ERROR;
    }

    const float nms_threshold = NMS_THRESH;      // default NMS IoU threshold
    const float box_conf_threshold = BOX_THRESH; // default confidence threshold

    // Clear the result list before post-processing fills it in.
    memset(od_results, 0, sizeof(object_detect_result_list));
    od_results->model_type = model_type_;

    NN_LOG_INFO("rknn_outputs_post_process");
    post_process(&app_ctx_, outputs_.get(), &letter_box, box_conf_threshold,
                 nms_threshold, od_results);

    gettimeofday(&stop_time, NULL);
    NN_LOG_INFO("rknn run and process use %f ms\n", (__get_us(stop_time) - __get_us(start_time)) / 1000);

    // Outputs must be handed back to the runtime after every successful get.
    rknn_outputs_release(app_ctx_.rknn_ctx, app_ctx_.io_num.n_output,
                         outputs_.get());

    return NN_SUCCESS;
}

// Non-owning pointer to this instance's rknn context; used by other
// instances that share its weights via Init(ctx, /*copy_weight=*/true).
rknn_context *Yolov8Custom::get_rknn_context()
{
    return &rknn_ctx_;
}

// Width (pixels) the model expects its input image to be.
int Yolov8Custom::get_model_width()
{
    return app_ctx_.model_width;
}

// Height (pixels) the model expects its input image to be.
int Yolov8Custom::get_model_height()
{
    return app_ctx_.model_height;
}

/**
 * Release the rknn context and the malloc'd tensor-attribute copies held
 * in app_ctx_. Safe to call more than once: every released resource is
 * reset afterwards (the original left input_attrs/output_attrs dangling,
 * so a manual call followed by the destructor caused a double free).
 *
 * @return NN_SUCCESS always
 */
nn_error_e Yolov8Custom::Releaserknn()
{
    if (app_ctx_.rknn_ctx != 0)
    {
        NN_LOG_INFO("release rknn context");
        rknn_destroy(app_ctx_.rknn_ctx);
        app_ctx_.rknn_ctx = 0;
    }
    if (app_ctx_.input_attrs != nullptr)
    {
        NN_LOG_INFO("release input_attrs");
        free(app_ctx_.input_attrs);
        app_ctx_.input_attrs = nullptr; // prevent double free on repeated calls
    }
    if (app_ctx_.output_attrs != nullptr)
    {
        NN_LOG_INFO("release output_attrs");
        free(app_ctx_.output_attrs);
        app_ctx_.output_attrs = nullptr; // prevent double free on repeated calls
    }
    return NN_SUCCESS;
}