#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <stdbool.h>

#include "rknn_engine.h"
#include "util.h"

// Class names corresponding to the model's output class indices
// (0 -> "hand-raising", 1 -> "read", 2 -> "write").
// NOTE(review): order must match the class channel order of the trained model
// and the NUM_CLASSES constant from the header — confirm against training config.
static const char *CLASS_NAMES[] = {"hand-raising", "read", "write"};

// Intermediate structure for raw detection candidates before NMS.
// Corner-format box: (x1, y1) = top-left, (x2, y2) = bottom-right, expressed in
// model-input (letterboxed) pixel space; post_process() maps them back to the
// original frame afterwards.
typedef struct
{
    float x1, y1, x2, y2; // box corners in model-input space
    float score;          // best dequantized class confidence
    int class_id;         // index into CLASS_NAMES
} candidate_box_t;


// --- Helper Functions ---

/**
 * @brief Reads an entire file into a newly allocated buffer.
 *
 * @param file_path Path to the file to read.
 * @param file_size Out: set to the file size in bytes on success; untouched on failure.
 * @return Pointer to the allocated buffer with the file content, or NULL on failure.
 *         The caller owns the buffer and must free() it.
 */
static unsigned char *read_file(const char *file_path, int *file_size)
{
    FILE *fp = fopen(file_path, "rb");
    if (fp == NULL)
    {
        fprintf(stderr, "ERROR: fopen %s failed.\n", file_path);
        return NULL;
    }

    // BUG FIX: the original ignored fseek/ftell failures; ftell returns -1 on
    // error (e.g. for non-seekable streams), which previously became a negative
    // malloc size.
    if (fseek(fp, 0, SEEK_END) != 0)
    {
        fprintf(stderr, "ERROR: fseek %s failed.\n", file_path);
        fclose(fp);
        return NULL;
    }
    long size = ftell(fp);
    if (size < 0)
    {
        fprintf(stderr, "ERROR: ftell %s failed.\n", file_path);
        fclose(fp);
        return NULL;
    }
    if (fseek(fp, 0, SEEK_SET) != 0)
    {
        fprintf(stderr, "ERROR: fseek %s failed.\n", file_path);
        fclose(fp);
        return NULL;
    }

    unsigned char *buffer = (unsigned char *)malloc((size_t)size);
    if (buffer == NULL)
    {
        fprintf(stderr, "ERROR: malloc size %ld failed.\n", size);
        fclose(fp);
        return NULL;
    }

    // fread returns size_t; compare in size_t to avoid signed/unsigned mixing.
    size_t read_size = fread(buffer, 1, (size_t)size, fp);
    if (read_size != (size_t)size)
    {
        fprintf(stderr, "ERROR: fread failed, read %zu != %ld\n", read_size, size);
        free(buffer);
        fclose(fp);
        return NULL;
    }

    *file_size = (int)size;
    fclose(fp);
    return buffer;
}


/**
 * @brief Converts a quantized int8 value back to float.
 *
 * Applies the affine dequantization formula: (value - zero_point) * scale.
 */
static inline float dequantize(int8_t value, int32_t zero_point, float scale)
{
    float centered = (float)value - (float)zero_point;
    return centered * scale;
}

/**
 * @brief Clamps a value into the inclusive range [min, max].
 */
static inline float clamp(float val, float min, float max)
{
    if (val < min)
        return min;
    if (val > max)
        return max;
    return val;
}

/**
 * @brief Calculates Intersection over Union (IoU) between two corner-format boxes.
 *
 * @return inter_area / union_area, or 0.0f when the union area is non-positive
 *         (degenerate boxes).
 */
static float calculate_iou(const candidate_box_t a, const candidate_box_t b)
{
    float area_a = (a.x2 - a.x1) * (a.y2 - a.y1);
    // BUG FIX: the original computed b's height as (b.y2 - a.y1), mixing the
    // two boxes and corrupting every IoU value fed into NMS.
    float area_b = (b.x2 - b.x1) * (b.y2 - b.y1);

    float inter_left = fmaxf(a.x1, b.x1);
    float inter_top = fmaxf(a.y1, b.y1);
    float inter_right = fminf(a.x2, b.x2);
    float inter_bottom = fminf(a.y2, b.y2);

    // Negative extents mean no overlap; clamp to zero.
    float inter_width = fmaxf(0.0f, inter_right - inter_left);
    float inter_height = fmaxf(0.0f, inter_bottom - inter_top);
    float inter_area = inter_width * inter_height;

    float union_area = area_a + area_b - inter_area;
    return union_area > 0.0f ? inter_area / union_area : 0.0f;
}

// qsort comparison function for sorting boxes by score in descending order.
static int compare_boxes(const void *a, const void *b)
{
    float score_a = ((candidate_box_t *)a)->score;
    float score_b = ((candidate_box_t *)b)->score;
    if (score_a < score_b) return 1;
    if (score_a > score_b) return -1;
    return 0;
}


/**
 * @brief Performs Non-Maximum Suppression (NMS) on detection candidates.
 *
 * Boxes are sorted by score (descending); each surviving box suppresses lower
 * scored boxes of the SAME class whose IoU exceeds nms_threshold. Kept boxes
 * are appended to final_results, capped at MAX_DETECT_RESULTS.
 *
 * @param boxes         Candidate array; reordered in place by the sort.
 * @param total_boxes   Number of entries in boxes.
 * @param nms_threshold IoU above which a lower-scored same-class box is dropped.
 * @param final_results Output group; count is advanced for each kept box.
 */
static void nms(candidate_box_t *boxes, int total_boxes, float nms_threshold, detect_result_group_t *final_results)
{
    if (total_boxes <= 0)
        return;

    // Highest confidence first, so each kept box dominates what follows.
    qsort(boxes, total_boxes, sizeof(candidate_box_t), compare_boxes);

    bool *suppressed = (bool *)calloc(total_boxes, sizeof(bool));
    if (suppressed == NULL) {
        fprintf(stderr, "ERROR: Failed to allocate memory for NMS.\n");
        return;
    }

    for (int i = 0; i < total_boxes; i++) {
        if (suppressed[i])
            continue;

        // This box survives; record it if there is still room.
        if (final_results->count < MAX_DETECT_RESULTS) {
            const candidate_box_t *kept = &boxes[i];
            detect_result_t *out = &final_results->results[final_results->count++];
            out->left = (int)kept->x1;
            out->top = (int)kept->y1;
            out->right = (int)kept->x2;
            out->bottom = (int)kept->y2;
            out->confidence = kept->score;
            out->class_id = kept->class_id;
            out->class_name = CLASS_NAMES[kept->class_id];
        }

        // Drop heavily-overlapping, lower-scored boxes of the same class.
        for (int j = i + 1; j < total_boxes; j++) {
            if (suppressed[j])
                continue;
            if (boxes[j].class_id != boxes[i].class_id)
                continue;
            if (calculate_iou(boxes[i], boxes[j]) > nms_threshold)
                suppressed[j] = true;
        }
    }

    free(suppressed);
}


/**
 * @brief Main post-processing function for the YOLOv8 model.
 *
 * Decodes the two quantized output tensors (boxes and class scores), filters
 * candidates by confidence, performs per-class NMS, and maps the surviving
 * boxes from the letterboxed model-input space back to the original frame
 * size. This is a direct C implementation of the reference Python logic.
 *
 * @param eng_ctx      Engine context holding output tensor attrs and memory.
 * @param results      Output group; zeroed first, then filled with detections.
 * @param frame_width  Width of the original (pre-letterbox) frame in pixels.
 * @param frame_height Height of the original (pre-letterbox) frame in pixels.
 * @return 0 on success, -1 on failure (unexpected output layout or OOM).
 */
static int post_process(rknn_engine_context_t *eng_ctx, detect_result_group_t *results,
                        int frame_width, int frame_height)
{
    memset(results, 0, sizeof(detect_result_group_t));

    // This decoder only understands the split-head layout with exactly 2 outputs.
    if (eng_ctx->io_num.n_output != 2) {
        fprintf(stderr, "ERROR: This post-process logic requires a model with 2 outputs, but got %d\n", eng_ctx->io_num.n_output);
        return -1;
    }

    rknn_tensor_attr *box_attr, *cls_attr;
    int8_t *box_tensor, *cls_tensor;

    // Identify box vs. class tensors dynamically from their channel counts:
    // the box tensor has 4 channels (cx, cy, w, h), the class tensor has
    // NUM_CLASSES channels. Output ordering is not assumed, so both are tried.
    if (eng_ctx->output_attrs[0].dims[1] == 4 && eng_ctx->output_attrs[1].dims[1] == NUM_CLASSES) {
        box_attr = &eng_ctx->output_attrs[0];
        cls_attr = &eng_ctx->output_attrs[1];
        box_tensor = (int8_t *)eng_ctx->output_mems[0]->virt_addr;
        cls_tensor = (int8_t *)eng_ctx->output_mems[1]->virt_addr;
    } else if (eng_ctx->output_attrs[1].dims[1] == 4 && eng_ctx->output_attrs[0].dims[1] == NUM_CLASSES) {
        box_attr = &eng_ctx->output_attrs[1];
        cls_attr = &eng_ctx->output_attrs[0];
        box_tensor = (int8_t *)eng_ctx->output_mems[1]->virt_addr;
        cls_tensor = (int8_t *)eng_ctx->output_mems[0]->virt_addr;
    } else {
        fprintf(stderr, "ERROR: Failed to identify box and class tensors from output shapes.\n");
        fprintf(stderr, "       Output 0 channels: %d, Output 1 channels: %d\n", eng_ctx->output_attrs[0].dims[1], eng_ctx->output_attrs[1].dims[1]);
        return -1;
    }

    int num_proposals = box_attr->dims[2]; // typically 8400 for YOLOv8
    int num_classes = cls_attr->dims[1];

    if (num_classes != NUM_CLASSES) {
        fprintf(stderr, "ERROR: Model NUM_CLASSES (%d) differs from code's NUM_CLASSES (%d).\n", num_classes, NUM_CLASSES);
        return -1;
    }

    // Worst case: every proposal survives the confidence filter.
    candidate_box_t *candidate_boxes = (candidate_box_t *)malloc(num_proposals * sizeof(candidate_box_t));
    if (!candidate_boxes) {
        fprintf(stderr, "ERROR: Failed to malloc candidate_boxes.\n");
        return -1;
    }
    int candidate_count = 0;

    // Decode proposals and keep only high-confidence candidates.
    for (int i = 0; i < num_proposals; i++) {
        // Find the best class score for this proposal.
        float max_score = -1.0f;
        int max_class_id = -1;
        for (int j = 0; j < num_classes; j++) {
            // The class tensor layout is [1, num_classes, num_proposals], so the
            // scores of proposal i are strided by num_proposals.
            int8_t score_val_s8 = cls_tensor[j * num_proposals + i];
            float score = dequantize(score_val_s8, cls_attr->zp, cls_attr->scale);
            if (score > max_score) {
                max_score = score;
                max_class_id = j;
            }
        }

        // Keep only candidates above the confidence threshold.
        if (max_score >= CONF_THRESHOLD) {
            // Dequantize box coordinates (cx, cy, w, h); layout is [1, 4, num_proposals].
            float cx = dequantize(box_tensor[0 * num_proposals + i], box_attr->zp, box_attr->scale);
            float cy = dequantize(box_tensor[1 * num_proposals + i], box_attr->zp, box_attr->scale);
            float w  = dequantize(box_tensor[2 * num_proposals + i], box_attr->zp, box_attr->scale);
            float h  = dequantize(box_tensor[3 * num_proposals + i], box_attr->zp, box_attr->scale);

            // Convert center+size to corner format and store in the candidate list.
            candidate_boxes[candidate_count].x1 = cx - w / 2.0f;
            candidate_boxes[candidate_count].y1 = cy - h / 2.0f;
            candidate_boxes[candidate_count].x2 = cx + w / 2.0f;
            candidate_boxes[candidate_count].y2 = cy + h / 2.0f;
            candidate_boxes[candidate_count].score = max_score;
            candidate_boxes[candidate_count].class_id = max_class_id;
            candidate_count++;
        }
    }

    // Run Non-Maximum Suppression; kept boxes land in results.
    if (candidate_count > 0) {
        nms(candidate_boxes, candidate_count, NMS_THRESHOLD, results);
    }
    free(candidate_boxes);

    // Undo the letterbox transform: scale and padding must mirror the values
    // used when the frame was resized into the model input.
    // NOTE(review): assumes MODEL_INPUT_WIDTH/HEIGHT match the loaded model's
    // actual input size (see model_width/model_height in init) — confirm.
    float scale = fminf((float)MODEL_INPUT_WIDTH / (float)frame_width, (float)MODEL_INPUT_HEIGHT / (float)frame_height);
    float pad_x = ((float)MODEL_INPUT_WIDTH - (float)frame_width * scale) / 2.0f;
    float pad_y = ((float)MODEL_INPUT_HEIGHT - (float)frame_height * scale) / 2.0f;


    int final_count = 0;
    for (int i = 0; i < results->count; i++) {
        detect_result_t *res = &results->results[i];

        // Inverse transform: remove padding, then divide out the resize scale.
        float x1 = ((float)res->left - pad_x) / scale;
        float y1 = ((float)res->top - pad_y) / scale;
        float x2 = ((float)res->right - pad_x) / scale;
        float y2 = ((float)res->bottom - pad_y) / scale;

        // Clamp coordinates to the original image bounds.
        res->left   = (int)clamp(x1, 0, frame_width - 1);
        res->top    = (int)clamp(y1, 0, frame_height - 1);
        res->right  = (int)clamp(x2, 0, frame_width - 1);
        res->bottom = (int)clamp(y2, 0, frame_height - 1);

        // Keep only boxes that still have positive width and height.
        if (res->right > res->left && res->bottom > res->top) {
            // Compact valid results to the front of the array in place.
            if (i != final_count) {
                results->results[final_count] = *res;
            }
            final_count++;
        }
    }
    results->count = final_count;

    return 0;
}



// --- Public API Implementation ---

/**
 * @brief Initializes the RKNN engine.
 *
 * Loads the model file, creates the RKNN context, enables dual-core NPU when
 * available, queries input/output tensor attributes, and pre-allocates
 * zero-copy output buffers.
 *
 * @param eng_ctx    Engine context to initialize; fully overwritten.
 * @param model_path Path to the .rknn model file.
 * @return 0 on success, negative on failure. On failure the context may be
 *         partially initialized; callers should invoke rknn_engine_release().
 */
int rknn_engine_init(rknn_engine_context_t *eng_ctx, const char *model_path)
{
    memset(eng_ctx, 0, sizeof(rknn_engine_context_t));

    eng_ctx->model_data = read_file(model_path, &eng_ctx->model_data_size);
    RETURN_ON_FAIL(eng_ctx->model_data == NULL, "Failed to read model file\n");

    int ret = rknn_init(&eng_ctx->ctx, eng_ctx->model_data, eng_ctx->model_data_size, 0, NULL);
    RETURN_ON_FAIL(ret < 0, "rknn_init failed, ret=%d\n", ret);

    // OPTIMIZATION: enable dual-core NPU (cores 0 and 1) on RK3576.
    // Per the RKNN docs, this must be called after rknn_init.
    ret = rknn_set_core_mask(eng_ctx->ctx, RKNN_NPU_CORE_0_1);
    if (ret < 0) {
        // Not fatal: the chip may be single-core. Warn and continue.
        fprintf(stderr, "WARN: rknn_set_core_mask to dual-core failed, ret=%d. Continuing in single-core mode.\n", ret);
    } else {
        printf("NPU dual-core mode enabled (RKNN_NPU_CORE_0_1).\n");
    }

    ret = rknn_query(eng_ctx->ctx, RKNN_QUERY_IN_OUT_NUM, &eng_ctx->io_num, sizeof(eng_ctx->io_num));
    RETURN_ON_FAIL(ret < 0, "rknn_query IO_NUM failed, ret=%d\n", ret);
    printf("Model inputs: %d, outputs: %d\n", eng_ctx->io_num.n_input, eng_ctx->io_num.n_output);
    RETURN_ON_FAIL(eng_ctx->io_num.n_output > MAX_RKNN_OUTPUT_COUNT, "Model output count exceeds max\n");
    RETURN_ON_FAIL(eng_ctx->io_num.n_input != 1 || eng_ctx->io_num.n_output != 2, "ERROR: This application requires a model with 1 input and 2 outputs.\n");

    eng_ctx->input_attrs = (rknn_tensor_attr *)malloc(eng_ctx->io_num.n_input * sizeof(rknn_tensor_attr));
    // BUG FIX: the original never checked these allocations before using them.
    RETURN_ON_FAIL(eng_ctx->input_attrs == NULL, "Failed to malloc input_attrs\n");
    eng_ctx->output_attrs = (rknn_tensor_attr *)malloc(eng_ctx->io_num.n_output * sizeof(rknn_tensor_attr));
    RETURN_ON_FAIL(eng_ctx->output_attrs == NULL, "Failed to malloc output_attrs\n");

    // Query input attributes. Zero each attr first (RKNN sample convention)
    // so no field is left as uninitialized heap garbage.
    for (uint32_t i = 0; i < eng_ctx->io_num.n_input; i++)
    {
        memset(&eng_ctx->input_attrs[i], 0, sizeof(rknn_tensor_attr));
        eng_ctx->input_attrs[i].index = i;
        ret = rknn_query(eng_ctx->ctx, RKNN_QUERY_INPUT_ATTR, &eng_ctx->input_attrs[i], sizeof(rknn_tensor_attr));
        RETURN_ON_FAIL(ret < 0, "rknn_query INPUT_ATTR for input %u failed, ret=%d\n", i, ret);
    }

    // Query output attributes and create zero-copy output memory for each.
    for (uint32_t i = 0; i < eng_ctx->io_num.n_output; i++)
    {
        memset(&eng_ctx->output_attrs[i], 0, sizeof(rknn_tensor_attr));
        eng_ctx->output_attrs[i].index = i;
        ret = rknn_query(eng_ctx->ctx, RKNN_QUERY_OUTPUT_ATTR, &eng_ctx->output_attrs[i], sizeof(rknn_tensor_attr));
        RETURN_ON_FAIL(ret < 0, "rknn_query OUTPUT_ATTR for output %u failed, ret=%d\n", i, ret);

        eng_ctx->output_mems[i] = rknn_create_mem(eng_ctx->ctx, eng_ctx->output_attrs[i].size_with_stride);
        RETURN_ON_FAIL(eng_ctx->output_mems[i] == NULL, "Failed to create output memory for output %u\n", i);
    }

    // Derive the model input geometry from the declared tensor layout.
    if (eng_ctx->input_attrs[0].fmt == RKNN_TENSOR_NCHW)
    {
        printf("Model is NCHW input layout\n");
        eng_ctx->model_channel = eng_ctx->input_attrs[0].dims[1];
        eng_ctx->model_height = eng_ctx->input_attrs[0].dims[2];
        eng_ctx->model_width = eng_ctx->input_attrs[0].dims[3];
    }
    else
    {
        printf("Model is NHWC input layout\n");
        eng_ctx->model_height = eng_ctx->input_attrs[0].dims[1];
        eng_ctx->model_width = eng_ctx->input_attrs[0].dims[2];
        eng_ctx->model_channel = eng_ctx->input_attrs[0].dims[3];
    }
    printf("Model input dims: H=%d, W=%d, C=%d\n", eng_ctx->model_height, eng_ctx->model_width, eng_ctx->model_channel);


    return 0;
}

/**
 * @brief Releases all resources held by the engine context.
 *
 * Safe to call on a partially-initialized context; every pointer is reset so
 * a second call is harmless.
 */
void rknn_engine_release(rknn_engine_context_t *eng_ctx)
{
    // Output memories must be released while the context is still alive.
    for (int i = 0; i < MAX_RKNN_OUTPUT_COUNT; i++)
    {
        if (eng_ctx->output_mems[i] == NULL)
            continue;
        rknn_destroy_mem(eng_ctx->ctx, eng_ctx->output_mems[i]);
        eng_ctx->output_mems[i] = NULL;
    }

    if (eng_ctx->ctx > 0)
    {
        rknn_destroy(eng_ctx->ctx);
        eng_ctx->ctx = 0;
    }

    // free(NULL) is a no-op, so no guards are needed here.
    free(eng_ctx->model_data);
    eng_ctx->model_data = NULL;

    free(eng_ctx->input_attrs);
    eng_ctx->input_attrs = NULL;

    free(eng_ctx->output_attrs);
    eng_ctx->output_attrs = NULL;
}
/**
 * @brief Runs one inference pass and fills `results` with final detections.
 *
 * Binds the caller-provided input memory and the pre-allocated output
 * memories, runs the network, syncs outputs back to the CPU, post-processes
 * them, and prints any detections.
 *
 * @param eng_ctx      Initialized engine context.
 * @param input_mem    Pre-filled input tensor memory (zero-copy).
 * @param results      Output detection group (overwritten).
 * @param frame_width  Original frame width for coordinate un-letterboxing.
 * @param frame_height Original frame height for coordinate un-letterboxing.
 * @return 0 on success, -1 on any failure.
 */
int rknn_engine_run(rknn_engine_context_t *eng_ctx, rknn_tensor_mem *input_mem, detect_result_group_t *results,
                    int frame_width, int frame_height)
{
    // Bind the input tensor.
    int ret = rknn_set_io_mem(eng_ctx->ctx, input_mem, &eng_ctx->input_attrs[0]);
    if (ret < 0)
    {
        fprintf(stderr, "ERROR: rknn_set_io_mem for input failed, ret=%d\n", ret);
        return -1;
    }

    // Bind the output tensors.
    for (uint32_t i = 0; i < eng_ctx->io_num.n_output; i++)
    {
        ret = rknn_set_io_mem(eng_ctx->ctx, eng_ctx->output_mems[i], &eng_ctx->output_attrs[i]);
        if (ret < 0)
        {
            // BUG FIX: i is uint32_t; was printed with %d.
            fprintf(stderr, "ERROR: rknn_set_io_mem for output %u failed, ret=%d\n", i, ret);
            return -1;
        }
    }

    // Run inference.
    ret = rknn_run(eng_ctx->ctx, NULL);
    if (ret < 0)
    {
        fprintf(stderr, "ERROR: rknn_run failed, ret=%d\n", ret);
        return -1;
    }

    // Sync all output memories so the CPU sees the device-written data.
    // BUG FIX: the sync return value was previously ignored; a failed sync
    // means the output buffers may hold stale data.
    for (uint32_t i = 0; i < eng_ctx->io_num.n_output; i++)
    {
        ret = rknn_mem_sync(eng_ctx->ctx, eng_ctx->output_mems[i], RKNN_MEMORY_SYNC_FROM_DEVICE);
        if (ret < 0)
        {
            fprintf(stderr, "ERROR: rknn_mem_sync for output %u failed, ret=%d\n", i, ret);
            return -1;
        }
    }

    // Post-process the raw tensors into final detections.
    // BUG FIX: the post_process return value was previously ignored.
    ret = post_process(eng_ctx, results, frame_width, frame_height);
    if (ret < 0)
    {
        fprintf(stderr, "ERROR: post_process failed, ret=%d\n", ret);
        return -1;
    }

    // Print detections.
    if (results->count > 0)
    {
        printf("--- Detected %d objects ---\n", results->count);
        for (int i = 0; i < results->count; i++)
        {
            detect_result_t *res = &results->results[i];
            printf("  - %s @ [%d %d %d %d] conf: %.2f\n",
                   res->class_name,
                   res->left, res->top,
                   res->right, res->bottom,
                   res->confidence);
        }
    }
    return 0;
}
