#include <pthread.h>
#include <unistd.h>
#include <dirent.h>

#include <stdio.h>
#include <stdlib.h>

#include <cfloat>
#include <chrono>
#include <cmath>

#include <condition_variable>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

#include <opencv2/opencv.hpp>

#include "imu_track/imu_track.h"
#include "imu_track/track.h"
#include "utils/global_var.h"
#include "utils/BasicType.h"
#include "imu_smooth/default.h"
#include "rolling_shutter_correct.h"


#define BUFFER_SIZE 30000
#define CUSTOMER_BUFFER_SIZE 200
#define SAMPLE_RATE 200
#define TIME_GAP 94246
#define NOGT 0
#define IMGWIDTH 746
#define IMGHEIGHT 640
#define IMG_WIDTH 746
#define IMG_HEIGHT 640
#define THREAD_COUNT 4

#define FLOAT_POINT 1

Queue quaternion_queue;
Queue img_imu_queue;
pthread_t producer_thread, rolling_thread;




float gyrBiasSet[3] = { 0.973, 0.227, 0.76 };

// img dir path
//char *imageFolder = "D:\\camera_reader\\img\\dataset_001";
char *imageFolder = "D:\\camera_reader\\img\\dataset_gt\\pic";
//char *imageFolder = "D:\\camera_reader\\img\\eis_video\\1104pic";

// sensor_data.log path
char logPath[] = "D:\\camera_reader\\img\\dataset_gt\\gt.txt";

// the imu pose result path
// qTracker.q0, qTracker.q1, qTracker.q2, qTracker.q3, qSmooth.q0, qSmooth.q1, qSmooth.q2, qSmooth.q3, curr_time
char orientLogPath[] = "D:\\camera_reader\\img\\dataset_gt\\orient_result.log";
char matrixLogPath[] = "D:\\camera_reader\\img\\dataset_gt\\matrix_result.log";


typedef struct {
    unsigned char *srcY, *srcU, *srcV;
    unsigned char *dstY, *dstU, *dstV;
    int start_row;
    int end_row;
    int thread_id;
    int is_task_ready;
    int image_width;
    int image_height;
    MatrixCD M;
} ThreadTask;

pthread_mutex_t mutex;
pthread_cond_t cond;
int tasks_done = 0;
ThreadTask tasks[THREAD_COUNT];



//############################################# functions #######################################################//

// void *producer(void *arg) {
//     //读取文件，计算imu
//     //1. 初始化
//     // 1.1 初始化 complementary_filter结构体
//     ComplementaryFilterV2 *filter = (ComplementaryFilterV2*)malloc(sizeof(ComplementaryFilterV2));
//     initComplementaryFilter(filter);
//     Quaternion qTracker;
//     Quaternion qSmooth;
//     Quaternion qSmoothRotate;
//     Quaternion qAcc;

//     // 1.2 初始化时间戳
//     float curr_time = 0.0f;
//     float prev_time = 0.0f;
//     float dt;

//     // 1.3 初始化局部变量
//     float phi, n, ny, nz;
//     float smoothAlpha;
//     float angVelGyr, filtAngVel, angVel;
//     Pose3 pose;

//     smoothAlpha = get_alpha(0.25f, 1000.f);

//     // 1.4 初始化数据文件路径 和 结果保存文件路径 -- windows仿真环境
// //    char logPath[] = "imu_data.log";

//     FILE* fpImuLog, *fpPoseLog;
//     ImuLogLine imuData;
//     ImuLogLineGT imuDatagt;
//     SensorImu sensor;
//     if (fopen_s(&fpImuLog, logPath, "r") != 0) {
//         printf("Couldn't open the file.");
//         pthread_exit(NULL);
//     }

//     if (fopen_s(&fpPoseLog, orientLogPath, "w") != 0) {
//         printf("Error opening file.\n");
//         pthread_exit(NULL);
//     }

//     //2. 循环读取文件，获取imu数值,执行本次计算，结果写入文件
//     int cccount = 0;
//     int trackInited = 0;

// #if NOGT
//     while (fscanf(fpImuLog, "%d %d %d %d %d %d %d %d", &imuData.timeStamp, &imuData.AccX, &imuData.AccY, &imuData.AccZ, &imuData.GyroX,
//                   &imuData.GyroY, &imuData.GyroZ, &imuData.temp) != EOF) {
//         cccount++;
//         sensor.a_x = imuData.AccX ;
//         sensor.a_y = imuData.AccY ;
//         sensor.a_z = imuData.AccZ ;
//         sensor.g_x = imuData.GyroX;
//         sensor.g_y = imuData.GyroY;
//         sensor.g_z = imuData.GyroZ;

//         sensor.a_x = sensor.a_x / 1000;
//         sensor.a_y = sensor.a_y / 1000;
//         sensor.a_z = sensor.a_z / 1000;
//         sensor.g_x = sensor.g_x / 1000 - gyrBiasSet[0];
//         sensor.g_y = sensor.g_y / 1000 - gyrBiasSet[1];
//         sensor.g_z = sensor.g_z / 1000 - gyrBiasSet[2];

//         // 计算deltaT
//         curr_time = imuData.timeStamp;
//         dt = (curr_time - prev_time) / (float)1000;
//         prev_time = curr_time;
//     }
// #else

//     double t_updateQuaternionComp = 0.0f;
//     double t_slerp = 0.0f;
//     double t_calculateRotationQuaternion = 0.0f;


//     while (fscanf(fpImuLog, "%d %d %f %f %f %f %f %f", &imuDatagt.timeStamp, &imuDatagt.picTimeStamp,
//                                                        &imuDatagt.AccX, &imuDatagt.AccY, &imuDatagt.AccZ,
//                                                        &imuDatagt.GyroX, &imuDatagt.GyroY, &imuDatagt.GyroZ) != EOF) {
//         cccount++;
//         sensor.a_x = imuDatagt.AccX / 9.8f;
//         sensor.a_y = imuDatagt.AccY / 9.8f;
//         sensor.a_z = imuDatagt.AccZ / 9.8f;
//         sensor.g_x = imuDatagt.GyroX * 57.3f;
//         sensor.g_y = imuDatagt.GyroY * 57.3f;
//         sensor.g_z = imuDatagt.GyroZ * 57.3f;
// //        printf("%d\t%d\t%f\t%f\n", imuDatagt.timeStamp, imuDatagt.picTimeStamp, sensor.a_x, sensor.a_y);
// //        fflush(stdout);

//         // 计算deltaT
//         curr_time = (float)imuDatagt.timeStamp;
//         dt = (curr_time - prev_time) / 1000.f;
//         prev_time = curr_time;

// #endif

//         //计算每次方向与速度 -- ht
//         if(trackInited == 0) {
//             setQuaternion(&qAcc, 0.0f, sensor.a_x, sensor.a_y, sensor.a_z);
//             normalize(&qAcc);
//             phi = acosf(fabsf(qAcc.q1)) * 180.f / PI;
//             if(qAcc.q1 > 0.0f){
//                 phi = 180.f - phi;
//             }
//             float aixs_y = qAcc.q1*qAcc.q3;
//             float aixs_z = -qAcc.q1*qAcc.q2;
//             n = sqrtf(aixs_y*aixs_y + aixs_z*aixs_z);
//             ny = aixs_y / n;
//             nz = aixs_z / n;
//             // maybe not accurate, need two rotate, one rotate is x to overlaps, one rotate is yz to overlaps
//             setFromAngleAxis(&qTracker, phi, 0.0f, ny, nz);
//             setQuaternion(&qSmooth, qTracker.q0, qTracker.q1, qTracker.q2, qTracker.q3);
//             filtAngVel = sqrtf(sensor.g_x*sensor.g_x +  sensor.g_y*sensor.g_y + sensor.g_z*sensor.g_z);
//             angVel = 0;
//             trackInited = 1;

//             // qTracker.q0 = 1.0f; qTracker.q1 = 0.0f; qTracker.q2 = 0.0f; qTracker.q3 = 0.0f;
//             // qSmooth.q0 = 1.0f; qSmooth.q1 = 0.0f; qSmooth.q2 = 0.0f; qSmooth.q3 = 0.0f;

//         } else {
//             angVelGyr = sqrtf(sensor.g_x*sensor.g_x +  sensor.g_y*sensor.g_y + sensor.g_z*sensor.g_z);

//             updateQuaternionComp(&qTracker, angVelGyr,
//                                  sensor.g_x, sensor.g_y, sensor.g_z,
//                                  sensor.a_x, sensor.a_y, sensor.a_z,
//                                  dt);

//             slerp(&qSmooth, &qTracker, smoothAlpha, &qSmooth);

//             calculateRotationQuaternion(&qSmooth, &qTracker, &qSmoothRotate);

// //            pose = quaternion2EularAngles(qTracker);
//         }
// #if NOGT
//         while (!enqueue(&quaternion_queue, qTracker, qSmoothRotate, imuData.timeStamp - TIME_GAP)) {
//             usleep(10000); // 10ms
//             printf("insert failed: lenght %d, timeStamp: %d\n", quaternion_queue.length, imuData.timeStamp);
//             fflush(stdout);
//         }
// #else
//         if(imuDatagt.picTimeStamp != 0) {
//             enqueue(&quaternion_queue, qTracker, qSmoothRotate, imuDatagt.picTimeStamp);
//         }

// #endif


//         //记录每步计算结果并写入文件
// //        fprintf(fpPoseLog, "%f\t%f\t%f\t%f\t%f\n",
// //                filter->q->q0, filter->q->q1, filter->q->q2, filter->q->q3, filter->time);

// //        fprintf(fpPoseLog, "%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",
// //                qTracker.q0, qTracker.q1, qTracker.q2, qTracker.q3, dt, angVelGyr,
// //                pose.yaw * 57.3, pose.pitch * 57.3, pose.roll * 57.3);

//         fprintf(fpPoseLog, "%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",
//                 qTracker.q0, qTracker.q1, qTracker.q2, qTracker.q3, qSmooth.q0, qSmooth.q1, qSmooth.q2, qSmooth.q3, curr_time);

// //          fprintf(fpPoseLog, "%f\t%f\t%f\n", pose.yaw, pose.pitch, pose.roll);
//     }
//     printf("############ end of producer ##############\n");
//     fflush(stdout);
//     pthread_exit(NULL);
// }

float compute_scale_factor(int cols, int rows, Matrix3 correct_matrix)
{
    int width_x = cols;
    int height_y = rows;
    float warp_x,warp_y,warp_z;
    float center_x = (float)width_x / 2.f;
    float center_y = (float)height_y / 2.f;
    float min_disx = FLT_MAX;
    float min_disy = FLT_MAX;
    float disx,disy;
    float out_inv_aspect = (float)height_y / (float)width_x;
    for(int i = 0; i < width_x; i++) {
        float j = 0.f;
        warp_x = (float)i * correct_matrix.data[0][0] + (float)j * correct_matrix.data[0][1] + correct_matrix.data[0][2];
        warp_y = (float)i * correct_matrix.data[1][0] + (float)j * correct_matrix.data[1][1] + correct_matrix.data[1][2];
        warp_z = (float)i * correct_matrix.data[2][0] + (float)j * correct_matrix.data[2][1] + correct_matrix.data[2][2];
        warp_x /= warp_z;
        warp_y /= warp_z;
        disx = fabs(warp_x-center_x);
        disy = fabs(warp_y-center_y);
        if( (disx<min_disx) && (disy<min_disy) )
        {
            if(disy > (disx * out_inv_aspect))
            {
                min_disx = disy / out_inv_aspect;
                min_disy = disy;
            }
            else
            {
                min_disx = disx;
                min_disy = disx * out_inv_aspect;
            }
        }
    }

    for(int i = 0; i < width_x; i++){
        float j = height_y - 1.f;
        warp_x = (float)i * correct_matrix.data[0][0] + (float)j * correct_matrix.data[0][1] + correct_matrix.data[0][2];
        warp_y = (float)i * correct_matrix.data[1][0] + (float)j * correct_matrix.data[1][1] + correct_matrix.data[1][2];
        warp_z = (float)i * correct_matrix.data[2][0] + (float)j * correct_matrix.data[2][1] + correct_matrix.data[2][2];
        warp_x /= warp_z;
        warp_y /= warp_z;
        disx = fabs(warp_x-center_x);
        disy = fabs(warp_y-center_y);
        if( (disx<min_disx) && (disy<min_disy) ){
            if(disy > (disx * out_inv_aspect)){
                min_disx = disy / out_inv_aspect;
                min_disy = disy;
            }
            else{
                min_disx = disx;
                min_disy = disx * out_inv_aspect;
            }
        }
    }

    for(int j = 0; j < height_y; j++){
        float i = width_x - 1.f;
        warp_x = (float)i * correct_matrix.data[0][0] + (float)j * correct_matrix.data[0][1] + correct_matrix.data[0][2];
        warp_y = (float)i * correct_matrix.data[1][0] + (float)j * correct_matrix.data[1][1] + correct_matrix.data[1][2];
        warp_z = (float)i * correct_matrix.data[2][0] + (float)j * correct_matrix.data[2][1] + correct_matrix.data[2][2];
        warp_x /= warp_z;
        warp_y /= warp_z;
        disx = fabs(warp_x-center_x);
        disy = fabs(warp_y-center_y);
        if( (disx<min_disx) && (disy<min_disy) ){
            if(disy > (disx * out_inv_aspect)){
                min_disx = disy / out_inv_aspect;
                min_disy = disy;
            }
            else{
                min_disx = disx;
                min_disy = disx * out_inv_aspect;
            }
        }
    }

    for(int j = 0; j < height_y; j++){
        float i = 0.f;
        warp_x = (float)i * correct_matrix.data[0][0] + (float)j * correct_matrix.data[0][1] + correct_matrix.data[0][2];
        warp_y = (float)i * correct_matrix.data[1][0] + (float)j * correct_matrix.data[1][1] + correct_matrix.data[1][2];
        warp_z = (float)i * correct_matrix.data[2][0] + (float)j * correct_matrix.data[2][1] + correct_matrix.data[2][2];
        warp_x /= warp_z;
        warp_y /= warp_z;
        disx = fabs(warp_x-center_x);
        disy = fabs(warp_y-center_y);
        if( (disx<min_disx) && (disy<min_disy) ){
            if(disy > (disx * out_inv_aspect)){
                min_disx = disy / out_inv_aspect;
                min_disy = disy;
            }
            else{
                min_disx = disx;
                min_disy = disx * out_inv_aspect;
            }
        }
    }

    float scale_factor = (0.5f * (float)cols) / min_disx;
    return scale_factor;
}


void *process_quarter(void *arg) {

    int thread_id = *(int *)arg;

    while (1) {
        pthread_mutex_lock(&mutex);

        // 等待任务准备好
        while (!tasks[thread_id].is_task_ready) {
            pthread_cond_wait(&cond, &mutex);
        }

        // 检查是否退出
        // main中，处理完所有图片后，会设施srcY == null、is_task_ready=1,以便跳过上面的等待，走这里的判断条件，退出线程
        if (tasks[thread_id].srcY == NULL) {
            pthread_mutex_unlock(&mutex);
            break;
        }
        pthread_mutex_unlock(&mutex);
        ScalarCD borderValue = {{0, 0, 0, 0}};


        // 执行任务
        printf("Thread %d: Processing rows %d to %d\n", thread_id, tasks[thread_id].start_row, tasks[thread_id].end_row);
        warpPerspectiveQuarter(tasks[thread_id].srcY, tasks[thread_id].dstY,
                               tasks[thread_id].srcU, tasks[thread_id].dstU,
                               tasks[thread_id].srcV, tasks[thread_id].dstV,
                               tasks[thread_id].start_row, tasks[thread_id].end_row,
                               tasks[thread_id].image_width, tasks[thread_id].image_height,
                               &(tasks[thread_id].M),CD_INTER_LINEAR, CD_BORDER_CONSTANT, &borderValue);

        pthread_mutex_lock(&mutex);
        // 标记完成任务
        tasks[thread_id].is_task_ready = 0;
        tasks_done++;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&mutex);
    }

    printf("Thread %d: Exiting.\n", thread_id);
    return NULL;

}


void handle_image(unsigned char *srcY, unsigned char *srcU, unsigned char *srcV,
                  unsigned char *dstY, unsigned char *dstU, unsigned char *dstV,
                  int width, int height) {

    double M_data[9] = {0.0};
    // test: 计算旋转30度的透视变换矩阵
    double angle = 5.0 * CV_PI / 180.0;
    double alpha = cos(angle);
    double beta = sin(angle);

    M_data[0] = alpha;
    M_data[1] = beta;
    M_data[2] = (1 - alpha) * (width / 2) - beta * (height / 2);
    M_data[3] = -beta;
    M_data[4] = alpha;
    M_data[5] = beta * (width / 2) + (1 - alpha) * (height / 2);
    M_data[6] = 0;
    M_data[7] = 0;
    M_data[8] = 1;
    MatrixCD M = {3, 3, M_data};



    std::chrono::microseconds totalElapsed(0);

    pthread_mutex_lock(&mutex);
    auto start = std::chrono::high_resolution_clock::now();

    // 初始化任务
    int rows_per_thread = height / THREAD_COUNT;
    for (int i = 0; i < THREAD_COUNT; i++) {
        tasks[i].srcY = srcY;
        tasks[i].srcU = srcU;
        tasks[i].srcV = srcV;
        tasks[i].dstY = dstY;
        tasks[i].dstU = dstU;
        tasks[i].dstV = dstV;
        tasks[i].start_row = i * rows_per_thread;
//        tasks[i].end_row = (i + 1) * rows_per_thread - 1;
        tasks[i].end_row = (i + 1) * rows_per_thread;
        tasks[i].image_height = height;
        tasks[i].image_width = width;
        tasks[i].is_task_ready = 1;
        tasks[i].M = M;
    }

    // 通知线程开始任务
    tasks_done = 0;
    pthread_cond_broadcast(&cond);

    // 等待所有线程完成
    while (tasks_done < THREAD_COUNT) {
        pthread_cond_wait(&cond, &mutex);
    }

    pthread_mutex_unlock(&mutex);
    auto end = std::chrono::high_resolution_clock::now();
    auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    totalElapsed += elapsed;
    printf("time cost: %lld us\n", totalElapsed.count());



    cv::Mat dst_yuv_image(height + height / 2, width, CV_8UC1);
    memcpy(dst_yuv_image.data, dstY, width * height);
    memcpy(dst_yuv_image.data + width * height, dstU, (width * height) / 4);
    memcpy(dst_yuv_image.data + width * height + (width * height) / 4, dstV, (width * height) / 4);

    cv::cvtColor(dst_yuv_image, dst_yuv_image, cv::COLOR_YUV2BGR_I420);

    char outputName[150];
    snprintf(outputName, sizeof(outputName), "%s\\warp\\warped_image_%d_thread.jpg", imageFolder, 9999);
    cv::imwrite(outputName, dst_yuv_image);

}


int test_main(const char *imagePath, int imageNumber, Matrix3 correct_matrix, std::chrono::microseconds& totalElapsed) {
    char outputName[150];

    // 读取图像
    cv::Mat image = cv::imread(imagePath, cv::IMREAD_COLOR);
    if (image.empty()) {
        std::cerr << "cannot read image!!!" << std::endl;
        return -1;
    }

    double aa = 5.0 * CV_PI / 180.0;
    double alpha = cos(aa);
    double beta = sin(aa);

    int w = image.cols;
    int h = image.rows;

    //saveMatAsHex(warped_image, "D:\\camera_reader\\img\\eis_video\\1104pic\\image_cv_hex.txt");

    // 将图像从BGR转换为YUV420p
    cv::Mat yuv_image;
    cv::cvtColor(image, yuv_image, cv::COLOR_BGR2YUV_I420);

    // 获取YUV通道的指针
    int width = image.cols;
    int height = image.rows;
    unsigned char* y_plane = yuv_image.data;
    unsigned char* u_plane = yuv_image.data + width * height;
    unsigned char* v_plane = u_plane + (width * height) / 4;

    // 创建目标图像
    ImageCD srcY = {width, height, y_plane};
    ImageCD dstY = {width, height, (unsigned char*)malloc(width * height)};
    ImageCD srcU = {width / 2, height / 2, u_plane};
    ImageCD dstU = {width / 2, height / 2, (unsigned char*)malloc((width * height) / 4)};
    ImageCD srcV = {width / 2, height / 2, v_plane};
    ImageCD dstV = {width / 2, height / 2, (unsigned char*)malloc((width * height) / 4)};

    double M_data[9] = {0.0};

#if 1
    // test: 计算旋转30度的透视变换矩阵
    double angle = 5.0 * CV_PI / 180.0;
    alpha = cos(angle);
    beta = sin(angle);

    M_data[0] = alpha;
    M_data[1] = beta;
    M_data[2] = (1 - alpha) * (width / 2) - beta * (height / 2);
    M_data[3] = -beta;
    M_data[4] = alpha;
    M_data[5] = beta * (width / 2) + (1 - alpha) * (height / 2);
    M_data[6] = 0;
    M_data[7] = 0;
    M_data[8] = 1;


#else

    for(int m = 0; m < 3; m++){
        for(int n = 0; n < 3; n++) {
            M_data[m * 3 + n] = correct_matrix.data[m][n];
        }
    }
#endif

    MatrixCD M = {3, 3, M_data};

    // 定义边界值
    ScalarCD borderValue = {{0, 0, 0, 0}};

    auto start = std::chrono::high_resolution_clock::now();
#if FLOAT_POINT
    // 对Y通道应用透视变换
//    warpPerspectiveCD(&srcY, &dstY, &M,CD_INTER_LINEAR, CD_BORDER_CONSTANT, &borderValue);

//    // 对YUV通道应用透视变换
    warpPerspectiveUV(&srcY, &dstY, &srcU, &dstU, &srcV, &dstV, &M,CD_INTER_LINEAR, CD_BORDER_CONSTANT, &borderValue);

#else

    // 对Y通道应用透视变换
    warpPerspectiveFixedPoint(&srcY, &dstY, &M, CD_INTER_LINEAR, CD_BORDER_CONSTANT, &borderValue);

//    // 对U通道应用透视变换
    warpPerspectiveFixedPoint(&srcU, &dstU, &M, CD_INTER_LINEAR, CD_BORDER_CONSTANT, &borderValue);

//    // 对V通道应用透视变换
    warpPerspectiveFixedPoint(&srcV, &dstV, &M, CD_INTER_LINEAR, CD_BORDER_CONSTANT, &borderValue);
#endif
    auto end = std::chrono::high_resolution_clock::now();
    auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
    totalElapsed += elapsed;

    // 将结果保存到文件
    cv::Mat dst_yuv_image(height + height / 2, width, CV_8UC1);
    memcpy(dst_yuv_image.data, dstY.data, width * height);
    memcpy(dst_yuv_image.data + width * height, dstU.data, (width * height) / 4);
    memcpy(dst_yuv_image.data + width * height + (width * height) / 4, dstV.data, (width * height) / 4);

    cv::cvtColor(dst_yuv_image, dst_yuv_image, cv::COLOR_YUV2BGR_I420);

    snprintf(outputName, sizeof(outputName), "%s\\warp\\warped_image_%d_cd.jpg", imageFolder, imageNumber);
    cv::imwrite(outputName, dst_yuv_image);

    //saveMatAsHex(dst_yuv_image, "D:\\camera_reader\\img\\eis_video\\1104pic\\image_cd_hex.txt");

    // 释放内存
    free(dstY.data);
    free(dstU.data);
    free(dstV.data);

    return 0;
}

//因输入图片格式限制，直接使用opencv方法
void processImage(const char *imagePath, int imageNumber, Matrix3 correct_matrix, std::chrono::microseconds& totalElapsed) {

    // 读取图像
    cv::Mat image = cv::imread(imagePath, cv::IMREAD_COLOR);
    if (image.empty()) {
        printf("Could not read the image: %s\n", imagePath);
        return;
    }

    // 创建矩阵
    cv::Mat M(3, 3, CV_32F);
    M.at<float>(0, 0) = correct_matrix.data[0][0];
    M.at<float>(0, 1) = correct_matrix.data[0][1];
    M.at<float>(0, 2) = correct_matrix.data[0][2];
    M.at<float>(1, 0) = correct_matrix.data[1][0];
    M.at<float>(1, 1) = correct_matrix.data[1][1];
    M.at<float>(1, 2) = correct_matrix.data[1][2];
    M.at<float>(2, 0) = correct_matrix.data[2][0];
    M.at<float>(2, 1) = correct_matrix.data[2][1];
    M.at<float>(2, 2) = correct_matrix.data[2][2];

    //    cv::Mat warped_image;
//    cv::warpPerspective(image, warped_image, M, image.size());

    char outputName[150];
    snprintf(outputName, sizeof(outputName), "%s\\warp\\warped_image_%d.jpg", imageFolder, imageNumber);
//    cv::imwrite(outputName, warped_image);
//    test_main(imagePath, imageNumber, correct_matrix, totalElapsed);

    printf("Image %d processed and saved as %s\n", imageNumber, outputName);
}

// void *rolling(void *arg) {
//     Quaternion q;
//     Quaternion diffQ;
//     int timeStamp = 0;

//     Matrix3 intrinsics;
//     Matrix3 extrinsics_cam_imu;
//     Matrix3 extrinsics_imu_cam;
//     initK(&intrinsics);
//     Matrix3 intrinsics_inv = K_inverse(&intrinsics);
//     initC2I(&extrinsics_imu_cam);
//     extrinsics_cam_imu = matrix_transpose(&extrinsics_imu_cam);

//     FILE* fpMatrixLog;

//     if (fopen_s(&fpMatrixLog, matrixLogPath, "w") != 0) {
//         printf("rolling result: Error opening file.\n");
//         pthread_exit(NULL);
//     }

//     DIR *dir;
//     struct dirent *ent;

//     double t_correct_matrix = 0.0;
//     double t_processImage = 0.0;
//     double t_inv = 0.0;
//     int image_count = 0;
//     int logCount = 0;

//     std::chrono::microseconds totalElapsed(0);

//     if ((dir = opendir(imageFolder)) != NULL) {
//         int imageCount = 0;

//         //遍历路径下图片，逐张处理，并以图片名中提取的数字为图片时间戳
//         while ((ent = readdir(dir)) != NULL) {
//             char *imageName = ent->d_name;
//             const char *ptr = imageName;
//             while (*ptr && !isdigit(*ptr)) {
//                 ptr++;
//             }
//             if (*ptr) {
//                 // 数字字符串转换为数字，为图片时间戳
//                 int number = atoi(ptr);
//                 imageCount++;
//                 while(timeStamp < number) {
//                     //追加单张图片对应的imu队列
//                     bool b = dequeue(&quaternion_queue, &timeStamp, &q, &diffQ);
//                     if(b) {
//                         enqueue(&img_imu_queue, q, diffQ, timeStamp);
//                     }
//                 }
// #if NOGT
//                 /************** bug bypass ***********************/
//                 dequeue(&quaternion_queue, &timeStamp, &q, &diffQ);
//                 enqueue(&img_imu_queue, q, diffQ, timeStamp);
//                 /************** end of bug bypass ****************/
// #endif
//                 Matrix3 correct_matrix =  q_compute_correct_rolling_shutter_matrix(&img_imu_queue,
//                                                                                    number,
//                                                                                    &intrinsics, // 配置参数赋值
//                                                                                    &intrinsics_inv,
//                                                                                    &extrinsics_cam_imu,
//                                                                                    &extrinsics_imu_cam, // 配置参数赋值
//                                                                                    number);
//                 image_count++;

//                 float scale_factor = compute_scale_factor(IMGWIDTH, IMGHEIGHT, correct_matrix);

//                 // 创建三个单位矩阵 M_1, M_2, M_3
//                 Matrix3 M_1 = {{{1, 0, -(float)IMGWIDTH/2.f},
//                                 {0, 1, -(float)IMGHEIGHT/2.f},
//                                 {0, 0, 1}}};

//                 Matrix3 M_2 = {{{scale_factor, 0, 0},
//                                 {0, scale_factor, 0},
//                                 {0, 0, 1}}};

//                 Matrix3 M_3 = {{{1, 0, (float)IMGWIDTH/2.f},
//                                 {0, 1, (float)IMGHEIGHT/2.f},
//                                 {0, 0, 1}}};

//                 Matrix3 temp1, temp2, M_center;
//                 matrix_multiply(&temp1, &M_3, &M_2);
//                 matrix_multiply(&temp2, &temp1, &M_1);
//                 matrix_multiply(&M_center, &temp2, &correct_matrix);

// //                fprintf(fpMatrixLog, "%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n",
// //                        M_center.data[0][0],
// //                        M_center.data[0][1],
// //                        M_center.data[0][2],
// //                        M_center.data[1][0],
// //                        M_center.data[1][1],
// //                        M_center.data[1][2],
// //                        M_center.data[2][0],
// //                        M_center.data[2][1],
// //                        M_center.data[2][2]);


//                 // 清空对应当前帧图片的img_imu_queue
//                 clearQueue(&img_imu_queue);

//                 // 对图片应用修正矩阵，调用opcv方法进行warp
//                 char inputName[150];
//                 snprintf(inputName, sizeof(inputName), "%s\\image_%d.jpg", imageFolder, number);
//                 processImage(inputName, number, M_center, totalElapsed);


//             }
//         }
//         printf("Total images processed: %d\n", imageCount);
//         printf("Total time cost: %lld us\n", totalElapsed.count());

//         closedir(dir);
//     } else {
//         printf("Could not open directory: %s\n", imageFolder);
//     }

//     printf("############ end of consumer ##############\n");
//     fflush(stdout);
//     pthread_exit(NULL);
// }


int main() {

    char *imagePath = "/home/firefly/neon/neon_test/tibet.jpg";
    char *imageWarpPath = "/home/firefly/neon/rkmove";
    char *imageFolderTest = "/home/firefly/neon/rkmove";
    imageFolder = imageFolderTest;
    char inputName[150];
    int number = 9999;
    Matrix3 correct_matrix;
    snprintf(inputName, sizeof(inputName), "%s\\image_%d.jpg", imageWarpPath, number);

    pthread_t threads[THREAD_COUNT];
    int thread_ids[THREAD_COUNT];

    // 初始化
    pthread_mutex_init(&mutex, NULL);
    pthread_cond_init(&cond, NULL);

    // 创建线程
    for (int i = 0; i < THREAD_COUNT; i++) {
        thread_ids[i] = i;
        pthread_create(&threads[i], NULL, process_quarter, &thread_ids[i]);
    }

    // 模拟图像处理
    for (int img_idx = 0; img_idx < 1; img_idx++) {
        printf("Processing image %d...\n", img_idx + 1);

        // 读取图像
        cv::Mat image = cv::imread(imagePath, cv::IMREAD_COLOR);
        if (image.empty()) {
            std::cerr << "cannot read image!!!" << std::endl;
            return -1;
        }
        cv::Mat yuv_image;
        cv::cvtColor(image, yuv_image, cv::COLOR_BGR2YUV_I420);

        int width = image.cols;
        int height = image.rows;
        unsigned char* y_plane = yuv_image.data;
        unsigned char* u_plane = yuv_image.data + width * height;
        unsigned char* v_plane = u_plane + (width * height) / 4;

        unsigned char* dist_y = (unsigned char*)malloc(width * height);
        unsigned char* dist_u = (unsigned char*)malloc((width * height) / 4);
        unsigned char* dist_v = (unsigned char*)malloc((width * height) / 4);

        handle_image(y_plane, u_plane, v_plane, dist_y, dist_u, dist_v, width, height);

        // 保存或进一步处理图像
        printf("Image %d processed.\n", img_idx + 1);

        free(dist_y);
        free(dist_u);
        free(dist_v);

    }

    // 退出线程
    pthread_mutex_lock(&mutex);
    for (int i = 0; i < THREAD_COUNT; i++) {
        tasks[i].srcY = NULL; // NULL 表示退出信号
        tasks[i].is_task_ready = 1;
    }
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&mutex);

    // 等待线程结束
    for (int i = 0; i < THREAD_COUNT; i++) {
        pthread_join(threads[i], NULL);
    }

    pthread_mutex_destroy(&mutex);
    pthread_cond_destroy(&cond);

    printf("All images processed and threads exited.\n");
    return 0;




////
////    预处理-初始化warp图片保存路径
//    warpDirInit();
//
////    初始化队列，设置最大队列长度为 30000(因为imu处理太快了，图像消费队列来不及)
//    queue_init(&quaternion_queue, BUFFER_SIZE);
//    queue_init(&img_imu_queue, CUSTOMER_BUFFER_SIZE);
//
////    创建生产者（imu位姿估计）和消费者（rolling）线程
//    pthread_create(&producer_thread, NULL, producer, NULL);
//    pthread_create(&rolling_thread, NULL, rolling, NULL);
//
////    主线程等待线程完成
//    pthread_join(producer_thread, NULL);
//    pthread_join(rolling_thread, NULL);

    return 0;
}
