#include "exponentialdeepsky_ros_node.h"

// #define ENABLE_DEBUG_TIMING

// Lightweight timing helpers: when ENABLE_DEBUG_TIMING is defined,
// DEBUG_TIMING_START(name) records a ros::Time and DEBUG_TIMING_END(name)
// prints the elapsed wall time in milliseconds to stdout; otherwise both
// macros expand to nothing (zero overhead in normal builds).
#ifdef ENABLE_DEBUG_TIMING
#define DEBUG_TIMING_START(name) ros::Time name##_start_time = ros::Time::now()
#define DEBUG_TIMING_END(name) \
    std::cout << #name " Time: " << (ros::Time::now() - name##_start_time).toSec() * 1000 << " ms" << std::endl
#else
#define DEBUG_TIMING_START(name)
#define DEBUG_TIMING_END(name)
#endif
// Constructs the driver: advertises every image / point-cloud topic, reads
// the node's private ROS parameters, and detects whether the process runs on
// an NVIDIA Orin (which later selects the undistortion-map implementation).
EdsRosDriver::EdsRosDriver() : nh_(), pn_("~") {
    // Publishers, queue size 1: consumers only need the newest frame.
    depth_pub_ = nh_.advertise<sensor_msgs::Image>("/depth_map", 1);
    ir_pub_ = nh_.advertise<sensor_msgs::Image>("/ir_map", 1);
    rgb_pub_ = nh_.advertise<sensor_msgs::Image>("/rgb_map", 1);
    cloud_pub_ = nh_.advertise<sensor_msgs::PointCloud2>("/cloud", 1);
    rgbd_depth_pub_ = nh_.advertise<sensor_msgs::Image>("/rgbd_depth_map", 1);
    rgbd_rgb_pub_ = nh_.advertise<sensor_msgs::Image>("/rgbd_rgb_map", 1);
    rgbd_depth_cloud_pub_ = nh_.advertise<sensor_msgs::PointCloud2>("/rgbd_depth_cloud", 1);

    pn_.param<int>("param_version", param_version_, 1);

    // Feature toggles for the optional RGB and point-cloud pipelines.
    pn_.param<bool>("rgb_open", rgb_open_, false);
    pn_.param<bool>("cloud_open", cloud_open_, false);

    // Camera connection address and exposure setting.
    pn_.param<std::string>("ip", ip_, "192.168.10.100");
    pn_.param<int>("exposure", exposure_, 20);

    // RGBD alignment configuration.
    pn_.param<int>("resolution_height", resolution_height_, 360);
    pn_.param<bool>("cut_flag", cut_flag_, false);
    pn_.param<int>("rgbd_mode", rgbd_mode_, RGBD_CLOSE);

    // Expected sensor resolutions.
    pn_.param<int>("depth_height", depth_height_, 480);
    pn_.param<int>("depth_width", depth_width_, 640);
    pn_.param<int>("rgb_height", rgb_height_, 1080);
    pn_.param<int>("rgb_width", rgb_width_, 1920);

    // Platform detection: Orin uses the hand-rolled undistortion maps,
    // everything else uses cv::initUndistortRectifyMap.
    isRunningOnOrin_ = isRunningOnOrin();
    if (isRunningOnOrin_) {
        std::cout << "Running on NVIDIA Orin" << std::endl;
    } else {
        std::cout << "Not on Orin" << std::endl;
    }
    
}

// Detects an NVIDIA Orin host by inspecting the device-tree model string the
// kernel exposes at /proc/device-tree/model.
// @return true only when the file exists, is fully readable, and contains the
//         substring "Orin"; false otherwise (including on read failure —
//         the original version ignored open/read errors and would scan a
//         zero-filled buffer).
bool EdsRosDriver::isRunningOnOrin() {
    const std::string model_file = "/proc/device-tree/model";
    struct stat st;
    if (stat(model_file.c_str(), &st) != 0 || st.st_size <= 0) {
        return false;  // not a device-tree platform (e.g. x86) or empty file
    }

    std::ifstream file(model_file.c_str(), std::ios::binary);
    if (!file.is_open()) {
        return false;  // exists but unreadable
    }

    std::string model(st.st_size, '\0');
    if (!file.read(&model[0], st.st_size)) {
        return false;  // short or failed read — don't scan garbage
    }
    return (model.find("Orin") != std::string::npos);
}

// Bilinear image resize (hand-rolled alternative to cv::resize), parallelised
// over destination rows with OpenMP. Sampling is corner-aligned
// (srcX = x * srcW/dstW, no half-pixel centre offset), so output differs
// slightly from cv::resize(..., INTER_LINEAR).
// NOTE(review): pixels are addressed through uchar pointers, so this assumes
// an 8-bit element type (CV_8UC1/CV_8UC3) — confirm callers never pass CV_32F.
void EdsRosDriver::myResize(const cv::Mat& src, cv::Mat& dst, const cv::Size& size) {
    const int dstWidth = size.width;
    const int dstHeight = size.height;
    dst.create(dstHeight, dstWidth, src.type());

    const float scaleX = static_cast<float>(src.cols) / dstWidth;
    const float scaleY = static_cast<float>(src.rows) / dstHeight;
    const int channels = src.channels();

    // Precompute source row pointers so the hot loop avoids Mat::ptr() calls.
    std::vector<const uchar*> rows(src.rows);
    for (int y = 0; y < src.rows; ++y) {
        rows[y] = src.ptr<uchar>(y);
    }

    #pragma omp parallel for schedule(static)
    for (int y = 0; y < dstHeight; ++y) {
        uchar* pdst = dst.ptr<uchar>(y);
        const float srcY = y * scaleY;
        const int y1 = static_cast<int>(srcY);
        const int y2 = std::min(y1 + 1, src.rows-1);  // clamp at bottom edge
        const float wy = srcY - y1;  // vertical interpolation weight

        const uchar* srow1 = rows[y1];
        const uchar* srow2 = rows[y2];

        for (int x = 0; x < dstWidth; ++x) {
            const float srcX = x * scaleX;
            const int x1 = static_cast<int>(srcX);
            const int x2 = std::min(x1 + 1, src.cols-1);  // clamp at right edge
            const float wx = srcX - x1;  // horizontal interpolation weight

            // Byte offsets of the left/right source pixels within a row.
            const int px1 = x1 * channels;
            const int px2 = x2 * channels;

            // Blend the four neighbouring pixels, per channel.
            for (int c = 0; c < channels; ++c) {
                const float val = 
                    (1-wx)*(1-wy)*srow1[px1 + c] +
                    wx*(1-wy)*srow1[px2 + c] +
                    (1-wx)*wy*srow2[px1 + c] +
                    wx*wy*srow2[px2 + c];
                
                pdst[x*channels + c] = static_cast<uchar>(val + 0.5f);  // round to nearest
            }
        }
    }
}
// Joint (guide-image) bilateral filter for depth maps: smooths the CV_32F
// `depth` image while the range term is computed from an 8-bit guide image,
// so depth discontinuities follow intensity edges in the guide.
// Depth values of exactly 0 are treated as invalid and excluded from the
// weighted sum (which also lets valid neighbours fill small holes); a window
// with no valid neighbour keeps the original depth value.
//
// @param depth          input depth map, read with at<float> (CV_32F)
// @param guide          guide image, read with at<uchar> (8-bit, 1 channel)
// @param filtered_depth output, CV_32F, same size as depth
// @param kernel_size    window side length; radius = kernel_size / 2
// @param sigma_space    spatial Gaussian sigma
// @param sigma_color    range Gaussian sigma in guide-intensity units
// NOTE(review): assumes guide has the same resolution as depth — the resize
// below is commented out; confirm at the call sites.
void EdsRosDriver::bilateralFilterWithGuide(const cv::Mat& depth, const cv::Mat& guide, cv::Mat& filtered_depth,
                                          int kernel_size, float sigma_space, float sigma_color) {
    filtered_depth = cv::Mat(depth.size(), CV_32F, cv::Scalar(0));
    int radius = kernel_size / 2;
    
    // Precompute the spatial Gaussian weights for the whole window.
    std::vector<float> space_weights(kernel_size * kernel_size);
    const float space_coeff = -1.0f / (2 * sigma_space * sigma_space);
    const float color_coeff = -1.0f / (2 * sigma_color * sigma_color);
    
    // Each iteration writes a distinct element, so the parallel fill is safe.
    #pragma omp parallel for collapse(2)
    for(int i = -radius; i <= radius; i++) {
        for(int j = -radius; j <= radius; j++) {
            float dist = i*i + j*j;
            space_weights[(i+radius)*kernel_size + (j+radius)] = std::exp(dist * space_coeff);
        }
    }

    cv::Mat guide_resized = guide;
    // cv::resize(guide, guide_resized, depth.size(), 0, 0, cv::INTER_LINEAR);

    // Filter every pixel; dynamic scheduling balances rows with many skips.
    #pragma omp parallel for collapse(2) schedule(dynamic)
    for(int y = 0; y < depth.rows; y++) {
        for(int x = 0; x < depth.cols; x++) {
            float sum = 0;
            float weight_sum = 0;
            float center_intensity = guide_resized.at<uchar>(y, x);

            // Iterate over the kernel window (clipped at image borders).
            for(int i = -radius; i <= radius; i++) {
                int ny = y + i;
                if(ny < 0 || ny >= depth.rows) continue;
                
                for(int j = -radius; j <= radius; j++) {
                    int nx = x + j;
                    if(nx < 0 || nx >= depth.cols) continue;

                    float neighbor_depth = depth.at<float>(ny, nx);
                    if(neighbor_depth == 0) continue;  // 0 == invalid depth

                    // Range (colour) weight from the guide-intensity difference.
                    float intensity_diff = center_intensity - guide_resized.at<uchar>(ny, nx);
                    float color_weight = std::exp(intensity_diff * intensity_diff * color_coeff);
                    
                    // Precomputed spatial weight for this window offset.
                    float space_weight = space_weights[(i+radius)*kernel_size + (j+radius)];
                    
                    // Combined bilateral weight.
                    float weight = space_weight * color_weight;
                    
                    sum += neighbor_depth * weight;
                    weight_sum += weight;
                }
            }

            if(weight_sum > 0) {
                filtered_depth.at<float>(y, x) = sum / weight_sum;
            } else {
                // No valid neighbour: keep the original value (may be 0).
                filtered_depth.at<float>(y, x) = depth.at<float>(y, x);
            }
        }
    }
}

// Back-projects a depth image (CV_32F, millimetres) into an organized
// pcl::PointXYZ cloud through a pinhole model, one point per pixel in
// row-major order; out-of-range pixels become NaN points.
//
// @param depth          input depth map matching the selected camera's size
// @param cloud          output cloud; cleared and refilled on every call
// @param is_rgb_camera  true: use the RGB intrinsics, false: use the TOF ones
void EdsRosDriver::depthToPointCloud(const cv::Mat &depth, 
    pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud, bool is_rgb_camera) {
    // Select the requested camera's intrinsics once instead of duplicating
    // the whole projection loop per branch (as the original did).
    const int width   = is_rgb_camera ? mParam.Width_rgb  : mParam.Width_tof;
    const int height  = is_rgb_camera ? mParam.Height_rgb : mParam.Height_tof;
    const auto fx = is_rgb_camera ? mParam.fx_rgb : mParam.fx_tof;
    const auto fy = is_rgb_camera ? mParam.fy_rgb : mParam.fy_tof;
    const auto cx = is_rgb_camera ? mParam.cx_rgb : mParam.cx_tof;
    const auto cy = is_rgb_camera ? mParam.cy_rgb : mParam.cy_tof;

    cloud->width = width;
    cloud->height = height;
    // Bug fix: start from an empty cloud. The push_back-only original would
    // append to stale points on a reused cloud, leaving the point count
    // inconsistent with width * height.
    cloud->points.clear();
    cloud->points.reserve(static_cast<size_t>(width) * height);

    for (int v = 0; v < height; ++v) {
        for (int u = 0; u < width; ++u) {
            float depth_value = depth.at<float>(v, u) / 1000.0f;  // mm -> m
            pcl::PointXYZ point;
            if (depth_value > 0 && depth_value < 10.0) {
                point.z = depth_value;
                point.x = (u - cx) * point.z / fx;
                point.y = (v - cy) * point.z / fy;
            } else {
                // Invalid measurement: NaN keeps the cloud organized.
                point.x = point.y = point.z = std::numeric_limits<float>::quiet_NaN();
            }
            cloud->points.push_back(point);
        }
    }
    cloud->is_dense = false;  // NaN points are present
}

// Converts a TOF depth map into a ROS PointCloud2 message. The intermediate
// PCL cloud is produced through the caller-supplied pointer so it can be
// reused downstream without re-deserialising the message.
void EdsRosDriver::convertDepthToPointCloud2(cv::Mat &depth, sensor_msgs::PointCloud2 &cloud_msg, pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud)
{
    //pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
    depthToPointCloud(depth, cloud, false);  // use the TOF camera intrinsics
    pcl::toROSMsg(*cloud, cloud_msg);
}

// Builds undistortion remap tables for cv::remap from a pinhole camera
// matrix and Brown-Conrady distortion coefficients (k1, k2, p1, p2, k3).
// For each destination pixel the corresponding distorted source coordinate
// is computed; the float maps are then converted to the fixed-point CV_16SC2
// layout cv::remap handles fastest.
//
// @param cameraMatrix 3x3 intrinsic matrix
// @param distCoeffs   1x5 distortion vector (k1, k2, p1, p2, k3)
// @param imageSize    output map size
// @param map1, map2   resulting remap tables (CV_16SC2 + CV_16UC1 after
//                     convertMaps)
// NOTE(review): both inputs are read with at<float>, so they must be CV_32F —
// confirm callers never pass CV_64F matrices.
void EdsRosDriver::computeUndistortMaps(const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs, const cv::Size& imageSize,
    cv::Mat& map1, cv::Mat& map2)
{
    // Float maps first; converted to the fixed-point representation below.
    map1 = cv::Mat(imageSize, CV_32FC1);
    map2 = cv::Mat(imageSize, CV_32FC1);

    // Pinhole intrinsics.
    const float fx = cameraMatrix.at<float>(0, 0);
    const float fy = cameraMatrix.at<float>(1, 1);
    const float cx = cameraMatrix.at<float>(0, 2);
    const float cy = cameraMatrix.at<float>(1, 2);

    // Distortion coefficients.
    const float k1 = distCoeffs.at<float>(0);
    const float k2 = distCoeffs.at<float>(1);
    const float p1 = distCoeffs.at<float>(2);
    const float p2 = distCoeffs.at<float>(3);
    const float k3 = distCoeffs.at<float>(4);

    for (int y = 0; y < imageSize.height; ++y) {
        // v and v*v are constant along a row — hoisted out of the inner loop.
        const float v = (y - cy) / fy;
        const float v2 = v * v;
        for (int x = 0; x < imageSize.width; ++x) {
            // Normalised (ideal) camera coordinates of the destination pixel.
            const float u = (x - cx) / fx;

            const float r2 = u * u + v2;
            const float r4 = r2 * r2;
            const float r6 = r4 * r2;

            // Radial distortion.
            const float radial_dist = (1.0f + k1*r2 + k2*r4 + k3*r6);
            float x_distorted = u * radial_dist;
            float y_distorted = v * radial_dist;

            // Tangential distortion.
            x_distorted += 2.0f * p1 * u * v + p2 * (r2 + 2.0f * u * u);
            y_distorted += p1 * (r2 + 2.0f * v * v) + 2.0f * p2 * u * v;

            // Back to pixel coordinates in the distorted source image.
            map1.at<float>(y, x) = x_distorted * fx + cx;
            map2.at<float>(y, x) = y_distorted * fy + cy;
        }
    }
    // Convert to the interleaved fixed-point format expected by cv::remap.
    cv::convertMaps(map1, map2, map1, map2, CV_16SC2);
}

// Undistorts an image using the precomputed remap tables (RGB or TOF).
// Before the intrinsics have been parsed (init_flag_ still false) the input
// is passed through unchanged (cloned) — the maps would be empty.
// NOTE(review): the default argument `is_rgb = false` sits on this
// out-of-line definition; that is only legal if the in-class declaration does
// NOT also provide one, and the default is then usable only within this
// translation unit — consider moving it to the header.
void EdsRosDriver::undistortImage(const cv::Mat &src, cv::Mat &dst, bool is_rgb = false) {
    if (!init_flag_) {
        dst = src.clone();
        return;
    }
    if (is_rgb) {
        cv::remap(src, dst, RGB_Undistort_Map_1, RGB_Undistort_Map_2, cv::INTER_LINEAR);
    } else {
        cv::remap(src, dst, TOF_Undistort_Map_1, TOF_Undistort_Map_2, cv::INTER_LINEAR);
    }
}

// Parses the camera-intrinsics JSON returned by the device
// (api_get_intrinsic_parameters) into mParam, builds the OpenCV intrinsic /
// distortion matrices, and precomputes the undistortion remap tables.
//
// Two JSON layouts are supported:
//   * new style: RGB intrinsics nested in an "rgb" object, extrinsics in
//     "ext_params";
//   * legacy style: flat "RGB_*" keys with "Rotation"/"Translation" at the
//     top level.
//
// @param pInfo NUL-terminated JSON document.
// @return -1 when the JSON fails to parse; 1 otherwise (even when no RGB
//         section was found — that failure is only logged).
// NOTE(review): missing keys make picojson's get<double>() fail hard; the
// caller wraps this call in try/catch(std::runtime_error) — confirm the
// device always sends the expected keys.
int EdsRosDriver::parseIntrinsicParameters(const char* pInfo) {
    picojson::value mIntrinsicsJson;
    picojson::object mObj;
    picojson::object mObjRGB;
    picojson::object mObjExt;
    int rgb_flag = 0;  // set to 1 once an RGB section has been parsed

    std::string err = picojson::parse(mIntrinsicsJson, std::string(pInfo));
    if (!err.empty()) {
        return -1;
    }

    mObj = mIntrinsicsJson.get<picojson::object>();
    
    // Parse the TOF intrinsics (top-level keys).
    mParam.Width_tof = static_cast<int>(mObj["width"].get<double>());
    mParam.Height_tof = static_cast<int>(mObj["height"].get<double>());
    mParam.fx_tof = mObj["f_x"].get<double>();
    mParam.fy_tof = mObj["f_y"].get<double>();
    mParam.cx_tof = mObj["c_x"].get<double>();
    mParam.cy_tof = mObj["c_y"].get<double>();
    mParam.k1_tof = mObj["k_1"].get<double>();
    mParam.k2_tof = mObj["k_2"].get<double>();
    mParam.k3_tof = mObj["k_3"].get<double>();
    mParam.p1_tof = mObj["p_1"].get<double>();
    mParam.p2_tof = mObj["p_2"].get<double>();
    
    // Parse the RGB intrinsics — new-style layout: nested "rgb" object.
    if(mObj["rgb"].is<picojson::object>())
    {  
        mObjRGB = mObj["rgb"].get<picojson::object>();
        // printf("--------------------read rgb\n");
        mParam.Width_rgb = static_cast<int>(mObjRGB["width"].get<double>());
        mParam.Height_rgb = static_cast<int>(mObjRGB["height"].get<double>());
        mParam.fx_rgb = mObjRGB["f_x"].get<double>();
        mParam.fy_rgb = mObjRGB["f_y"].get<double>();
        mParam.cx_rgb = mObjRGB["c_x"].get<double>();
        mParam.cy_rgb = mObjRGB["c_y"].get<double>();
        mParam.k1_rgb = mObjRGB["k_1"].get<double>();
        mParam.k2_rgb = mObjRGB["k_2"].get<double>();
        mParam.k3_rgb = mObjRGB["k_3"].get<double>();
        mParam.p1_rgb = mObjRGB["p_1"].get<double>();
        mParam.p2_rgb = mObjRGB["p_2"].get<double>();

        // Parse the TOF->RGB extrinsics.
        mObjExt = mObj["ext_params"].get<picojson::object>();
        
        // 3x3 rotation matrix.
        picojson::array rotation = mObjExt["Rotation"].get<picojson::array>();
        for (int i = 0; i < 3; ++i) {
            picojson::array row = rotation[i].get<picojson::array>();
            for (int j = 0; j < 3; ++j) {
                mParam.R[i][j] = row[j].get<double>();
                mParam.R_(i, j) = static_cast<float>(mParam.R[i][j]);  // mirror into the Eigen rotation R_
            }
        }

        // Translation vector (JSON values are in millimetres).
        picojson::array translation = mObjExt["Translation"].get<picojson::array>();
        for (int i = 0; i < 3; ++i) {
            mParam.T[i] = translation[i].get<double>();
            mParam.T_(i) = static_cast<float>(mParam.T[i] / 1000.0f);  // T_ is stored in metres
        }
        rgb_flag = 1;
    }
    // else if(mObj["RGB_width"].is<picojson::object>())
    else if(mObj.find("RGB_width") != mObj.end())
    {
        // Legacy layout: flat "RGB_*" keys at the top level.
        // printf("-------------------------read rgb failed\n");

        mParam.Width_rgb = static_cast<int>(mObj["RGB_width"].get<double>());
        // printf("-------------------------read RGB failed\n");
        mParam.Height_rgb = static_cast<int>(mObj["RGB_height"].get<double>());
        mParam.fx_rgb = mObj["RGB_f_x"].get<double>();
        mParam.fy_rgb = mObj["RGB_f_y"].get<double>();
        // printf("-------------------------read rgb failed\n");
        mParam.cx_rgb = mObj["RGB_c_x"].get<double>();
        mParam.cy_rgb = mObj["RGB_c_y"].get<double>();
        // printf("-------------------------read rgb failed\n");
        mParam.k1_rgb = mObj["RGB_k_1"].get<double>();
        mParam.k2_rgb = mObj["RGB_k_2"].get<double>();
        mParam.k3_rgb = mObj["RGB_k_3"].get<double>();
        // printf("-------------------------read rgb failed\n");
        mParam.p1_rgb = mObj["RGB_p_1"].get<double>();
        mParam.p2_rgb = mObj["RGB_p_2"].get<double>();
        // printf("-------------------------read rgb failed\n");

        // Parse the extrinsics (top level in the legacy layout).
        // 3x3 rotation matrix.
        picojson::array rotation = mObj["Rotation"].get<picojson::array>();
        for (int i = 0; i < 3; ++i) {
            picojson::array row = rotation[i].get<picojson::array>();
            for (int j = 0; j < 3; ++j) {
                mParam.R[i][j] = row[j].get<double>();
                mParam.R_(i, j) = static_cast<float>(mParam.R[i][j]);  // mirror into the Eigen rotation R_
            }
        }

        // Translation vector (millimetres in JSON; T_ stored in metres).
        picojson::array translation = mObj["Translation"].get<picojson::array>();
        for (int i = 0; i < 3; ++i) {
            mParam.T[i] = translation[i].get<double>();
            mParam.T_(i) = static_cast<float>(mParam.T[i] / 1000.0f);  // T_ is stored in metres
        }
        rgb_flag = 1;
    }
    else
    {
        printf("-------------------------read rgb parameter failed\n");
    }

    // Print all parsed parameters.
    std::cout << "\n=== Camera Parameters ===" << std::endl;

    // TOF camera intrinsics.
    std::cout << "\nTOF Intrinsics:" << std::endl;
    std::cout << "  Resolution: " << mParam.Width_tof << "x" << mParam.Height_tof << std::endl;
    std::cout << "  Focal Length: (" << std::fixed << std::setprecision(6) 
                << mParam.fx_tof << ", " << mParam.fy_tof << ")" << std::endl;
    std::cout << "  Principal Point: (" << mParam.cx_tof << ", " << mParam.cy_tof << ")" << std::endl;
    std::cout << "  Distortion Coefficients:" << std::endl;
    std::cout << "    k1: " << mParam.k1_tof << ", k2: " << mParam.k2_tof 
                << ", k3: " << mParam.k3_tof << std::endl;
    std::cout << "    p1: " << mParam.p1_tof << ", p2: " << mParam.p2_tof << std::endl;
    // Build the TOF intrinsic matrix. 
    TOF_camera_matrix.at<float>(0, 0) = mParam.fx_tof;
    TOF_camera_matrix.at<float>(1, 1) = mParam.fy_tof;
    TOF_camera_matrix.at<float>(0, 2) = mParam.cx_tof;
    TOF_camera_matrix.at<float>(1, 2) = mParam.cy_tof;
    
    // TOF distortion coefficients in OpenCV order (k1, k2, p1, p2, k3).
    TOF_dist_coeffs.at<float>(0, 0) = mParam.k1_tof;
    TOF_dist_coeffs.at<float>(0, 1) = mParam.k2_tof;
    TOF_dist_coeffs.at<float>(0, 2) = mParam.p1_tof;
    TOF_dist_coeffs.at<float>(0, 3) = mParam.p2_tof;
    TOF_dist_coeffs.at<float>(0, 4) = mParam.k3_tof;
    // Precompute the TOF undistortion maps (custom routine on Orin,
    // OpenCV's initUndistortRectifyMap elsewhere).
    if (isRunningOnOrin_) {
        computeUndistortMaps(TOF_camera_matrix, TOF_dist_coeffs, 
                        cv::Size(mParam.Width_tof, mParam.Height_tof),
                        TOF_Undistort_Map_1, TOF_Undistort_Map_2);
    } else {
        cv::initUndistortRectifyMap(
            TOF_camera_matrix, TOF_dist_coeffs, cv::Mat(),
            TOF_camera_matrix, cv::Size(mParam.Width_tof, mParam.Height_tof), CV_16SC2, TOF_Undistort_Map_1, TOF_Undistort_Map_2
        );
    }
    if (rgb_flag)
    {
        // Derive the RGBD intrinsics by scaling the RGB intrinsics down to
        // resolution_height_ (keeping the aspect ratio).
        mParam.Height_rgbd =resolution_height_;
        float P_rgb2rgbd;
        P_rgb2rgbd = (float)mParam.Height_rgbd / (float)mParam.Height_rgb;

        mParam.Width_rgbd = mParam.Width_rgb * P_rgb2rgbd;
        mParam.Width_rgbd_cut = mParam.Width_tof * (float)mParam.Height_rgbd / (float)mParam.Height_tof;
        mParam.fx_rgbd = mParam.fx_rgb * P_rgb2rgbd;
        mParam.fy_rgbd = mParam.fy_rgb * P_rgb2rgbd;
        mParam.cx_rgbd = mParam.cx_rgb * P_rgb2rgbd;
        mParam.cy_rgbd = mParam.cy_rgb * P_rgb2rgbd;

        // Build the RGB intrinsic matrix.    
        RGB_camera_matrix.at<float>(0, 0) = mParam.fx_rgb;
        RGB_camera_matrix.at<float>(1, 1) = mParam.fy_rgb;
        RGB_camera_matrix.at<float>(0, 2) = mParam.cx_rgb;
        RGB_camera_matrix.at<float>(1, 2) = mParam.cy_rgb;
        
        // RGB distortion coefficients in OpenCV order (k1, k2, p1, p2, k3).
        RGB_dist_coeffs.at<float>(0, 0) = mParam.k1_rgb;
        RGB_dist_coeffs.at<float>(0, 1) = mParam.k2_rgb;
        RGB_dist_coeffs.at<float>(0, 2) = mParam.p1_rgb;
        RGB_dist_coeffs.at<float>(0, 3) = mParam.p2_rgb;
        RGB_dist_coeffs.at<float>(0, 4) = mParam.k3_rgb;

        // Precompute the RGB undistortion maps.
        if (isRunningOnOrin_) {
            computeUndistortMaps(RGB_camera_matrix, RGB_dist_coeffs, 
                        cv::Size(mParam.Width_rgb, mParam.Height_rgb),
                        RGB_Undistort_Map_1, RGB_Undistort_Map_2);
        } else {
            cv::initUndistortRectifyMap(
                RGB_camera_matrix, RGB_dist_coeffs, cv::Mat(),
                RGB_camera_matrix, cv::Size(mParam.Width_rgb, mParam.Height_rgb), CV_16SC2, RGB_Undistort_Map_1, RGB_Undistort_Map_2
            );            
        }
        
        // Print the RGB camera intrinsics.
        std::cout << "\nRGB Intrinsics:" << std::endl;
        std::cout << "  Resolution: " << mParam.Width_rgb << "x" << mParam.Height_rgb << std::endl;
        std::cout << "  Focal Length: (" << mParam.fx_rgb << ", " << mParam.fy_rgb << ")" << std::endl;
        std::cout << "  Principal Point: (" << mParam.cx_rgb << ", " << mParam.cy_rgb << ")" << std::endl;
        std::cout << "  Distortion Coefficients:" << std::endl;
        std::cout << "    k1: " << mParam.k1_rgb << ", k2: " << mParam.k2_rgb 
                    << ", k3: " << mParam.k3_rgb << std::endl;
        std::cout << "    p1: " << mParam.p1_rgb << ", p2: " << mParam.p2_rgb << std::endl;
        if (rgbd_mode_ == DEPTH2RGB_RESIZE) {
            // Print the derived RGBD intrinsics.
            std::cout << "\nRGBD Intrinsics:" << std::endl;
            std::cout << "  Resolution: " << mParam.Width_rgbd << "x" << mParam.Height_rgbd << std::endl;
            std::cout << "  Focal Length: (" << mParam.fx_rgbd << ", " << mParam.fy_rgbd << ")" << std::endl;
            std::cout << "  Principal Point: (" << mParam.cx_rgbd << ", " << mParam.cy_rgbd << ")" << std::endl;
        }

        // Print the extrinsics.
        std::cout << "\nExtrinsic Parameters:" << std::endl;
        std::cout << "  Rotation Matrix:" << std::endl;
        for(int i = 0; i < 3; ++i) {
            std::cout << "    [ ";
            for(int j = 0; j < 3; ++j) {
                std::cout << std::fixed << std::setw(10) 
                            << std::setprecision(6) << mParam.R[i][j];
                if(j < 2) std::cout << ", ";
            }
            std::cout << " ]" << std::endl;
        }

        std::cout << "  Translation Vector:" << std::endl;
        std::cout << "    [ ";
        for(int i = 0; i < 3; ++i) {
            std::cout << std::fixed << std::setw(10) 
                        << std::setprecision(6) << mParam.T[i];
            if(i < 2) std::cout << ", ";
        }
        std::cout << " ]\n" << std::endl;
    }
    return 1;
}
// Reprojects the TOF point cloud into the downscaled RGBD image plane to
// produce an RGBD-resolution depth map (millimetres; unhit pixels stay 0).
// The transformed points are also appended to rgb_cloud (RGB camera frame).
//
// @param depth_aligned  output depth map, CV_32F, Height_rgbd x Width_rgbd
// @param rgb_cloud      receives every valid TOF point expressed in the RGB frame
// @param tof_cloud      input cloud in the TOF frame (NaN marks invalid points)
void EdsRosDriver::alignDepthToRGB_Resize( 
    cv::Mat &depth_aligned, pcl::PointCloud<pcl::PointXYZ>::Ptr &rgb_cloud, pcl::PointCloud<pcl::PointXYZ>::Ptr &tof_cloud) {
    // Empty RGBD-resolution depth map; pixels without a projection stay 0.
    depth_aligned = cv::Mat(mParam.Height_rgbd, mParam.Width_rgbd, CV_32F, cv::Scalar(0));

    rgb_cloud->points.reserve(rgb_cloud->points.size() + tof_cloud->points.size());

    for (const auto &point_depth : tof_cloud->points) 
    {
        if (std::isnan(point_depth.x)) {
            continue;  // invalid TOF measurement
        }

        // TOF camera frame -> RGB camera frame.
        Eigen::Vector3f point_depth_eigen(point_depth.x, point_depth.y, point_depth.z);
        Eigen::Vector3f point_rgb = mParam.R_ * point_depth_eigen + mParam.T_;

        pcl::PointXYZ point;
        point.x = point_rgb[0];
        point.y = point_rgb[1];
        point.z = point_rgb[2];
        rgb_cloud->points.push_back(point);

        // Project onto the RGBD image plane (only points in front of the camera).
        if (point_rgb[2] > 0) 
        {
            float rgb_x = (point_rgb[0] / point_rgb[2] * mParam.fx_rgbd + mParam.cx_rgbd);
            float rgb_y = (point_rgb[1] / point_rgb[2] * mParam.fy_rgbd + mParam.cy_rgbd);

            if (rgb_x >= 0 && rgb_x < mParam.Width_rgbd && rgb_y >= 0 && rgb_y < mParam.Height_rgbd) 
            {
                // Bug fix: a coordinate in [W-0.5, W) used to round to index W
                // and write one pixel out of bounds; clamp after rounding.
                int rgb_u = std::min(static_cast<int>(rgb_x + 0.5f), mParam.Width_rgbd - 1);
                int rgb_v = std::min(static_cast<int>(rgb_y + 0.5f), mParam.Height_rgbd - 1);
                depth_aligned.at<float>(rgb_v, rgb_u) = point_depth.z * 1000.0f;  // metres -> millimetres
            }
        }
    }

    rgb_cloud->width = rgb_cloud->points.size();
    rgb_cloud->height = 1;
    rgb_cloud->is_dense = false;
}

void EdsRosDriver::alignRGBToDepth( 
    std::vector<unsigned char> &rgb_points, 
    cv::Mat undistorted_rgb, pcl::PointCloud<pcl::PointXYZ>::Ptr &rgb_cloud, pcl::PointCloud<pcl::PointXYZ>::Ptr &tof_cloud) {

    // 对每个点进行坐标系转换
    for (const auto &point_depth : tof_cloud->points) 
    {
        if (!std::isnan(point_depth.x)) 
        {
            // 将点从TOF相机坐标系转换到RGB相机坐标系
            Eigen::Vector3f point_depth_eigen(point_depth.x, point_depth.y, point_depth.z);
            Eigen::Vector3f point_rgb = mParam.R_ * point_depth_eigen + mParam.T_;

            // 将点添加到RGB相机坐标系下的点云
            pcl::PointXYZ point;
            point.x = point_rgb[0];
            point.y = point_rgb[1];
            point.z = point_rgb[2];
            rgb_cloud->points.push_back(point);

            // 将3D点投影到RGB图像平面
            if (point_rgb[2] > 0) 
            {  // 点在相机前方
                float rgb_x = (point_rgb[0] / point_rgb[2] * mParam.fx_rgb + mParam.cx_rgb);
                float rgb_y = (point_rgb[1] / point_rgb[2] * mParam.fy_rgb + mParam.cy_rgb);

                // 检查投影点是否在RGB图像范围内
                if (rgb_x >= 0 && rgb_x < mParam.Width_rgb && rgb_y >= 0 && rgb_y < mParam.Height_rgb) 
                {
                    int rgb_u = static_cast<int>(rgb_x + 0.5f);
                    int rgb_v = static_cast<int>(rgb_y + 0.5f);
                    //cv::Vec3i rgb_point;
                    
                    //rgb_point=undistorted_rgb.at<cv::Vec3b>(rgb_v, rgb_u);
                    rgb_points.push_back(undistorted_rgb.at<cv::Vec3b>(rgb_v, rgb_u)[0]);
                    rgb_points.push_back(undistorted_rgb.at<cv::Vec3b>(rgb_v, rgb_u)[1]);
                    rgb_points.push_back(undistorted_rgb.at<cv::Vec3b>(rgb_v, rgb_u)[2]);
                }
                else
                {
                    rgb_points.push_back(0);
                    rgb_points.push_back(0);
                    rgb_points.push_back(0);
                }
            }
            else
            {
                rgb_points.push_back(0);
                rgb_points.push_back(0);
                rgb_points.push_back(0);
            }
        }
        else
        {
            rgb_points.push_back(0);
            rgb_points.push_back(0);
            rgb_points.push_back(0);
        }
    }

    rgb_cloud->width = rgb_cloud->points.size();
    rgb_cloud->height = 1;
    rgb_cloud->is_dense = false;
}

// Reprojects the TOF point cloud into the full-resolution RGB image plane to
// build an RGB-aligned depth map (millimetres), then smooths/densifies it
// with the guided bilateral filter using the RGB image as guide.
//
// @param depth_aligned    output depth map, CV_32F, Height_rgb x Width_rgb
// @param undistorted_rgb  guide image for the bilateral filter
// @param rgb_cloud        receives valid TOF points in the RGB frame
// @param tof_cloud        input cloud in the TOF frame (NaN = invalid)
void EdsRosDriver::alignDepthToRGB_Filter(cv::Mat &depth_aligned, cv::Mat undistorted_rgb, pcl::PointCloud<pcl::PointXYZ>::Ptr &rgb_cloud, pcl::PointCloud<pcl::PointXYZ>::Ptr &tof_cloud) {
    // Empty RGB-resolution depth map; pixels without a projection stay 0.
    depth_aligned = cv::Mat(mParam.Height_rgb, mParam.Width_rgb, CV_32F, cv::Scalar(0));

    rgb_cloud->points.reserve(rgb_cloud->points.size() + tof_cloud->points.size());

    for (const auto &point_depth : tof_cloud->points) 
    {
        if (std::isnan(point_depth.x)) {
            continue;  // invalid TOF measurement
        }

        // TOF camera frame -> RGB camera frame.
        Eigen::Vector3f point_depth_eigen(point_depth.x, point_depth.y, point_depth.z);
        Eigen::Vector3f point_rgb = mParam.R_ * point_depth_eigen + mParam.T_;

        pcl::PointXYZ point;
        point.x = point_rgb[0];
        point.y = point_rgb[1];
        point.z = point_rgb[2];
        rgb_cloud->points.push_back(point);

        // Project onto the RGB image plane (only points in front of the camera).
        if (point_rgb[2] > 0) 
        {
            float rgb_x = (point_rgb[0] / point_rgb[2] * mParam.fx_rgb + mParam.cx_rgb);
            float rgb_y = (point_rgb[1] / point_rgb[2] * mParam.fy_rgb + mParam.cy_rgb);

            if (rgb_x >= 0 && rgb_x < mParam.Width_rgb && rgb_y >= 0 && rgb_y < mParam.Height_rgb) 
            {
                // Bug fix: a coordinate in [W-0.5, W) used to round to index W
                // and write one pixel out of bounds; clamp after rounding.
                int rgb_u = std::min(static_cast<int>(rgb_x + 0.5f), mParam.Width_rgb - 1);
                int rgb_v = std::min(static_cast<int>(rgb_y + 0.5f), mParam.Height_rgb - 1);
                depth_aligned.at<float>(rgb_v, rgb_u) = point_depth.z * 1000.0f;  // metres -> millimetres
            }
        }
    }

    rgb_cloud->width = rgb_cloud->points.size();
    rgb_cloud->height = 1;
    rgb_cloud->is_dense = false;

    // Edge-preserving smoothing guided by the undistorted RGB image.
    cv::Mat filtered_depth;
    bilateralFilterWithGuide(depth_aligned, undistorted_rgb, filtered_depth, 5, 1.0f, 10.0f);
    depth_aligned = filtered_depth;
}

// Builds an organized point cloud from a depth map already aligned to the
// RGB image plane (CV_32F, millimetres) and serialises it into a ROS
// PointCloud2 message. Pixels outside (0 m, 10 m) become NaN points.
void EdsRosDriver::convertAlignedDepthToPointCloud2(const cv::Mat &aligned_depth, sensor_msgs::PointCloud2 &cloud_msg) {
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
    const int rows = mParam.Height_rgb;
    const int cols = mParam.Width_rgb;
    cloud->width = cols;
    cloud->height = rows;
    cloud->is_dense = false;  // NaN points are present
    cloud->points.resize(cloud->width * cloud->height);

    for (int row = 0; row < rows; ++row) {
        for (int col = 0; col < cols; ++col) {
            // Write through a reference instead of repeating the index expression.
            pcl::PointXYZ &pt = cloud->points[row * cols + col];
            const float depth_m = aligned_depth.at<float>(row, col) / 1000.0f;  // mm -> m
            if (depth_m > 0 && depth_m < 10.0) {
                // Back-project using the RGB camera intrinsics.
                pt.z = depth_m;
                pt.x = (col - mParam.cx_rgb) * pt.z / mParam.fx_rgb;
                pt.y = (row - mParam.cy_rgb) * pt.z / mParam.fy_rgb;
            } else {
                // Invalid measurement: mark with NaN to keep the cloud organized.
                pt.x = pt.y = pt.z = std::numeric_limits<float>::quiet_NaN();
            }
        }
    }
    pcl::toROSMsg(*cloud, cloud_msg);
}

void EdsRosDriver::run() {
    api_init();
    ros::Time start_time = ros::Time::now();
    int connect_count = 0;
    while (ros::ok()) {
        ros::Time connect_time = ros::Time::now();
        if(connect_time.toSec() - start_time.toSec() < 30) {
            handle_ = api_connect((char *) ip_.c_str());
            if (connect_count == 0) {
                std::cout << "Connecting" << std::flush;
            } else {
                std::cout << "." << std::flush;
            }
            connect_count++;
            if (handle_ >= 0) {
                std::cout << "\ncamera connect success!" << std::endl;
                break;          
            }
            ros::Duration(1).sleep();
        }else {
            std::cout << "\ncamera connect error!" << std::endl;
            return;
        }
    }
    // api_set_blocking_mode(handle_, true);
    init_flag_ = false;
    STRC_IMG_ALL *frame ;//= new STRC_IMG_ALL();
    // start_get_para_time = ros::Time::now();
    int fitst_frame = 0;
    std::cout << "Start Get Camera Parameters!" << std::endl;
    while (!init_flag_) { 
        // ros::Time time = ros::Time::now(); 
        char *pInfo = NULL;          
        pInfo = api_get_intrinsic_parameters(handle_);
        if (pInfo != NULL) {
            try {
                init_flag_ = parseIntrinsicParameters(pInfo);
            } catch (const std::runtime_error& e) {
                std::cerr << e.what() << std::endl;
                return;
            }
            int rc = -1;
            rc = api_set_exposure(handle_, exposure_);
            while(rc < 0) {
                std::cout << "camera set_exposure error!" << std::endl;
                rc = api_set_exposure(handle_, exposure_);
                sleep(1);
            }
            std::cout << "camera set_exposure : " << exposure_ << std::endl;                
        }
    }
    std::cout << "Start Get Camera Data!" << std::endl;
    ros::Time StartGetFrameTime = ros::Time::now();
    while (ros::ok()) {
        frame = api_get_img(handle_);
        if(frame != NULL) {
            fitst_frame = 1;
            ros::Time time = ros::Time::now();
            DEBUG_TIMING_START(Total);
            DEBUG_TIMING_START(Depth);
            // 发布原始深度图
            cv::Mat depth(depth_height_, depth_width_, CV_32F, cv::Scalar(0));
            for (int i = 0; i < depth_height_; ++i) {
                for (int j = 0; j < depth_width_; ++j) {
                    float &pixel = depth.at<float>(i, j);
                    pixel = static_cast<float>(frame->img_depth.data[i * depth_width_ + j]);
                }
            }
            sensor_msgs::ImagePtr depth_image = cv_bridge::CvImage(std_msgs::Header(), "32FC1", depth).toImageMsg();
            depth_image->header.stamp = time;
            depth_image->header.frame_id = "depth_camera";
            depth_pub_.publish(depth_image);
            DEBUG_TIMING_END(Depth);

            // 发布IR图
            if(init_flag_) {
                DEBUG_TIMING_START(IR);
                cv::Mat ir(mParam.Height_tof, mParam.Width_tof, CV_32F, cv::Scalar(0));
                for (int i = 0; i < mParam.Height_tof; ++i) {
                    for (int j = 0; j < mParam.Width_tof; ++j) {
                        float &pixel = ir.at<float>(i, j);
                        pixel = static_cast<float>(frame->img_amplitude.data[i * mParam.Width_tof + j]);
                    }
                }
                cv::Mat undistorted_ir;
                undistortImage(ir, undistorted_ir, false);
                sensor_msgs::ImagePtr ir_image = cv_bridge::CvImage(std_msgs::Header(), "32FC1", undistorted_ir).toImageMsg();
                ir_image->header.stamp = time;
                ir_image->header.frame_id = "depth_camera";
                ir_pub_.publish(ir_image);
                DEBUG_TIMING_END(IR);

                // 发布深度图视角下的点云
                pcl::PointCloud<pcl::PointXYZ>::Ptr tof_cloud(new pcl::PointCloud<pcl::PointXYZ>);
                if(cloud_open_ ) {
                    DEBUG_TIMING_START(Cloud);
                    sensor_msgs::PointCloud2 cloud_msg;
                    //convertDepthToPointCloud2(depth, cloud_msg, depth_cloud);

                    depthToPointCloud(depth, tof_cloud, false);  // 使用TOF相机的内参
                    
                    if(rgbd_mode_ == RGBD_CLOSE)// || rgbd_mode_ == RGB2DEPTH)
                    {
                        pcl::toROSMsg(*tof_cloud, cloud_msg);
                        cloud_msg.header.stamp = time;
                        cloud_msg.header.frame_id = "depth_camera";
                        cloud_pub_.publish(cloud_msg);
                    }
                    DEBUG_TIMING_END(Cloud);
                }

                // RGBD对齐
                if(rgb_open_) {
                    DEBUG_TIMING_START(RGB_Decode);
                    cv::Mat rgb(mParam.Width_rgb, mParam.Height_rgb, CV_8UC3, cv::Scalar(0, 0, 0));
                    char *src_data = (char *) frame->img_rgb.data;
                    std::vector <uchar> img_data(src_data, src_data + frame->img_rgb.len);
                    rgb = cv::imdecode(img_data, cv::IMREAD_COLOR);
                    DEBUG_TIMING_END(RGB_Decode);

                    if (!rgb.empty()) {
                        DEBUG_TIMING_START(RGB_Undistort);
                        // 对RGB图像进行去畸变
                        cv::Mat undistorted_rgb;
                        undistortImage(rgb, undistorted_rgb, true);  // true表示这是RGB图像
                        DEBUG_TIMING_END(RGB_Undistort);
                        sensor_msgs::ImagePtr rgb_image = cv_bridge::CvImage(std_msgs::Header(), "bgr8", undistorted_rgb).toImageMsg();
                        rgb_image->header.stamp = time;
                        rgb_image->header.frame_id = "rgb_camera";
                        rgb_pub_.publish(rgb_image);
                                                
                        
                        cv::Mat aligned_depth;
                        pcl::PointCloud<pcl::PointXYZ>::Ptr rgb_cloud(new pcl::PointCloud<pcl::PointXYZ>);
                        cv::Mat rgbd_rgb;
                        sensor_msgs::ImagePtr rgbd_rgb_image;
                        sensor_msgs::ImagePtr aligned_depth_image;
                        std::vector<unsigned char> rgbd_rgb_points;
                        
                        DEBUG_TIMING_START(RGBD);
                        switch(rgbd_mode_) 
                        {
                            case RGBD_CLOSE:
                            break;
                            
                            case DEPTH2RGB_RESIZE:
                            // 对齐深度图到RGB视角并生成RGB相机坐标系下的点云
                                // cv::Mat aligned_depth;
                                // pcl::PointCloud<pcl::PointXYZ>::Ptr rgb_cloud(new pcl::PointCloud<pcl::PointXYZ>);
                                alignDepthToRGB_Resize(aligned_depth, rgb_cloud, tof_cloud);
                                // 去畸变后的RGB图像resize到rgbd分辨率
                                // cv::Mat rgbd_rgb;
                                // myResize(undistorted_rgb, rgbd_rgb, cv::Size(mParam.Width_rgbd, mParam.Height_rgbd));
                                if (isRunningOnOrin_) {
                                    myResize(undistorted_rgb, rgbd_rgb, cv::Size(mParam.Width_rgbd, mParam.Height_rgbd));
                                } else {
                                    cv::resize(undistorted_rgb, rgbd_rgb, cv::Size(mParam.Width_rgbd, mParam.Height_rgbd));
                                }                                
                                // 发布resize后的RGBD_RGB图像
                                rgbd_rgb_image = cv_bridge::CvImage(std_msgs::Header(), "bgr8", rgbd_rgb).toImageMsg();
                                rgbd_rgb_image->header.stamp = time;
                                rgbd_rgb_image->header.frame_id = "rgb_camera";
                                rgbd_rgb_pub_.publish(rgbd_rgb_image);
                                
                                // 发布对齐后的深度图
                                aligned_depth_image = cv_bridge::CvImage(std_msgs::Header(), "32FC1", aligned_depth).toImageMsg();
                                aligned_depth_image->header.stamp = time;
                                aligned_depth_image->header.frame_id = "rgb_camera";
                                rgbd_depth_pub_.publish(aligned_depth_image);
                                
                                // 发布RGB相机坐标系下的点云
                                if (cloud_open_) {
                                    sensor_msgs::PointCloud2 rgb_cloud_msg;
                                    pcl::toROSMsg(*rgb_cloud, rgb_cloud_msg);
                                    rgb_cloud_msg.header.stamp = time;
                                    rgb_cloud_msg.header.frame_id = "rgb_camera";
                                    rgbd_depth_cloud_pub_.publish(rgb_cloud_msg);
                                }
                                break;
                            
                                case DEPTH2RGB_FILTER:
                                
                                // 对齐深度图到RGB视角并生成RGB相机坐标系下的点云
                                // cv::Mat aligned_depth;
                                // pcl::PointCloud<pcl::PointXYZ>::Ptr rgb_cloud(new pcl::PointCloud<pcl::PointXYZ>);
                                alignDepthToRGB_Filter(aligned_depth, undistorted_rgb, rgb_cloud, tof_cloud);
                                
                                // 发布RGBD_RGB图像
                                rgbd_rgb_image = cv_bridge::CvImage(std_msgs::Header(), "bgr8", undistorted_rgb).toImageMsg();
                                rgbd_rgb_image->header.stamp = time;
                                rgbd_rgb_image->header.frame_id = "rgb_camera";
                                rgbd_rgb_pub_.publish(rgbd_rgb_image);

                                // 发布对齐后的深度图
                                aligned_depth_image = cv_bridge::CvImage(std_msgs::Header(), "32FC1", aligned_depth).toImageMsg();
                                aligned_depth_image->header.stamp = time;
                                aligned_depth_image->header.frame_id = "rgb_camera";
                                rgbd_depth_pub_.publish(aligned_depth_image);
                                
                                // 发布RGB相机坐标系下的点云
                                if (cloud_open_) {
                                    sensor_msgs::PointCloud2 rgb_cloud_msg;
                                    pcl::toROSMsg(*rgb_cloud, rgb_cloud_msg);
                                    rgb_cloud_msg.header.stamp = time;
                                    rgb_cloud_msg.header.frame_id = "rgb_camera";
                                    rgbd_depth_cloud_pub_.publish(rgb_cloud_msg);
                                }
                                break;
                                
                                case RGB2DEPTH:
                                // 给深度图上色并生成RGB相机坐标系下的点云
                                // pcl::PointCloud<pcl::PointXYZ>::Ptr rgb_cloud(new pcl::PointCloud<pcl::PointXYZ>);
                                // std::vector<cv::Vec3i> rgbd_rgb_points;
                                alignRGBToDepth(rgbd_rgb_points, undistorted_rgb, rgb_cloud, tof_cloud);
                                
                                // 创建tof分辨率的rgb图
                                // cv::Mat rgbd_rgb;
                                rgbd_rgb = cv::Mat(mParam.Height_tof, mParam.Width_tof, CV_8UC3, rgbd_rgb_points.data());
                                
                                // 发布对齐后的DEPTH图像
                                aligned_depth_image = cv_bridge::CvImage(std_msgs::Header(), "32FC1", depth).toImageMsg();
                                aligned_depth_image->header.stamp = time;
                                aligned_depth_image->header.frame_id = "depth_camera";
                                rgbd_depth_pub_.publish(aligned_depth_image);
                                // 发布深度图上色的RGBD_RGB图像
                                rgbd_rgb_image = cv_bridge::CvImage(std_msgs::Header(), "bgr8", rgbd_rgb).toImageMsg();
                                rgbd_rgb_image->header.stamp = time;
                                rgbd_rgb_image->header.frame_id = "depth_camera";
                                rgbd_rgb_pub_.publish(rgbd_rgb_image);
                                
                                // 发布RGB相机坐标系下的点云
                                if (cloud_open_) {
                                    sensor_msgs::PointCloud2 rgb_cloud_msg;
                                    pcl::toROSMsg(*tof_cloud, rgb_cloud_msg);
                                    rgb_cloud_msg.header.stamp = time;
                                    rgb_cloud_msg.header.frame_id = "depth_camera";
                                    rgbd_depth_cloud_pub_.publish(rgb_cloud_msg);
                                }
                                break;

                                default:
                                printf("unsupport mode\n");
                                break;
                            }
                            DEBUG_TIMING_END(RGBD);
                    } 
                    else 
                    {
                        std::cout << "rgb read error！" << std::endl;
                    }
                }
                DEBUG_TIMING_END(Total);
            }
        } else {
            std::this_thread::sleep_for(std::chrono::milliseconds(5));
            ros::Time time = ros::Time::now();
            if (fitst_frame == 0) {
                if (time - StartGetFrameTime > ros::Duration(10)) {
                    std::cout << "Get data timeout!" << std::endl;
                    // api_disconnect(handle_);
                    // api_exit();
                    break;
                }   
            }
        }
        ros::spinOnce();
    }
    delete frame;
    frame = nullptr;

    api_disconnect(handle_);
    api_exit();
}

int main(int argc, char **argv) {
    ros::init(argc, argv, "exponentialdeepsky_ros_node");
    EdsRosDriver ede_ros_driver;
    ede_ros_driver.run();
    return 0;
}