#ifndef __UTILITY_HPP__
#define __UTILITY_HPP__
#include <cmath>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <optional>
#include <sstream>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <Eigen/Eigen>
#include <opencv2/opencv.hpp>
#include <opencv2/core/eigen.hpp>
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/io/pcd_io.h>
#include <nlohmann/json.hpp>
#include "common_tools.h"

namespace velodyne_ros {
  // Point layout matching the Velodyne ROS driver output.
  // NOTE(review): `time` is presumably the per-point time offset within the
  // scan (units not visible here) -- confirm against the driver filling it.
  struct EIGEN_ALIGN16 Point {
      PCL_ADD_POINT4D;                 // adds x, y, z plus SSE padding float
      float intensity;
      float time;
      std::uint16_t ring;              // laser ring index
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW  // aligned heap allocation for Eigen members
  };
}  // namespace velodyne_ros

// Expose the fields to PCL's serialization / fromROSMsg machinery.
POINT_CLOUD_REGISTER_POINT_STRUCT(velodyne_ros::Point,
    (float, x, x)
    (float, y, y)
    (float, z, z)
    (float, intensity, intensity)
    (float, time, time)
    (std::uint16_t, ring, ring)
)

namespace ouster_ros {
  // Point layout matching the Ouster ROS driver output.
  // NOTE(review): `t` is presumably a per-point timestamp offset in
  // nanoseconds and `range` in millimeters, per the usual Ouster driver
  // conventions -- confirm against the driver in use.
  struct EIGEN_ALIGN16 Point {
      PCL_ADD_POINT4D;                 // adds x, y, z plus SSE padding float
      float intensity;
      std::uint32_t t;
      std::uint16_t reflectivity;
      std::uint8_t  ring;              // laser ring index
      std::uint16_t ambient;
      std::uint32_t range;
      EIGEN_MAKE_ALIGNED_OPERATOR_NEW  // aligned heap allocation for Eigen members
  };
}  // namespace ouster_ros

// clang-format off
// Expose the fields to PCL's serialization / fromROSMsg machinery.
POINT_CLOUD_REGISTER_POINT_STRUCT(ouster_ros::Point,
    (float, x, x)
    (float, y, y)
    (float, z, z)
    (float, intensity, intensity)
    // use std::uint32_t to avoid conflicting with pcl::uint32_t
    (std::uint32_t, t, t)
    (std::uint16_t, reflectivity, reflectivity)
    (std::uint8_t, ring, ring)
    (std::uint16_t, ambient, ambient)
    (std::uint32_t, range, range)
)

//ANCHOR robosense modify
namespace robosense_ros {
    // Point layout matching the RoboSense ROS driver output.
    // NOTE(review): `timestamp` looks like an absolute per-point time in
    // seconds (double, unlike the relative float offsets of other drivers)
    // -- confirm against the driver filling it.
    struct EIGEN_ALIGN16 Point {
        PCL_ADD_POINT4D;                 // adds x, y, z plus SSE padding float
        std::uint8_t intensity;
        std::uint16_t ring;              // laser ring index
        double timestamp;
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW  // aligned heap allocation for Eigen members
    };
}

// namespace robosense_ros
// Expose the fields to PCL's serialization / fromROSMsg machinery.
POINT_CLOUD_REGISTER_POINT_STRUCT(robosense_ros::Point,
    (float, x, x)
    (float, y, y)
    (float, z, z)
    // use std::uint32_t to avoid conflicting with pcl::uint32_t
    (std::uint8_t, intensity, intensity)
    (std::uint16_t, ring, ring)
    (double, timestamp, timestamp)
)

namespace edu_ros {
    // Point layout for the "edu" lidar (Livox-style tag/line fields plus
    // Velodyne-style time/ring fields).
    struct EIGEN_ALIGN16 Point {
        PCL_ADD_POINT4D;                 // adds x, y, z plus SSE padding float
        float intensity;
        double time;
        uint8_t  tag;                    // Livox-style point tag
        uint8_t  line;                   // Livox-style line (laser) index
        double timestamp;
        std::uint16_t ring;
        EIGEN_MAKE_ALIGNED_OPERATOR_NEW  // aligned heap allocation for Eigen members
    };
}

// NOTE(review): `time` and `ring` are declared above but intentionally(?) not
// registered below, so they are ignored by PCL conversions -- confirm this is
// deliberate and not an omission.
POINT_CLOUD_REGISTER_POINT_STRUCT(edu_ros::Point,
                                  (float, x, x)
                                  (float, y, y)
                                  (float, z, z)
                                  (float, intensity, intensity)
                                  (uint8_t, tag, tag)
                                  (uint8_t, line, line)
                                  (double, timestamp, timestamp)
)

// Working point type for the whole pipeline: position + RGB color + normal.
using PointType = pcl::PointXYZRGBNormal;

// Calibration/description of a single camera: native and output resolutions,
// 3x3 intrinsic matrix and distortion coefficients.
// Copy operations deep-copy (clone) the cv::Mat members -- cv::Mat is
// reference counted, so the compiler-generated copy would alias the same
// pixel buffers between two CameraParam instances.
struct CameraParam
{
    std::string name;          // camera identifier, used in output filenames
    cv::Size resolution;       // native sensor resolution
    cv::Size outResolution;    // resolution images are resized to on output
    cv::Mat intrinsicMatrix;   // 3x3 camera matrix
    cv::Mat distortionCoeff;   // distortion coefficients (all-zero => no undistortion)

    CameraParam() {}

    // Convenience constructor: output resolution defaults to the native one.
    CameraParam(std::string name, cv::Size resolution,
        cv::Mat intrinsicMatrix, cv::Mat distortionCoeff)
        : name(name), resolution(resolution), outResolution(resolution),
        intrinsicMatrix(intrinsicMatrix), distortionCoeff(distortionCoeff) {}

    CameraParam(std::string name, cv::Size resolution, cv::Size outResolution,
                cv::Mat intrinsicMatrix, cv::Mat distortionCoeff)
        : name(name), resolution(resolution), outResolution(outResolution),
          intrinsicMatrix(intrinsicMatrix), distortionCoeff(distortionCoeff) {}

    // Deep copy: clone the matrices so the two objects do not share buffers.
    CameraParam(const CameraParam& other)
        : name(other.name), resolution(other.resolution),
          outResolution(other.outResolution),
          intrinsicMatrix(other.intrinsicMatrix.clone()),
          distortionCoeff(other.distortionCoeff.clone()) {}

    CameraParam(CameraParam&& other) noexcept
        : name(std::move(other.name)), resolution(other.resolution),
          outResolution(other.outResolution),
          intrinsicMatrix(std::move(other.intrinsicMatrix)),
          distortionCoeff(std::move(other.distortionCoeff)) {}

    CameraParam& operator=(const CameraParam& other) {
        if (this != &other) {
            name = other.name;
            resolution = other.resolution;
            // BUGFIX: this statement previously ended with a comma operator
            // (`outResolution = other.outResolution,`) instead of a
            // semicolon; it worked by accident, but was a latent typo.
            outResolution = other.outResolution;
            intrinsicMatrix = other.intrinsicMatrix.clone();
            distortionCoeff = other.distortionCoeff.clone();
        }
        return *this;
    }

    CameraParam& operator=(CameraParam&& other) noexcept {
        if (this != &other) {
            name = std::move(other.name);
            resolution = other.resolution;
            outResolution = other.outResolution;
            intrinsicMatrix = std::move(other.intrinsicMatrix);
            distortionCoeff = std::move(other.distortionCoeff);
        }
        return *this;
    }

    // Human-readable dump of all calibration fields.
    friend std::ostream& operator<<(std::ostream& os, const CameraParam& param) {
        os << "Camera Name: " << param.name << "\n"
           << "Resolution: " << param.resolution.width << "x" << param.resolution.height << "\n"
           << "Out Resolution: " << param.outResolution.width << "x" << param.outResolution.height << "\n"
           << "Intrinsic Matrix:\n" << param.intrinsicMatrix << "\n"
           << "Distortion Coefficients:\n" << param.distortionCoeff;
        return os;
    }
};

// Calibration bundle for a lidar-camera rig: one CameraParam plus one
// lidar->camera extrinsic per camera (parallel vectors, indexed together).
//
// Rule of Zero: both members fully manage their own resources, so the
// previously hand-written default/copy/move constructors and assignment
// operators were byte-for-byte equivalent to the compiler-generated ones
// and have been removed.
//
// NOTE(review): storing the fixed-size, vectorizable Eigen::Matrix4f in a
// plain std::vector is only alignment-safe when building as C++17 or newer
// (aligned operator new); otherwise Eigen::aligned_allocator is required --
// confirm the project's language standard.
struct LidarCameraParam
{
    std::vector<CameraParam> camParams;        // per-camera intrinsics
    std::vector<Eigen::Matrix4f> Tlidar2cam;   // 4x4 rigid transforms, lidar frame -> camera frame

    // Human-readable dump of every camera and extrinsic.
    friend std::ostream& operator<<(std::ostream& os, const LidarCameraParam& param) {
        os << "LidarCameraParam Details:\n";
        for (size_t i = 0; i < param.camParams.size(); ++i) {
            os << "Camera " << i << ":\n" << param.camParams[i] << "\n";
        }
        os << "Transformation Matrices from Lidar to Cameras:\n";
        for (size_t i = 0; i < param.Tlidar2cam.size(); ++i) {
            os << "Tlidar2cam[" << i << "]:\n" << param.Tlidar2cam[i] << "\n";
        }
        return os;
    }
};

// Stream an unordered_map as "{k1: v1, k2: v2, ...}".
// Entry order follows the map's (unspecified) iteration order.
template <typename K, typename V>
std::ostream& operator<<(std::ostream& os, const std::unordered_map<K, V>& map) {
    os << "{";
    const char* separator = "";
    for (const auto& [key, value] : map) {
        os << separator << key << ": " << value;
        separator = ", ";  // only printed before the 2nd entry onwards
    }
    os << "}";
    return os;
}

// True when both images have identical pixel dimensions.
// Channel count and depth are deliberately not compared.
// `inline`: non-template function defined in a header must be inline to
// avoid ODR violations when this header is included from several TUs.
inline bool AreImagesSameSize(const cv::Mat& image1, const cv::Mat& image2) {
    return (image1.rows == image2.rows) && (image1.cols == image2.cols);
}

// Apply a rigid transform to every point of `cloudIn` and return the result
// as a new cloud. The input cloud is copied first, so all non-XYZ fields
// (intensity, color, ...) are preserved unchanged.
template <typename PointT>
typename pcl::PointCloud<PointT>::Ptr TransformPointCloud(const typename pcl::PointCloud<PointT>::Ptr &cloudIn, const Eigen::Matrix4f &transform)
{
    typename pcl::PointCloud<PointT>::Ptr result(new pcl::PointCloud<PointT>());
    pcl::copyPointCloud(*cloudIn, *result);

    // Hoist the rotation/translation parts out of the loop.
    const Eigen::Matrix3f rotation = transform.block<3, 3>(0, 0);
    const Eigen::Vector3f translation = transform.block<3, 1>(0, 3);
    const int numPoints = cloudIn->size();

#pragma omp parallel for
    for (int idx = 0; idx < numPoints; idx++)
    {
        const auto &src = cloudIn->points[idx];
        const Eigen::Vector3f moved =
            rotation * Eigen::Vector3f(src.x, src.y, src.z) + translation;

        // Overwrite only the position in the output cloud.
        result->points[idx].x = moved(0);
        result->points[idx].y = moved(1);
        result->points[idx].z = moved(2);
    }

    return result;
}

// Apply a rigid transform to every point AND its normal, returning a new
// cloud. Positions get the full rotation + translation; normals are
// direction vectors, so only the rotation is applied to them. All other
// fields are preserved via the initial full copy.
template <typename PointT>
typename pcl::PointCloud<PointT>::Ptr TransformPointCloudWithNormal(const typename pcl::PointCloud<PointT>::Ptr &cloudIn, const Eigen::Matrix4f &transform)
{
    typename pcl::PointCloud<PointT>::Ptr result(new pcl::PointCloud<PointT>());
    pcl::copyPointCloud(*cloudIn, *result);

    // Hoist the rotation/translation parts out of the loop.
    const Eigen::Matrix3f rotation = transform.block<3, 3>(0, 0);
    const Eigen::Vector3f translation = transform.block<3, 1>(0, 3);
    const int numPoints = cloudIn->size();

#pragma omp parallel for
    for (int idx = 0; idx < numPoints; idx++)
    {
        const auto &src = cloudIn->points[idx];
        auto &dst = result->points[idx];

        const Eigen::Vector3f movedPos =
            rotation * Eigen::Vector3f(src.x, src.y, src.z) + translation;
        const Eigen::Vector3f movedNormal =
            rotation * Eigen::Vector3f(src.normal_x, src.normal_y, src.normal_z);

        dst.x = movedPos(0);
        dst.y = movedPos(1);
        dst.z = movedPos(2);
        dst.normal_x = movedNormal(0);
        dst.normal_y = movedNormal(1);
        dst.normal_z = movedNormal(2);
    }

    return result;
}

// Append one pose to `outfile` in TUM trajectory format:
//   timestamp tx ty tz qx qy qz qw
// Numeric precision is whatever the caller configured on the stream.
// std::endl is kept deliberately: flushing per line keeps the trajectory
// file usable even if the process dies mid-run.
// `inline`: non-template function defined in a header (ODR).
inline void SaveOdom(double timestamp, const Eigen::Matrix4f &pose, std::ofstream &outfile)
{
    Eigen::Vector3f trans = pose.block<3, 1>(0, 3);
    Eigen::Quaternionf quat(pose.block<3, 3>(0, 0));

    outfile << timestamp << " "
            << trans.x() << " "
            << trans.y() << " "
            << trans.z() << " "
            << quat.x() << " "
            << quat.y() << " "
            << quat.z() << " "
            << quat.w() << std::endl;
}

// Format a timestamp as a fixed-point string with 6 decimal places
// (microsecond precision), e.g. 1.5 -> "1.500000".
// `inline`: non-template function defined in a header (ODR).
inline std::string Time2String(double timestamp){
    std::stringstream ss;
    ss.setf(std::ios::fixed);
    ss.precision(6);
    ss << timestamp;
    return ss.str();
}

// Write `img` to "<imageDir>/cam<name>_<timestamp>_undistorted.png".
// If the file already exists it is kept as-is (idempotent re-runs).
// `inline`: non-template function defined in a header (ODR).
inline void SaveImage(cv::Mat img, const std::string &imageDir, double timestamp, const std::string &name) {
    std::string imagePath = imageDir + "/cam" + name + "_" + Time2String(timestamp) + "_undistorted.png";
    if (CommonTools::IfFileExist(imagePath))
        return;
    cv::imwrite(imagePath, img);
}

// Transform `laserCloud` by `transform` (positions + normals), write it to
// "<pcdDir><timestamp>.pcd" (ASCII) and return the transformed cloud.
// Returns nullptr for a null/empty input cloud.
// NOTE(review): unlike SaveImage, no "/" is inserted between pcdDir and the
// filename -- presumably callers pass a trailing slash; confirm before
// changing.
// `inline`: non-template function defined in a header (ODR).
inline pcl::PointCloud<PointType>::Ptr SavePCD(const std::string& pcdDir, double timestamp, const pcl::PointCloud<PointType>::Ptr &laserCloud, Eigen::Matrix4f transform = Eigen::Matrix4f::Identity())
{
    if (!laserCloud || laserCloud->empty()) {
        std::cerr << "Laser cloud is empty or null." << std::endl;
        return nullptr;
    }

    pcl::PCDWriter pcd_writer;
    // Fixed 6-decimal timestamp, same convention as Time2String.
    std::stringstream ss;
    ss.setf(std::ios::fixed);
    ss.precision(6);
    ss << timestamp;
    std::string pcdPath = pcdDir + ss.str() + ".pcd";

    auto transformedCloud = TransformPointCloudWithNormal<PointType>(laserCloud, transform);
    pcd_writer.write(pcdPath, *transformedCloud, false);  // false => ASCII output
    return transformedCloud;
}

// Scan `imageDir` for .png files named "cam<NAME>_<TIME>_..." and return
// (camera name, timestamp string, full path) tuples.
// Files without a directory separator or that do not match the
// underscore-delimited naming pattern are skipped.
// `inline`: non-template function defined in a header (ODR).
inline std::vector<std::tuple<std::string, std::string, std::string>> ReadImages(const std::string& imageDir) {
    const std::string suffix = ".png";
    std::vector<std::tuple<std::string, std::string, std::string>> imageTuples;
    auto files = CommonTools::GetFilesInDir(imageDir, suffix);
    for (const auto& file : files) {
        size_t pos = file.find_last_of("/");
        if (pos == std::string::npos) {
            continue;
        }
        std::string filename = file.substr(pos + 1);
        std::vector<std::string> strSplit = CommonTools::SplitString(filename, '_');
        // BUGFIX: guard against filenames without an underscore -- the
        // previous code indexed strSplit[1] unconditionally (out of bounds).
        if (strSplit.size() < 2) {
            continue;
        }
        auto& camName = strSplit[0];
        auto& timePart = strSplit[1];
        imageTuples.emplace_back(camName, timePart, file);
    }
    return imageTuples;
}

// Undistorts (fisheye model), resizes and saves images for one camera.
// Output files go to "<outputDir>images/" (created if missing).
class ImageProcessor {
protected:
    std::string imageDir;                 // destination directory for saved frames
    std::vector<cv::Mat> undistortMap;    // remap tables {map1, map2}; empty when no distortion
    CameraParam camParam;

public:
    // Precomputes the fisheye undistortion maps once, but only when the
    // distortion coefficients are non-zero; otherwise UndistortImage just
    // resizes.
    // FIX: initializer list reordered to match member declaration order
    // (imageDir, then camParam) -- members are always initialized in
    // declaration order, and the previous list order triggered -Wreorder.
    ImageProcessor(const CameraParam& camParam, const std::string &outputDir)
        : imageDir(outputDir + "images/"), camParam(camParam) {
        CommonTools::IfNotExistThenCreate(imageDir);
        if (cv::norm(this->camParam.distortionCoeff) > 1e-6){
            cv::Mat map1, map2;
            // Keep the original intrinsics as the new camera matrix; output
            // at native resolution (resizing happens later).
            cv::fisheye::initUndistortRectifyMap(this->camParam.intrinsicMatrix, this->camParam.distortionCoeff, cv::Mat(), this->camParam.intrinsicMatrix, this->camParam.resolution, CV_16SC2, map1, map2);
            undistortMap.emplace_back(map1);
            undistortMap.emplace_back(map2);
        }
    }

    // Undistort (if maps were built) and resize to the configured output
    // resolution. Pass-through resize when the camera has no distortion.
    cv::Mat UndistortImage(cv::Mat cvImg) {
        cv::Mat resizedImage;
        if (undistortMap.size() == 2) {
            cv::Mat undistortImg;
            cv::remap(cvImg, undistortImg, undistortMap[0], undistortMap[1], cv::INTER_LINEAR, cv::BORDER_CONSTANT);
            cv::resize(undistortImg, resizedImage, camParam.outResolution);
        }
        else {
            cv::resize(cvImg, resizedImage, camParam.outResolution);
        }

        return resizedImage;
    }

    // Write `img` under this camera's name/timestamp (no-op if file exists).
    void Save(cv::Mat img, double timestamp) {
        SaveImage(img, imageDir, timestamp, camParam.name);
    }
};

// Project every lidar point into every camera (pinhole model, no distortion
// -- presumably because images were undistorted upstream by ImageProcessor;
// confirm).
// Returns:
//   - per-camera 2xN pixel coordinates,
//   - numCameras x N camera-frame depths (z),
//   - numCameras x N Euclidean ranges,
// which callers use for visibility and incidence-angle tests.
// `inline`: non-template function defined in a header (ODR).
inline std::tuple<std::vector<Eigen::MatrixXf>, Eigen::MatrixXf, Eigen::MatrixXf> ProjectPoints2Images(
    const LidarCameraParam &lidarCamParam,
    const pcl::PointCloud<PointType>::Ptr &pointcloud
) {
    size_t numPoints = pointcloud->size();
    size_t numCameras = lidarCamParam.camParams.size();
    // Homogeneous coordinates (4xN, w = 1).
    Eigen::MatrixXf pointsHomo = Eigen::MatrixXf::Ones(4, numPoints);
    pointsHomo.block(0, 0, 3, numPoints) = pointcloud->getMatrixXfMap().block(0, 0, 3, numPoints);

    // Per-camera transformed points and projections.
    std::vector<Eigen::MatrixXf> transformedPoints(numCameras, Eigen::MatrixXf(4, numPoints));
    std::vector<Eigen::MatrixXf> projectedPoints2D(numCameras, Eigen::MatrixXf(2, numPoints));
    Eigen::MatrixXf depths(numCameras, numPoints);
    Eigen::MatrixXf dists(numCameras, numPoints);
    // Batch-transform the cloud into each camera frame.
    for (size_t camId = 0; camId < numCameras; ++camId) {
        Eigen::Matrix3f intrinsic = Eigen::Matrix3f::Identity();
        cv::cv2eigen(lidarCamParam.camParams[camId].intrinsicMatrix, intrinsic);
        transformedPoints[camId] = lidarCamParam.Tlidar2cam[camId] * pointsHomo;
        depths.row(camId) = transformedPoints[camId].row(2);
        // BUGFIX: take the norm of (x, y, z) only. The previous
        // colwise().norm() over the full 4xN matrix included the homogeneous
        // w = 1 term, inflating every range to sqrt(r^2 + 1) and skewing the
        // incidence angles (acos(depth/dist)) computed downstream.
        dists.row(camId) = transformedPoints[camId].topRows(3).colwise().norm();
        // Perspective division, then apply the pinhole intrinsics.
        projectedPoints2D[camId] = transformedPoints[camId].block(0, 0, 2, numPoints).array().rowwise() / depths.row(camId).array();
        Eigen::MatrixXf homogeneousPoints = Eigen::MatrixXf::Ones(3, numPoints);
        homogeneousPoints.block(0, 0, 2, numPoints) = projectedPoints2D[camId];
        projectedPoints2D[camId] = (intrinsic * homogeneousPoints).topRows(2).array() / intrinsic(2, 2);
    }
    return std::make_tuple(projectedPoints2D, depths, dists);
}

// Flatten a JSON object of {"class name": id} pairs into an unordered_map.
// Numeric values are read directly; string values are parsed as unsigned
// integers. Any other value type is silently skipped.
// NOTE(review): std::stoul throws for non-numeric strings and both paths
// narrow to uint32_t -- confirm the label json never contains such values.
// `inline`: non-template function defined in a header (ODR).
inline std::unordered_map<std::string, uint32_t> JsonToUnorderedMap(const nlohmann::json& j) {
    std::unordered_map<std::string, uint32_t> result;
    for (auto& [key, value] : j.items()) {
        if (value.is_number()) {
            result[key] = value.get<uint32_t>();
        }
        // Also accept numeric values encoded as strings.
        else if (value.is_string()) {
            result[key] = std::stoul(value.get<std::string>());
        }
    }
    return result;
}

// Turns per-pixel semantic label images into binary "drop this pixel" masks
// for a configured set of class names, with optional dilation to grow the
// filtered regions.
class SemanticFilter {
protected:

    std::unordered_map<std::string, uint32_t> semMap;  // class name -> numeric id (from json)
    std::unordered_set<uint32_t> filterSet;            // ids of classes to filter out
    int radius;                                        // dilation radius in pixels
    cv::Mat kernel;                                    // elliptical dilation kernel, (2r+1)x(2r+1)
public:
    // Loads the class-name -> id map from `jsonPath` and resolves
    // `filteredClasses` to their ids. A parse/IO failure is logged and
    // leaves the filter empty (nothing filtered) rather than throwing out.
    SemanticFilter(const std::string &jsonPath, const std::vector<std::string> &filteredClasses, int radius = 0)
    : radius(radius) {
        nlohmann::json semanticJson;
        try {
            if (!CommonTools::IfFileExist(jsonPath)) {
                throw std::runtime_error("Json path is not exist: " + jsonPath);
            }
            std::ifstream file(jsonPath);
            if (!file.is_open()) {
                throw std::runtime_error("Failed to open json file: " + jsonPath);
            }
            semanticJson = nlohmann::json::parse(file);
        } catch (const nlohmann::json::exception& e) {
            std::cerr << "JSON parse error: " << e.what() << std::endl;
        } catch (const std::exception& e) {
            std::cerr << "Error: " << e.what() << std::endl;
        }
        scope_color(ANSI_COLOR_CYAN_BOLD);
        semMap = JsonToUnorderedMap(semanticJson);
        std::cout << "Semantic map is : " << semMap << std::endl;
        std::cout << "Filtered Classes:" << std::endl;
        for (const auto& cls : filteredClasses) {
            std::cout << "class name: '" << cls << "'  ";
            auto it = semMap.find(cls);
            if (it != semMap.end()) {
                std::cout << "id: " << it->second;
                filterSet.emplace(it->second);
            }
            std::cout << std::endl;
        }
        std::cout << "Dilating radius: " << radius << std::endl;
        // radius == 0 yields a 1x1 kernel, making the dilation a no-op.
        kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE,
                                           cv::Size(2*radius + 1, 2*radius + 1));
    }

    // Convert a semantic label image into a 0/255 filter mask: 255 where the
    // pixel's class id is in filterSet. Single-channel masks carry the id
    // directly; 3-channel (BGR) masks encode it as a packed R<<16|G<<8|B id.
    cv::Mat_<uint8_t> SemanticMaskToFilterMask(const cv::Mat& img, bool useDilate = true) {
        CV_Assert(img.depth() == CV_8U); // require an 8-bit image

        const int rows = img.rows;
        const int cols = img.cols;
        cv::Mat_<uint8_t> filterMask(rows, cols); // 8-bit output, same size as input

        if (img.channels() == 1) {
            // Single-channel fast path: pixel value IS the class id.
            #pragma omp parallel for
            for (int r = 0; r < rows; ++r) {
                const uchar* imgPtr = img.ptr<uchar>(r);
                for (int c = 0; c < cols; ++c) {
                    filterMask(r, c) = filterSet.count(static_cast<uint32_t>(imgPtr[c])) * 255;
                }
            }
        }
        else if (img.channels() == 3) {
            // Three-channel path (BGR order).
            // BUGFIX: the packed 24-bit id was previously assigned straight
            // into the uint8_t mask cell (silently truncating to the blue
            // channel) and never checked against filterSet at all. Mirror
            // the single-channel path: look the packed id up, emit 0/255.
            #pragma omp parallel for
            for (int r = 0; r < rows; ++r) {
                const cv::Vec3b* imgPtr = img.ptr<cv::Vec3b>(r);
                for (int c = 0; c < cols; ++c) {
                    const auto& pixel = imgPtr[c];
                    const uint32_t packedId = (static_cast<uint32_t>(pixel[2]) << 16) |
                                              (static_cast<uint32_t>(pixel[1]) << 8)  |
                                              (static_cast<uint32_t>(pixel[0]));
                    filterMask(r, c) = filterSet.count(packedId) * 255;
                }
            }
        }
        else {
            CV_Error(cv::Error::StsBadArg, "Unsupported channel number");
        }
        if (useDilate) {
            cv::dilate(filterMask, filterMask, kernel);
        }
        return filterMask;
    }

    // Predicate: true when (x, y) is inside the mask and flagged (> 0),
    // i.e. the pixel belongs to (or is within `radius` of) a filtered class.
    bool operator()(const cv::Mat_<uint8_t> &mask, int x, int y) {
        return (x >= 0) && (x < mask.cols) &&
               (y >= 0) && (y < mask.rows) &&
               (mask(y, x) > 0);
    }
};

// Colors lidar points from camera images, optionally dropping points that
// project onto filtered semantic classes (e.g. dynamic objects).
// Two paths exist: the vectorized operator() (pinhole, batch Eigen math)
// and the per-point RenderPoints (cv::fisheye projection).
class SemanticColorizer {
protected:
    // Unset until SetFilter() is called; both paths check has_value().
    std::optional<SemanticFilter> semanticFilter;
public:
    SemanticColorizer(): semanticFilter(std::nullopt) {}

    // Build the semantic filter from a label-map json and the class names to
    // remove; `radius` is the mask dilation radius in pixels.
    void SetFilter(const std::string &jsonPath, const std::vector<std::string> &filteredClasses, int radius = 0) {
        semanticFilter = SemanticFilter(jsonPath, filteredClasses, radius);
    }

    // Vectorized colorization: project all points into all cameras at once,
    // pick per point the camera with the smallest incidence angle, reject
    // points behind/too close to a camera or outside the image, optionally
    // reject semantically filtered pixels, then copy position + normal and
    // sample RGB from the chosen image. Returns only the surviving points.
    pcl::PointCloud<PointType>::Ptr operator()(
        const LidarCameraParam &lidarCamParam,
        const pcl::PointCloud<PointType>::Ptr &pointcloud,
        const std::vector<cv::Mat> &images,
        const std::optional<std::vector<cv::Mat>> &masks = std::nullopt) {
        pcl::PointCloud<PointType>::Ptr cpc(new pcl::PointCloud<PointType>);
        size_t numPoints = pointcloud->size();
        size_t numCameras = images.size();

        if (numPoints == 0 || numCameras == 0) {
            return cpc;
        }

        auto [projectedPoints2D, depths, dists] = ProjectPoints2Images(lidarCamParam, pointcloud);
        Eigen::MatrixXf angles(numCameras, numPoints);
        Eigen::Matrix<bool, Eigen::Dynamic, Eigen::Dynamic> flags(numCameras, numPoints);
        // Incidence angle per (camera, point): acos(depth / range).
        angles = (depths.array() / dists.array()).unaryExpr([](float x) { return std::acos(x); });
        const float DEPTH_THRESHOLD = 0.01f;
        const float ANGLE_THRESHOLD = M_PI_2;
        // Invalid pairs: point behind / nearly on the image plane, or seen
        // at more than 90 degrees.
        flags = (depths.array() < DEPTH_THRESHOLD) || (angles.array() > ANGLE_THRESHOLD);
        const float PENALTY_COEFF = 10000.0f; // penalty added to angles of invalid (camera, point) pairs
        // Push invalid pairs far above any real angle so minCoeff never
        // selects them; a minimum >= PENALTY_COEFF means "no valid camera".
        angles.array() += flags.cast<float>().array() * PENALTY_COEFF;
        bool semanticFlag = masks.has_value() && masks->size() == numCameras;
        std::vector<bool> maskFlags(numCameras, semanticFlag);
        std::vector<cv::Mat_<uint8_t>> filterMasks(numCameras);
        for (int i = 0; i < numCameras; ++i) {
            // Short-circuit keeps masks->at(i) from being evaluated when no
            // masks were supplied (maskFlags[i] already false).
            maskFlags[i] = maskFlags[i] && AreImagesSameSize(masks->at(i), images[i]);
            if (maskFlags[i]) {
                filterMasks[i] = semanticFilter->SemanticMaskToFilterMask(masks->at(i));
            }
        }
        // Batch selection and color mapping.
        for (size_t i = 0; i < numPoints; ++i) {
            int chosenCamId = -1;
            auto minAng = angles.col(i).minCoeff(&chosenCamId);
            if(minAng >= PENALTY_COEFF)
                continue;
            double x = projectedPoints2D[chosenCamId](0, i);
            double y = projectedPoints2D[chosenCamId](1, i);
            if ((x < 0) || (x >= images[chosenCamId].cols) || (y < 0) || (y >= images[chosenCamId].rows))
                continue;
            if (maskFlags[chosenCamId]) {
                // Skip points landing on (or dilated near) a filtered class.
                if (semanticFilter.has_value() && (*semanticFilter)(filterMasks[chosenCamId], static_cast<int>(x), static_cast<int>(y))) {
                    continue;
                }
            }

            cv::Vec3b cc = images[chosenCamId].at<cv::Vec3b>(static_cast<int>(y), static_cast<int>(x));
            PointType cpt;
            cpt.x = pointcloud->at(i).x;
            cpt.y = pointcloud->at(i).y;
            cpt.z = pointcloud->at(i).z;
            cpt.normal_x = pointcloud->at(i).normal_x;
            cpt.normal_y = pointcloud->at(i).normal_y;
            cpt.normal_z = pointcloud->at(i).normal_z;
            // images are BGR; PointType stores RGB.
            cpt.r = cc[2];
            cpt.g = cc[1];
            cpt.b = cc[0];
            cpc->emplace_back(cpt);
        }

        return cpc;
    }

    // Per-point colorization using cv::fisheye::projectPoints (zero
    // distortion, zero rvec/tvec -- the extrinsic is applied manually
    // beforehand). Camera choice: smallest incidence angle, where each new
    // candidate must beat HALF the current best angle.
    pcl::PointCloud<PointType>::Ptr RenderPoints(
        const LidarCameraParam &lidarCamParam,
        const pcl::PointCloud<PointType>::Ptr &pointcloud,
        const std::vector<cv::Mat> &images,
        const std::optional<std::vector<cv::Mat>> &masks = std::nullopt) {
        pcl::PointCloud<PointType>::Ptr cpc(new pcl::PointCloud<PointType>);

        for (size_t i = 0; i < pointcloud->size(); ++i)
        {
            Eigen::Vector4f homoCoord;
            homoCoord.block<3, 1>(0, 0) = pointcloud->at(i).getVector3fMap();
            homoCoord.block<1, 1>(3, 0) << 1.0;
            double angle = M_PI;
            int chosenCamId = -1;
            for (size_t camId=0; camId < images.size(); ++camId) {
                Eigen::Vector4f pCam = lidarCamParam.Tlidar2cam[camId] * homoCoord;

                // Reject points behind or nearly on the image plane.
                if(pCam.z() < 0.01)
                    continue;
                double tmp = acos(pCam.z() / sqrt(pCam.x() * pCam.x() + pCam.y() * pCam.y() + pCam.z() * pCam.z()));
                // NOTE(review): candidate must beat half the current best
                // angle (angle / 2), not just the best -- presumably a
                // deliberate hysteresis between cameras; confirm intent.
                if(angle / 2 < tmp)
                    continue;
                chosenCamId = camId;
                angle = tmp;
            }
            if(chosenCamId < 0)
                continue;
            homoCoord = lidarCamParam.Tlidar2cam[chosenCamId] * homoCoord;
            std::vector<cv::Point3d> pts3d;
            pts3d.emplace_back(cv::Point3d(homoCoord.x(), homoCoord.y(), homoCoord.z()));
            std::vector<cv::Point2d> pts2d;
            // Identity pose and zero distortion: the extrinsic was already
            // applied above, so only the intrinsics act here.
            cv::Mat rvec = (cv::Mat_<double>(3, 1) << 0, 0, 0);
            cv::Mat tvec = (cv::Mat_<double>(3, 1) << 0, 0, 0);
            cv::Mat dvec = (cv::Mat_<double>(4, 1) << 0, 0, 0, 0);
            cv::fisheye::projectPoints(pts3d, pts2d, rvec, tvec, lidarCamParam.camParams[chosenCamId].intrinsicMatrix, dvec);
            cv::Point2f pp = pts2d[0];
            if (pp.x > 0 && pp.x < lidarCamParam.camParams[chosenCamId].resolution.width && pp.y > 0 && pp.y < lidarCamParam.camParams[chosenCamId].resolution.height) {
                if (masks.has_value() && AreImagesSameSize(masks->at(chosenCamId), images.at(chosenCamId))) {
                    // NOTE(review): this passes the RAW semantic mask to the
                    // filter predicate, whereas operator() first converts it
                    // via SemanticMaskToFilterMask -- confirm whether the
                    // raw mask's ">0" test is the intended behavior here.
                    if (semanticFilter.has_value() && (*semanticFilter)(masks->at(chosenCamId), static_cast<int>(pp.x), static_cast<int>(pp.y))) {
                        continue;
                    }
                }

                auto cc = images.at(chosenCamId).at<cv::Vec3b>(pp);
                PointType cpt;
                cpt.x = pointcloud->at(i).x;
                cpt.y = pointcloud->at(i).y;
                cpt.z = pointcloud->at(i).z;
                cpt.normal_x = pointcloud->at(i).normal_x;
                cpt.normal_y = pointcloud->at(i).normal_y;
                cpt.normal_z = pointcloud->at(i).normal_z;
                // images are BGR; PointType stores RGB.
                cpt.r = cc[2];
                cpt.g = cc[1];
                cpt.b = cc[0];
                cpc->emplace_back(cpt);
            }
        }
        return cpc;
    }
};

#endif // __UTILITY_HPP__