

/**********************************************************
 * Author       : Zhenxin
 * Since        : 2024-08-30 11:46:04
 * LastTime     : 2024-11-16 11:46:06
 * LastAuthor   : Zhenxin
 * Message      : Src file for Camera calibration and hand eye calibration
 * FilePath     : /Location/libloc/src/CalibrationCalc.cpp
 * Copyright (c) 2024 by Zhenxin email: iamyzx@163.com, All Rights Reserved.
 **********************************************************/

#include <filesystem>
#include "CalibrationCalc.h"
#include <unistd.h>
// Default construction; calibration members are populated later (e.g. via readCalibrateParam).
CameraCalibration::CameraCalibration() {}
// No manual cleanup needed: members (cv::Mat etc.) release themselves.
CameraCalibration::~CameraCalibration() {}

/**
 * Fit the plane passing through three 3-D points.
 *
 * The plane is expressed as normal·p + D = 0, where `normal` is the
 * (unnormalized) cross product of the two edge vectors p1->p2 and p1->p3.
 * Always returns true; collinear input yields a zero normal.
 */
bool fitPlaneByThreePoints(const cv::Point3d &p1, const cv::Point3d &p2, const cv::Point3d &p3,
                           cv::Point3d &normal, double &D)
{
    const cv::Point3d edge1 = p2 - p1;
    const cv::Point3d edge2 = p3 - p1;
    normal = edge1.cross(edge2);
    D = -normal.dot(p1);
    return true;
}

/**
 * Convert an Euler-angle triple (degrees) into a 3x3 CV_64F rotation matrix.
 *
 * @param eulerAngle 1x3 matrix of angles in degrees (rx, ry, rz)
 * @param seq        rotation-order string; NOTE(review): currently ignored —
 *                   the order is hard-coded as rotX * rotY * rotZ. Confirm
 *                   whether callers rely on other sequences.
 * @return 3x3 rotation matrix.
 *
 * Fix: the original scaled the const input matrix in place
 * (`eulerAngle /= 180 / CV_PI`), mutating the caller's data; the
 * degrees-to-radians conversion now happens in local variables.
 */
cv::Mat eulerAngleToRotatedMatrix(const cv::Mat &eulerAngle, const std::string &seq)
{
    (void)seq; // TODO: honor the requested rotation sequence.

    cv::Matx13d m(eulerAngle);
    const double deg2rad = CV_PI / 180.0;
    const double rx = m(0, 0) * deg2rad, ry = m(0, 1) * deg2rad, rz = m(0, 2) * deg2rad;
    const double xs = std::sin(rx), xc = std::cos(rx);
    const double ys = std::sin(ry), yc = std::cos(ry);
    const double zs = std::sin(rz), zc = std::cos(rz);

    cv::Mat rotX = (cv::Mat_<double>(3, 3) << 1, 0, 0, 0, xc, -xs, 0, xs, xc);
    cv::Mat rotY = (cv::Mat_<double>(3, 3) << yc, 0, ys, 0, 1, 0, -ys, 0, yc);
    cv::Mat rotZ = (cv::Mat_<double>(3, 3) << zc, -zs, 0, zs, zc, 0, 0, 0, 1);

    return rotX * rotY * rotZ;
}

/**
 * Build a rotation matrix from Z-X-Z Euler angles (radians):
 * R = Rz(alpha) * Rx(beta) * Rz(gamma).
 */
cv::Mat eulerRodZXZToRotMat(double alpha, double beta, double gamma)
{
    const double ca = cos(alpha), sa = sin(alpha);
    const double cb = cos(beta), sb = sin(beta);
    const double cg = cos(gamma), sg = sin(gamma);

    // First rotation, about the Z axis.
    cv::Mat Rz1 = (cv::Mat_<double>(3, 3) << ca, -sa, 0,
                   sa, ca, 0,
                   0, 0, 1);

    // Middle rotation, about the X axis.
    cv::Mat Rx = (cv::Mat_<double>(3, 3) << 1, 0, 0,
                  0, cb, -sb,
                  0, sb, cb);

    // Second rotation, about the Z axis again.
    cv::Mat Rz2 = (cv::Mat_<double>(3, 3) << cg, -sg, 0,
                   sg, cg, 0,
                   0, 0, 1);

    return Rz1 * Rx * Rz2;
}

/**
 * Compose a rotation matrix from Euler angles (radians) applied in
 * Z-Y-X order: R = Rz(z) * Ry(y) * Rx(x).  Always returns true.
 */
bool eularAngleToRotaionMatrix2(cv::Mat &R, double z, double y, double x)
{
    const double cx = cos(x), sx = sin(x);
    const double cy = cos(y), sy = sin(y);
    const double cz = cos(z), sz = sin(z);

    // Elementary rotation about the x axis.
    cv::Mat R_x = (cv::Mat_<double>(3, 3) << 1, 0, 0,
                   0, cx, -sx,
                   0, sx, cx);

    // Elementary rotation about the y axis.
    cv::Mat R_y = (cv::Mat_<double>(3, 3) << cy, 0, sy,
                   0, 1, 0,
                   -sy, 0, cy);

    // Elementary rotation about the z axis.
    cv::Mat R_z = (cv::Mat_<double>(3, 3) << cz, -sz, 0,
                   sz, cz, 0,
                   0, 0, 1);

    // Combined rotation, ZYX order.
    R = R_z * R_y * R_x;
    return true;
}

/**
 * Check whether the top-left 3x3 of `R` is orthonormal:
 * R^T * R must equal the identity within 1e-6 (L2 norm of the difference).
 */
bool checkRotationMatrix(cv::Mat R)
{
    cv::Mat rot = R(cv::Rect(0, 0, 3, 3));
    cv::Mat shouldBeIdentity = rot.t() * rot; // identity iff rot is orthonormal
    cv::Mat identity = cv::Mat::eye(3, 3, shouldBeIdentity.type());
    return cv::norm(identity, shouldBeIdentity) < 1e-6;
}

/**
 * Pack a rotation and a translation into one 4x4 homogeneous transform
 * [R | T; 0 0 0 1].  Assumes R is 3x3 and T is 3x1, both CV_64F
 * (elements are read via at<double>).
 */
cv::Mat R_T2RT(cv::Mat &R, cv::Mat &T)
{
    cv::Mat RT = (cv::Mat_<double>(4, 4) << R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), T.at<double>(0, 0),
                  R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), T.at<double>(1, 0),
                  R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), T.at<double>(2, 0),
                  0.0, 0.0, 0.0, 1.0);
    return RT;
}

/**
 * Split a 4x4 homogeneous transform into its rotation and translation parts.
 * NOTE: `R` and `T` are views (headers) into `RT`, not deep copies — writing
 * through them modifies `RT`.
 */
void RT2R_T(cv::Mat &RT, cv::Mat &R, cv::Mat &T)
{
    R = RT(cv::Range(0, 3), cv::Range(0, 3)); // top-left 3x3 rotation
    T = RT(cv::Range(0, 3), cv::Range(3, 4)); // right-most 3x1 translation
}

void getBoardWorldPoints(int num, cv::Size boardSize, cv::Size2d squareSize,
                         std::vector<std::vector<cv::Point3f>> &outArrayPoints)
{
    outArrayPoints.clear();
    for (int n = 0; n < num; n++)
    {
        std::vector<cv::Point3f> tempPointSet;
        for (int i = 0; i < boardSize.height; i++)
        {
            for (int j = 0; j < boardSize.width; j++)
            {
                cv::Point3f realPoint;
                realPoint.x = i * squareSize.width;
                realPoint.y = j * squareSize.height;
                realPoint.z = 0;
                tempPointSet.push_back(realPoint);
            }
        }
        outArrayPoints.push_back(tempPointSet);
    }
}

/**
 * Transform a point expressed in the slave (right) camera frame into the
 * main (left) camera frame.
 *
 * OpenCV stereo calibration yields R, T mapping left -> right
 * (p_r = R * p_l + T), so the inverse mapping is applied here:
 * p_l = R^T * p_r - R^T * T.
 *
 * @param point point in the slave-camera frame
 * @param R     3x3 CV_64F stereo rotation (left -> right)
 * @param T     3x1 CV_64F stereo translation (left -> right)
 * @return the same point expressed in the main-camera frame
 *
 * Cleanup: removed an unused local (`cv::Point3f point3D`) and two large
 * blocks of dead, commented-out alternative implementations.
 */
cv::Point3d transformToMainCamera2(const cv::Point3d &point, const cv::Mat &R, const cv::Mat &T)
{
    cv::Mat pointMat = (cv::Mat_<double>(3, 1) << point.x, point.y, point.z);
    cv::Mat R_r2l = R.t();      // inverse rotation (right -> left)
    cv::Mat T_r2l = -R.t() * T; // inverse translation
    cv::Mat pl = R_r2l * pointMat + T_r2l;
    return cv::Point3d(pl.at<double>(0, 0), pl.at<double>(1, 0), pl.at<double>(2, 0));
}

/**
 * Persist the dual-camera calibration result to a YAML/XML file.
 *
 * @param path         output file path
 * @param cameraParams {M1, D1, M2, D2}: intrinsics and distortion of both cameras
 * @param result       {R, T, E, F, R1, R2, P1, P2, Q}: stereo parameters
 * @return STATUS_OK on success, STATUS_INVALID_PARAM if the file cannot be
 *         opened, STATUS_ERROR on an unexpected exception.
 *
 * Fix: the hand-eye / reference-plane placeholders were created as CV_16F,
 * but readCalibrateParam() reads the plane back with at<double>(); they are
 * now written as CV_64F so the element layout matches.
 */
StatusCode CameraCalibration::saveDualCameraRet(std::string path,
                                                std::array<cv::Mat1d, 4> &cameraParams, std::array<cv::Mat1d, 9> &result)
{
    try
    {
        cv::FileStorage fs(path, cv::FileStorage::WRITE);
        if (!fs.isOpened())
        {
            spdlog::error("Can not save the intrinsic parameters!");
            return STATUS_INVALID_PARAM;
        }
        // Zero placeholders for the hand-eye transforms and reference plane;
        // they are filled in later (see saveUpdateRet).
        cv::Mat EtoHand = cv::Mat::zeros(4, 4, CV_64F);
        cv::Mat plane = cv::Mat::zeros(1, 4, CV_64F);
        fs << "M1" << cameraParams[0] << "D1" << cameraParams[1]
           << "M2" << cameraParams[2] << "D2" << cameraParams[3]
           << "R" << result[0] << "T" << result[1]
           << "stereo_E" << result[2] << "stereo_F" << result[3]
           << "R1" << result[4] << "R2" << result[5]
           << "P1" << result[6] << "P2" << result[7] << "Q" << result[8]
           << "EtoHandY" << EtoHand
           << "EtoHandR" << EtoHand
           << "Normal_abcd" << plane;
        fs.release();
        return STATUS_OK;
    }
    catch (const std::exception &e)
    {
        spdlog::error(e.what());
        return STATUS_ERROR;
    }
}

/**
 * Update selected calibration fields ("EtoHandY", "EtoHandR", "Normal_abcd")
 * in an existing calibration YAML file.  A temporary "m<name>" file is
 * written next to the original, then the original is removed and the
 * temporary renamed over it.
 *
 * @param path      path of the calibration file to update
 * @param updateMat new values, parallel to matchName
 * @param matchName field names to update; every entry must be one of the
 *                  supported fields, otherwise the call fails
 * @return STATUS_OK on success, STATUS_INVALID_PARAM on bad input/IO setup,
 *         STATUS_ERROR on unexpected failure.
 *
 * Fixes:
 *  - `plane` was allocated as CV_16F (1x4 halves = 8 bytes) but written via
 *    at<double>(0,3) (offset 24) — a heap buffer overflow; now CV_64F.
 *  - sleep(0.2) truncated to sleep(0) (unistd sleep takes whole seconds) and
 *    was a no-op; std::remove/std::rename are synchronous, so it was removed.
 *  - if the temporary file could not be opened, the original was still
 *    deleted (data loss); an error is now returned first.
 *  - the rename result is checked; unused `rootNode` removed.
 */
StatusCode CameraCalibration::saveUpdateRet(std::string path, std::vector<cv::Mat> &updateMat, std::vector<std::string> &matchName)
{
    try
    {
        cv::FileStorage fsRead(path, cv::FileStorage::READ);
        if (!fsRead.isOpened())
        {
            spdlog::error("Error opening file for reading: {}.", path);
            return STATUS_INVALID_PARAM;
        }
        if (updateMat.size() != matchName.size())
        {
            spdlog::error("The input name {} and data length {} do not match.", matchName.size(), updateMat.size());
            return STATUS_INVALID_PARAM;
        }

        // Default plane from the cached members; may be replaced below.
        cv::Mat plane = cv::Mat::zeros(1, 4, CV_64F);
        plane.at<double>(0, 0) = this->refPlane[0];
        plane.at<double>(0, 1) = this->refPlane[1];
        plane.at<double>(0, 2) = this->refPlane[2];
        plane.at<double>(0, 3) = this->refPlane_d;

        size_t alterNum = 0;
        for (size_t index = 0; index < matchName.size(); index++)
        {
            if (matchName[index] == "EtoHandY")
            {
                this->TY = updateMat[index];
                ++alterNum;
            }
            else if (matchName[index] == "EtoHandR")
            {
                this->TR = updateMat[index];
                ++alterNum;
            }
            else if (matchName[index] == "Normal_abcd")
            {
                plane = updateMat[index];
                ++alterNum;
            }
        }
        if (alterNum != updateMat.size())
        {
            spdlog::error("Update error, please check the update fields.");
            return STATUS_INVALID_PARAM;
        }

        // Write the merged content into a sibling temp file "m<filename>".
        std::filesystem::path filePath(path);
        std::string directory = filePath.parent_path().string();
        std::string fileName = filePath.filename().string();
        std::string modifiedYamlPath = directory + "/" + "m" + fileName;

        cv::FileStorage fsWrite(modifiedYamlPath, cv::FileStorage::WRITE);
        if (!fsWrite.isOpened())
        {
            fsRead.release();
            spdlog::error("Error opening file for writing: {}.", modifiedYamlPath);
            return STATUS_INVALID_PARAM;
        }
        fsWrite << "M1" << this->M1 << "D1" << this->D1
                << "M2" << this->M2 << "D2" << this->D2
                << "R" << this->R << "T" << this->T
                << "stereo_E" << this->E << "stereo_F" << this->F
                << "R1" << this->R1 << "R2" << this->R2
                << "P1" << this->P1 << "P2" << this->P2 << "Q" << this->Q
                << "EtoHandY" << this->TY
                << "EtoHandR" << this->TR
                << "Normal_abcd" << plane;
        fsWrite.release();
        fsRead.release();

        // Replace the original: delete it, then rename the temp file into place.
        std::remove(path.c_str());
        if (std::rename(modifiedYamlPath.c_str(), path.c_str()) != 0)
        {
            spdlog::error("Failed to rename {} to {}.", modifiedYamlPath, path);
            return STATUS_ERROR;
        }

        spdlog::info("EyeToHandRet YAML file updated successfully.");
        return STATUS_OK;
    }
    catch (const std::exception &e)
    {
        spdlog::error("EyeToHandRet YAML file updated failed.");
        spdlog::error(e.what());
        return STATUS_ERROR;
    }
}

/**
 * Load stereo calibration parameters, hand-eye transforms and the reference
 * plane from a YAML file, then derive:
 *  - Rt: a 4x4 matrix built from R^T and the negated translation,
 *  - mainCenter / slaveCenter: the optical centers in the main-camera frame.
 *
 * @param path calibration file path
 * @return STATUS_OK on success, STATUS_INVALID_PARAM on any failure.
 *
 * Fixes: missing isOpened() check (previously read silently from a closed
 * storage), exception caught by value, and dead homogeneous-coordinate code
 * removed (the 4th component was negated and never used).
 */
StatusCode CameraCalibration::readCalibrateParam(std::string path)
{
    try
    {
        cv::FileStorage fs(path, cv::FileStorage::READ);
        if (!fs.isOpened())
        {
            spdlog::error("Error opening file for reading: {}.", path);
            return STATUS_INVALID_PARAM;
        }
        // Internal parameter matrix and distortion coefficient
        fs["M1"] >> M1;
        fs["D1"] >> D1;
        fs["M2"] >> M2;
        fs["D2"] >> D2;

        // Stereo extrinsics (left -> right).
        fs["R"] >> R;
        fs["T"] >> T;

        // Rectification / projection matrices.
        fs["R1"] >> R1;
        fs["R2"] >> R2;
        fs["P1"] >> P1;
        fs["P2"] >> P2;
        fs["Q"] >> Q;

        fs["stereo_E"] >> E;
        fs["stereo_F"] >> F;

        // Reference plane ax + by + cz + d = 0.
        cv::Mat rPlane;
        fs["Normal_abcd"] >> rPlane;
        this->refPlane_d = rPlane.at<double>(0, 3);
        this->refPlane = {rPlane.at<double>(0, 0), rPlane.at<double>(0, 1), rPlane.at<double>(0, 2)};

        // Hand-eye transforms.
        fs["EtoHandY"] >> TY;
        fs["EtoHandR"] >> TR;

        cv::Mat R_inv = R.t();
        // NOTE(review): the translation is only negated, not rotated; a true
        // inverse transform would use -R^T * T (as transformToMainCamera2
        // does). Kept as-is because downstream consumers of Rt may depend on
        // this convention — confirm before changing.
        cv::Mat T_inv = -T;
        Rt = (cv::Mat_<double>(4, 4) << R_inv.at<double>(0, 0), R_inv.at<double>(0, 1), R_inv.at<double>(0, 2), T_inv.at<double>(0),
              R_inv.at<double>(1, 0), R_inv.at<double>(1, 1), R_inv.at<double>(1, 2), T_inv.at<double>(1),
              R_inv.at<double>(2, 0), R_inv.at<double>(2, 1), R_inv.at<double>(2, 2), T_inv.at<double>(2),
              0.0, 0.0, 0.0, 1.0);
        fs.release();

        // Optical centers expressed in the main-camera frame.
        slaveCenter = transformToMainCamera2(cv::Point3d(0.0, 0.0, 0.0), R, T);
        mainCenter = cv::Point3d(0.0, 0.0, 0.0);
    }
    catch (const std::exception &e)
    {
        spdlog::error(e.what());
        return STATUS_INVALID_PARAM;
    }
    return STATUS_OK;
}

/**
 * Calibrate the projection reference plane from chessboard image pairs on disk.
 *
 * NOTE(review): "*_R.bmp" is globbed into imageNamesL and "*_L.bmp" into
 * imageNamesR — this looks swapped; confirm the on-disk naming convention
 * before changing it.
 *
 * Pipeline: detect chessboard corners in both views, triangulate three of the
 * corners with the stereo extrinsics, fit a plane through them, then derive
 * the projection plane containing both optical centers.
 *
 * @param path     directory containing "*_L.bmp" / "*_R.bmp" image pairs
 * @param planeRet out: plane coefficients {a, b, c, d} of ax+by+cz+d = 0
 * @return STATUS_OK on success, STATUS_INVALID_PARAM when no usable corner
 *         pairs were found, STATUS_ERROR on geometric failure.
 *
 * Fixes:
 *  - the left image's corner-detection result was overwritten by the right
 *    image's; a pair is now stored only when BOTH views succeed.
 *  - guarded the hard-coded corner indices (0, 10, 87) and empty lists.
 *  - the single-view calibrateCamera call was given one corner set but N
 *    object-point sets (size mismatch); it now gets exactly one of each.
 *  - the final sanity check compared signed error (`check < 1e-3`), which a
 *    large negative error would pass; now compares |check|.
 *  - the read-failure message printed the cv::Mat instead of the filename;
 *    the local plane offset no longer shadows the member D1.
 */
StatusCode CameraCalibration::CalibraRefPlane(std::string path, double *planeRet)
{
    // Points in the world coordinate system (mm)
    std::vector<std::vector<cv::Point3f>> objectPoints;
    // Points in the image coordinate system
    std::vector<std::vector<cv::Point2f>> imagePointsL, imagePointsR;

    // Detect interior corners in every image pair.
    std::vector<cv::String> imageNamesL, imageNamesR;
    cv::glob(path + "/*_R.bmp", imageNamesL, false);
    cv::glob(path + "/*_L.bmp", imageNamesR, false);
    cv::Mat grayL, grayR;
    cv::Size imageSize;
    std::vector<cv::Point2f> cornersL, cornersR;
    for (size_t item = 0; item < imageNamesL.size(); item++)
    {
        cv::Mat imageL = cv::imread(imageNamesL[item]);
        cv::Mat imageR = cv::imread(imageNamesR[item]);
        if (!imageL.empty())
        {
            imageSize = imageL.size();
        }

        if (imageL.empty() || imageR.empty())
        {
            std::cerr << "Could not read image: " << imageNamesL[item] << ",or " << imageNamesR[item] << std::endl;
            continue;
        }
        cv::cvtColor(imageL, grayL, cv::COLOR_BGR2GRAY);
        cv::cvtColor(imageR, grayR, cv::COLOR_BGR2GRAY);

        bool foundL = cv::findChessboardCorners(grayL, this->boardSize, cornersL, cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE);
        bool foundR = cv::findChessboardCorners(grayR, this->boardSize, cornersR, cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE);
        if (foundL && foundR)
        {
            cv::cornerSubPix(grayL, cornersL, this->boardSize, this->squareSize, cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.1));
            imagePointsL.push_back(cornersL);
            cv::cornerSubPix(grayR, cornersR, this->boardSize, this->squareSize, cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.1));
            imagePointsR.push_back(cornersR);
        }
        cv::Mat showImage = imageL.clone();
        cv::drawChessboardCorners(showImage, this->boardSize, cornersL, true);
        cv::imwrite("showCorner.jpg", showImage);
    }

    // The triangulation below needs corners 0, 10 and 87 of the first pair.
    if (imagePointsL.empty() || imagePointsR.empty() ||
        imagePointsL[0].size() < 88 || imagePointsR[0].size() < 88)
    {
        spdlog::error("Not enough chessboard corners detected to fit the reference plane.");
        return STATUS_INVALID_PARAM;
    }

    getBoardWorldPoints(imageNamesL.size(), this->boardSize, this->squareSize, objectPoints);

    // Single-view calibration of the left camera; the recovered pose is only
    // used for debugging.
    std::vector<cv::Mat> rvecsMat, tvecsMat;
    std::vector<std::vector<cv::Point2f>> cornerList;
    cornerList.push_back(imagePointsL.back());
    std::vector<std::vector<cv::Point3f>> singleObjectPoints(objectPoints.begin(), objectPoints.begin() + 1);
    cv::Mat M11 = this->M1, D11 = this->D1; // copies: keep the members intact
    cv::calibrateCamera(singleObjectPoints, cornerList, imageSize, M11, D11, rvecsMat, tvecsMat);
    cv::Mat planeTranR;
    cv::Rodrigues(rvecsMat[0], planeTranR);

    // Triangulate three well-separated corners of the first view.
    std::vector<cv::Point2f> pointsLeft, pointsRight;
    cv::Mat pts_4d;
    pointsLeft.push_back(imagePointsL[0][0]);
    pointsLeft.push_back(imagePointsL[0][10]);
    pointsLeft.push_back(imagePointsL[0][87]);
    pointsRight.push_back(imagePointsR[0][0]);
    pointsRight.push_back(imagePointsR[0][10]);
    pointsRight.push_back(imagePointsR[0][87]);

    // Projection matrices: the left camera is the reference frame.
    cv::Mat T1 = (cv::Mat_<double>(3, 4) << 1, 0, 0, 0,
                  0, 1, 0, 0,
                  0, 0, 1, 0);
    cv::Mat T2 = (cv::Mat_<double>(3, 4) << R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), T.at<double>(0, 0),
                  R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), T.at<double>(1, 0),
                  R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), T.at<double>(2, 0));

    cv::Mat TT1 = M1 * T1;
    cv::Mat TT2 = M2 * T2;

    cv::triangulatePoints(TT1, TT2, pointsLeft, pointsRight, pts_4d);

    std::vector<cv::Point3d> points3d;
    for (int i = 0; i < pts_4d.cols; i++)
    {
        cv::Mat x = pts_4d.col(i);
        x /= x.at<float>(3, 0); // dehomogenize
        points3d.emplace_back(x.at<float>(0, 0), x.at<float>(1, 0), x.at<float>(2, 0));
    }

#if DEBUG
    spdlog::info("First points3d:{},{},{}.", points3d[0].x, points3d[0].y, points3d[0].z);
#endif

    cv::Point3d nor;
    double planeD = 0.0; // plane offset; renamed to avoid shadowing member D1
    fitPlaneByThreePoints(points3d[0], points3d[1], points3d[2], nor, planeD);

    spdlog::info("FitPlane:{},{},{},{}.", nor.x, nor.y, nor.z, planeD);

    // Projection plane: normal is perpendicular to both the board normal and
    // the baseline, so the plane contains both optical centers.
    cv::Point3d normal = nor;
    cv::Point3d p12(mainCenter.x - slaveCenter.x, mainCenter.y - slaveCenter.y, mainCenter.z - slaveCenter.z);
    cv::Point3d newNormal = normal.cross(p12);

    // Determine whether it is collinear
    if (std::abs(normal.dot(p12)) < 1e-6)
    {
        spdlog::error("The points P1 and P2 are on the given plane or are collinear with the normal vector.");
        return STATUS_ERROR;
    }

    double D_prime = -(newNormal.x * mainCenter.x + newNormal.y * mainCenter.y + newNormal.z * mainCenter.z);
#if DEBUG
    spdlog::info("The equation of the new plane is: {}x {}y {}z {} = 0", newNormal.x, newNormal.y, newNormal.z, D_prime);
#endif
    planeRet[0] = newNormal.x;
    planeRet[1] = newNormal.y;
    planeRet[2] = newNormal.z;
    planeRet[3] = D_prime;
    // Both optical centers must lie (numerically) on the resulting plane.
    double check = slaveCenter.x * planeRet[0] + slaveCenter.y * planeRet[1] + slaveCenter.z * planeRet[2] + D_prime;
    if (std::abs(check) < 1e-3)
        return STATUS_OK;
    spdlog::error("Projection plane calculation error, please check the camera's optical center coordinates and calibration normal vector.");
    return STATUS_ERROR;
}

/**
 * Online variant of CalibraRefPlane: computes the projection reference plane
 * from in-memory stereo image pairs instead of files on disk.
 *
 * @param imagesLeft  left-camera images (BGR)
 * @param imagesRight right-camera images (BGR), paired by index
 * @param planeRet    out: 1x4 CV_64F row {a, b, c, d} of ax+by+cz+d = 0
 * @return STATUS_OK on success, STATUS_INVALID_PARAM on bad input or failed
 *         corner detection, STATUS_ERROR on geometric failure.
 *
 * Fixes:
 *  - the left view's corner-detection result was overwritten by the right
 *    view's; both are now checked independently.
 *  - corners recovered by the findChessboardCornersSB fallback were never
 *    pushed into imagePointsL/R, which could leave them empty and crash at
 *    imagePointsL[0] below; they are now stored.
 *  - the single-view calibrateCamera call received one corner set but N
 *    object-point sets (size mismatch); it now gets exactly one of each.
 *  - final sanity check now compares |check| instead of the signed value.
 *  - local plane offset renamed so it no longer shadows member D1.
 */
StatusCode CameraCalibration::CalibraRefPlaneOnline(std::vector<cv::Mat> imagesLeft, std::vector<cv::Mat> imagesRight,
                                                    cv::Mat &planeRet)
{
    if (imagesLeft.size() != imagesRight.size())
    {
        spdlog::error("The number of calibration images for the left and right cameras is inconsistent. please check.");
        return STATUS_INVALID_PARAM;
    }
    // Points in the world coordinate system (mm)
    std::vector<std::vector<cv::Point3f>> objectPoints;
    // Points in the image coordinate system
    std::vector<std::vector<cv::Point2f>> imagePointsL, imagePointsR;

    // Detect interior corners in every pair.
    cv::Mat grayL, grayR;
    cv::Size imageSize;
    std::vector<cv::Point2f> cornersL, cornersR;
    for (size_t item = 0; item < imagesLeft.size(); item++)
    {
        cv::Mat imageL = imagesLeft[item];
        cv::Mat imageR = imagesRight[item];
        if (!imageL.empty())
        {
            imageSize = imageL.size();
        }

        if (imageL.empty() || imageR.empty())
        {
            spdlog::error("The calibration image is empty.");
            continue;
        }
        cv::cvtColor(imageL, grayL, cv::COLOR_BGR2GRAY);
        cv::cvtColor(imageR, grayR, cv::COLOR_BGR2GRAY);

        bool foundL = cv::findChessboardCorners(grayL, this->boardSize, cornersL, cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE);
        bool foundR = cv::findChessboardCorners(grayR, this->boardSize, cornersR, cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE);
        if (foundL && foundR)
        {
            cv::cornerSubPix(grayL, cornersL, this->boardSize, this->squareSize, cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.1));
            imagePointsL.push_back(cornersL);
            cv::cornerSubPix(grayR, cornersR, this->boardSize, this->squareSize, cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.1));
            imagePointsR.push_back(cornersR);
        }
        else
        {
            // Fallback: the sector-based detector is more robust on blurry
            // images, at the cost of roughly 2x the runtime.
            bool found = cv::findChessboardCornersSB(grayL, this->boardSize, cornersL, cv::CALIB_CB_EXHAUSTIVE | cv::CALIB_CB_ACCURACY);
            if (!found)
            {
                spdlog::error("Corner detection failed, image is too blurry,please optimize the image quality.");
                return STATUS_INVALID_PARAM;
            }
            found = cv::findChessboardCornersSB(grayR, this->boardSize, cornersR, cv::CALIB_CB_EXHAUSTIVE | cv::CALIB_CB_ACCURACY);
            if (!found)
            {
                spdlog::error("Corner detection failed, image is too blurry,please optimize the image quality.");
                return STATUS_INVALID_PARAM;
            }
            // Store the recovered corners (previously discarded).
            imagePointsL.push_back(cornersL);
            imagePointsR.push_back(cornersR);
        }
#if DEBUG
        cv::Mat showImage = imageL.clone();
        cv::drawChessboardCorners(showImage, this->boardSize, cornersL, true);
        cv::imwrite("showCorner.jpg", showImage);
#endif
    }

    // The triangulation below needs corners 0, 10 and 87 of the first pair.
    if (imagePointsL.empty() || imagePointsR.empty() ||
        imagePointsL[0].size() < 88 || imagePointsR[0].size() < 88)
    {
        spdlog::error("Not enough chessboard corners detected to fit the reference plane.");
        return STATUS_INVALID_PARAM;
    }

    getBoardWorldPoints(imagesLeft.size(), this->boardSize, this->squareSize, objectPoints);

    // Local sliding table module testing: single-view pose of the left camera.
    std::vector<cv::Mat> rvecsMat, tvecsMat;
    std::vector<std::vector<cv::Point2f>> cornerList;
    cornerList.push_back(imagePointsL.back());
    std::vector<std::vector<cv::Point3f>> singleObjectPoints(objectPoints.begin(), objectPoints.begin() + 1);
    cv::calibrateCamera(singleObjectPoints, cornerList, imageSize, this->M1, this->D1, rvecsMat, tvecsMat);
    cv::Mat planeTranR;
    cv::Rodrigues(rvecsMat[0], planeTranR);

    // Triangulate three well-separated corners of the first view.
    std::vector<cv::Point2f> pointsLeft, pointsRight;
    cv::Mat pts_4d;
    pointsLeft.push_back(imagePointsL[0][0]);
    pointsLeft.push_back(imagePointsL[0][10]);
    pointsLeft.push_back(imagePointsL[0][87]);
    pointsRight.push_back(imagePointsR[0][0]);
    pointsRight.push_back(imagePointsR[0][10]);
    pointsRight.push_back(imagePointsR[0][87]);

    // Projection matrices: the left camera is the reference frame.
    cv::Mat LeftT = (cv::Mat_<double>(3, 4) << 1, 0, 0, 0,
                     0, 1, 0, 0,
                     0, 0, 1, 0);
    cv::Mat RightT = (cv::Mat_<double>(3, 4) << R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), T.at<double>(0, 0),
                      R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), T.at<double>(1, 0),
                      R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), T.at<double>(2, 0));

    cv::Mat TT1 = this->M1 * LeftT;
    cv::Mat TT2 = this->M2 * RightT;

    cv::triangulatePoints(TT1, TT2, pointsLeft, pointsRight, pts_4d);

    std::vector<cv::Point3d> points3d;
    for (int i = 0; i < pts_4d.cols; i++)
    {
        cv::Mat x = pts_4d.col(i);
        x /= x.at<float>(3, 0); // normalization
        points3d.emplace_back(x.at<float>(0, 0), x.at<float>(1, 0), x.at<float>(2, 0));
    }

#if DEBUG
    std::cout << "R:" << planeTranR << std::endl;
    spdlog::info("First points3d:{},{},{}.", points3d[0].x, points3d[0].y, points3d[0].z);
#endif

    cv::Point3d nor;
    double planeD = 0.0; // plane offset; renamed to avoid shadowing member D1
    fitPlaneByThreePoints(points3d[0], points3d[1], points3d[2], nor, planeD);

    spdlog::info("FitPlane:{},{},{},{}.", nor.x, nor.y, nor.z, planeD);

    // Projection plane: normal is perpendicular to both the board normal and
    // the baseline, so the plane contains both optical centers.
    cv::Point3d normal = nor;
    cv::Point3d p12(mainCenter.x - slaveCenter.x, mainCenter.y - slaveCenter.y, mainCenter.z - slaveCenter.z);
    cv::Point3d newNormal = normal.cross(p12);

    // Determine whether it is collinear
    if (std::abs(normal.dot(p12)) < 1e-6)
    {
        spdlog::error("The points P1 and P2 are on the given plane or are collinear with the normal vector.");
        return STATUS_ERROR;
    }

    double D_prime = -(newNormal.x * mainCenter.x + newNormal.y * mainCenter.y + newNormal.z * mainCenter.z);
#if DEBUG
    spdlog::info("The equation of the new plane is: {}x {}y {}z {} = 0", newNormal.x, newNormal.y, newNormal.z, D_prime);
#endif
    cv::Mat_<double> ret(1, 4);
    ret(0, 0) = newNormal.x;
    ret(0, 1) = newNormal.y;
    ret(0, 2) = newNormal.z;
    ret(0, 3) = D_prime;
    // Both optical centers must lie (numerically) on the resulting plane.
    double check = slaveCenter.x * ret(0, 0) + slaveCenter.y * ret(0, 1) + slaveCenter.z * ret(0, 2) + ret(0, 3);
    if (std::abs(check) < 1e-3)
    {
        planeRet = ret;
        return STATUS_OK;
    }
    spdlog::error("Projection plane calculation error, please check the camera's optical center coordinates and calibration normal vector.");
    return STATUS_ERROR;
}

/**
 * Calibrate a single camera from pre-detected corner sets and return the
 * per-view board poses as 4x4 homogeneous transforms.
 *
 * @param imagePointsArray detected corners, one vector per view
 * @param cameraMatrix     in/out intrinsic matrix
 * @param distCoeffMatrix  in/out distortion coefficients
 * @param Pose             out: one 4x4 board-to-camera transform per view (appended)
 * @param rms              out: reprojection RMS error from calibrateCamera
 * @return STATUS_OK always.
 */
StatusCode CameraCalibration::CalibraSingoCamera(const std::vector<std::vector<cv::Point2f>> &imagePointsArray,
                                                 cv::Mat &cameraMatrix, cv::Mat &distCoeffMatrix,
                                                 std::vector<cv::Mat> &Pose, double &rms)
{
    // Matching board coordinates for every view.
    std::vector<std::vector<cv::Point3f>> objectPointsArray;
    getBoardWorldPoints(imagePointsArray.size(), this->boardSize, this->squareSize, objectPointsArray);

    std::vector<cv::Mat> rvecs, tvecs;
    rms = cv::calibrateCamera(objectPointsArray, imagePointsArray, this->imageSize, cameraMatrix, distCoeffMatrix, rvecs, tvecs);

    // Convert each (rvec, tvec) pair into a 4x4 transform.
    for (size_t view = 0; view < imagePointsArray.size(); view++)
    {
        cv::Mat rotMat;
        cv::Rodrigues(rvecs[view], rotMat);
        Pose.push_back(R_T2RT(rotMat, tvecs[view]));
    }
    return STATUS_OK;
}

/**
 * Eye-to-hand calibration: estimates the camera-to-gripper transform from
 * chessboard images and the corresponding robot hand positions.
 *
 * The gripper rotations are all identity (pure-translation motions), so only
 * the hand translations drive the Tsai solution.
 *
 * @param imgList  chessboard images (BGR), one per robot pose
 * @param handList robot hand positions; the first 3 floats of each entry are
 *                 used as the gripper translation — confirm the layout with
 *                 the caller
 * @param outMat   out: 4x4 camera-to-gripper homogeneous transform
 * @return STATUS_OK on success, STATUS_ERROR otherwise (cause is logged).
 *
 * Fixes:
 *  - the "00415H" throw carried a suffix ("00415H Corner detection failed!")
 *    that the exact-match catch never recognized, so the RMS failure was
 *    swallowed without a log; the catch now matches on the code prefix.
 *  - gripper matrices are built as CV_64F to match the CV_64F target
 *    rotations/translations produced by calibrateCamera.
 */
StatusCode CameraCalibration::CalibraEyeToHand(const std::vector<cv::Mat> &imgList,
                                               const std::vector<std::vector<float>> &handList,
                                               cv::Mat &outMat)
{
    try
    {
        if (imgList.size() != handList.size())
        {
            throw std::length_error("00366H");
        }

        cv::Mat R_cam2gripper = (cv::Mat_<double>(3, 3));
        cv::Mat T_cam2gripper = (cv::Mat_<double>(3, 1));

        std::vector<std::vector<cv::Point3f>> objectPoints;
        getBoardWorldPoints(imgList.size(), this->boardSize, this->squareSize, objectPoints);

        std::vector<cv::Mat> R_target2cam, T_target2cam;
        std::vector<cv::Mat> R_gripper2base, T_gripper2base;
        R_target2cam.reserve(imgList.size());
        T_target2cam.reserve(imgList.size());
        R_gripper2base.reserve(handList.size());
        T_gripper2base.reserve(handList.size());

        // Detect subpixel-refined chessboard corners in every image.
        std::vector<std::vector<cv::Point2f>> cornersList;
        for (size_t i = 0; i < imgList.size(); i++)
        {
            cv::Mat handleMat;
            cv::cvtColor(imgList[i], handleMat, cv::COLOR_BGR2GRAY);
            std::vector<cv::Point2f> corners;
            bool found = cv::findChessboardCorners(handleMat, this->boardSize, corners, cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_NORMALIZE_IMAGE);

            if (found)
            {
                cv::cornerSubPix(handleMat, corners, this->boardSize, this->squareSize, cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.1));
                cornersList.push_back(corners);
            }
            else
            {
                throw std::invalid_argument("00398H");
            }

#if DEBUG
            cv::Mat showImage = imgList[i].clone();
            cv::drawChessboardCorners(showImage, this->boardSize, corners, true);
            cv::imwrite("show_" + std::to_string(i) + ".jpg", showImage);
#endif
        }

        // Per-image board pose: rotations as rvecs, translations directly.
        std::vector<cv::Mat> rvecsMat;
        double rms = cv::calibrateCamera(objectPoints, cornersList, imgList[0].size(), this->M1, this->D1, rvecsMat, T_target2cam);
        if (rms > 1.0f)
        {
            throw std::invalid_argument("00415H Corner detection failed!");
        }

        cv::Mat transformItem;
        for (auto &rvecItem : rvecsMat)
        {
            cv::Rodrigues(rvecItem, transformItem);
            R_target2cam.push_back(transformItem);
        }

        // Pure-translation robot motions: identity rotation per pose.
        // CV_64F to match the calibrateCamera outputs above.
        cv::Mat identityMatrix = cv::Mat::eye(3, 3, CV_64F);
        for (size_t i = 0; i < handList.size(); i++)
        {
            const std::vector<float> &item = handList[i];
            R_gripper2base.push_back(identityMatrix);
            T_gripper2base.push_back((cv::Mat_<double>(3, 1) << item[0], item[1], item[2]));
        }

        cv::calibrateHandEye(R_gripper2base, T_gripper2base, R_target2cam, T_target2cam, R_cam2gripper, T_cam2gripper, cv::CALIB_HAND_EYE_TSAI);

        outMat = R_T2RT(R_cam2gripper, T_cam2gripper);
    }
    catch (const std::invalid_argument &e)
    {
        const std::string what = e.what();
        if (what == "00398H")
        {
            spdlog::error("Caught an exception 00398H, Corner detection failed!");
        }
        else if (what.rfind("00415H", 0) == 0) // thrown message carries a suffix
        {
            spdlog::error("Caught an exception 00415H, Calibration rms is too large, please recalibrate!");
        }
        return STATUS_ERROR;
    }
    catch (const std::length_error &e)
    {
        if (std::string(e.what()) == "00366H")
        {
            spdlog::error("Caught an exception 00366H, The data length of the robotic arm and camera does not match!");
        }
        return STATUS_ERROR;
    }
    catch (const std::exception &e)
    {
        spdlog::error("Caught an exception 00000H, Calibration logic failed, please check input!");
        return STATUS_ERROR;
    }
    return STATUS_OK;
}

/**
 * @brief Feature-based eye-to-hand calibration (work in progress).
 *
 * Currently only binarizes each left image, extracts its contours, and writes
 * debug visualisations to disk; the pose-estimation step (cv::solvePnPRansac)
 * is not implemented yet. `handList` and `outMat` are presently unused.
 *
 * @param imgLeftList Left-camera BGR images.
 * @param handList    Robot hand positions (currently unused).
 * @param outMat      Output transform (currently never written).
 * @return STATUS_OK after the debug pass, STATUS_ERROR on exception.
 */
StatusCode CameraCalibration::CalibraEyeToHandFeat(const std::vector<cv::Mat> &imgLeftList,
                                                   const std::vector<std::vector<float>> &handList,
                                                   cv::Mat &outMat)
{
    try
    {
        std::vector<cv::Point2f> pointsLeft;
        std::vector<cv::Point3f> objectPoints;
        std::vector<cv::Mat> rvecsMat, tvecsMat;
        cv::Mat gray, img_bin;
        cv::RNG rng;
        for (size_t i = 0; i < imgLeftList.size(); i++)
        {
            cv::cvtColor(imgLeftList[i], gray, cv::COLOR_BGR2GRAY);
            // cv::medianBlur(gray, gray, 3);
            cv::threshold(gray, img_bin, 128, 255, cv::THRESH_BINARY);
            // cv::Mat element = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(15, 15));
            // cv::morphologyEx(img_bin, img_bin, cv::MORPH_CLOSE, element); // closing operation
            cv::imwrite("bin+morphologyEx.png", img_bin);

            // Outer vector: one entry per contour; inner vector: that contour's points.
            std::vector<std::vector<cv::Point>> contours;
            // Each Vec4i holds the indices of the [next, previous, parent, first-child] contour.
            std::vector<cv::Vec4i> hierachy;
            cv::findContours(img_bin, contours, hierachy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point(0, 0));
            cv::Mat dst = cv::Mat::zeros(img_bin.size(), CV_8UC3); // canvas for drawing the detected contours

            // Draw every contour in a random color (debug visualisation). A distinct
            // index name avoids shadowing the outer image-loop variable `i`.
            for (int c = 0; c < static_cast<int>(contours.size()); ++c)
            {
                cv::Scalar color = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
                cv::drawContours(dst, contours, c, color, 1, 8, hierachy, 0, cv::Point(0, 0));
            }
            cv::imwrite("str_OutputWindowTitle.png", dst);
            // cv::waitKey(0);
        }
        return STATUS_OK;
        // cv::solvePnPRansac();
    }
    catch (const std::exception &e)
    {
        // Do not swallow the failure silently — record what went wrong.
        spdlog::error("CalibraEyeToHandFeat failed, Exception: {}", e.what());
        return STATUS_ERROR;
    }
}

/**
 * @brief Eye-to-hand calibration from a single image using selected chessboard corners.
 *
 * Detects all chessboard corners in `image`, keeps only the corners whose
 * indices appear in `imagePointsIndex`, then solves a PnP problem against
 * `objectPoints` to recover the camera pose as a 4x4 homogeneous matrix.
 *
 * @param image            Input BGR image containing the chessboard.
 * @param objectPoints     3D coordinates matching the filtered image points.
 * @param imagePointsIndex Indices into the detected corner list to keep.
 * @param outMat           Output 4x4 transform built from the PnP rotation/translation.
 * @param task             Task type selector (currently unused in this routine).
 * @return STATUS_OK on success, STATUS_ERROR on failure (logged).
 */
StatusCode CameraCalibration::CalibraEyeToHandCorner(const cv::Mat &image,
                                                     std::vector<cv::Point3f> &objectPoints,
                                                     std::vector<int> &imagePointsIndex,
                                                     cv::Mat &outMat,
                                                     TaskType task = YARN)
{
    try
    {
        if (image.empty() || this->M1.empty() || this->D1.empty())
        {
            spdlog::error("Could not open or find the image, or Camera internal parameters not loaded!");
            return STATUS_ERROR;
        }
        cv::Mat handImage = image.clone(), subImg;
        std::vector<cv::Point2f> imagePoints;
        int flags = cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_NORMALIZE_IMAGE;
        bool foundChessboard = cv::findChessboardCorners(handImage, this->boardSize, imagePoints, flags);
        if (foundChessboard)
        {
            cv::cvtColor(handImage, subImg, cv::COLOR_BGR2GRAY);
            cv::cornerSubPix(subImg, imagePoints, this->boardSize, this->squareSize,
                             cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 30, 0.1));
            cv::drawChessboardCorners(handImage, boardSize, imagePoints, foundChessboard);
            // Tick count is only used to make the debug file name unique.
            double st = cv::getTickCount();
            cv::imwrite(std::to_string(st) + "chessboard.png", handImage);
        }
        else
        {
            spdlog::error("Could not find chessboard corners!");
            return STATUS_ERROR;
        }

        // Keep only the requested corners; reject out-of-range indices instead
        // of reading past the end of the detected corner list (UB).
        std::vector<cv::Point2f> imagePointsFiltered;
        imagePointsFiltered.reserve(imagePointsIndex.size());
        for (size_t i = 0; i < imagePointsIndex.size(); i++)
        {
            int index = imagePointsIndex[i];
            if (index < 0 || index >= static_cast<int>(imagePoints.size()))
            {
                spdlog::error("Corner index {} out of range, only {} corners detected!", index, imagePoints.size());
                return STATUS_ERROR;
            }
            imagePointsFiltered.push_back(imagePoints[index]);
        }
        // std::vector<cv::Mat> rvecsMat, tvecsMat;
        cv::Mat rvecsMat, tvecsMat;
        // solvePnPRansac can fail to find a pose — check its result.
        if (!cv::solvePnPRansac(objectPoints, imagePointsFiltered, this->M1, this->D1, rvecsMat, tvecsMat))
        {
            spdlog::error("solvePnPRansac failed to estimate a pose!");
            return STATUS_ERROR;
        }

        cv::Mat R_camera2gripper;
        cv::Rodrigues(rvecsMat, R_camera2gripper);
        outMat = R_T2RT(R_camera2gripper, tvecsMat);
        /*
        cv::FileStorage fw("calib.yml", cv::FileStorage::WRITE);
        fw << "R" << rvecsMat;
        fw << "T" << tvecsMat;
        fw.release();
        */
        return STATUS_OK;
    }
    catch (const std::exception &e)
    {
        spdlog::error("EyeToHand exe failed,Exception: {}", e.what());
        return STATUS_ERROR;
    }
}

/**
 * @brief Stereo (dual camera) calibration from matched chessboard corner lists.
 *
 * Calibrates each camera individually, runs cv::stereoCalibrate with the
 * intrinsics fixed, evaluates the average symmetric epipolar error, and
 * computes the rectification transforms.
 *
 * @param PointsLeft   Per-image corner lists from the left camera.
 * @param PointsRight  Per-image corner lists from the right camera (same size).
 * @param cameraParams Output: [ML, DL, MR, DR] intrinsics and distortion.
 * @param result       Output: [R, T, E, F, R1, R2, P1, P2, Q].
 * @param Pose         Output: per-image poses from each single-camera calibration.
 * @param errorv       Output: average epipolar reprojection error.
 * @return STATUS_OK on success, STATUS_ERROR on failure (logged).
 */
StatusCode CameraCalibration::CalibrateDualCamera(const std::vector<std::vector<cv::Point2f>> &PointsLeft,
                                                  const std::vector<std::vector<cv::Point2f>> &PointsRight,
                                                  std::array<cv::Mat1d, 4> &cameraParams,
                                                  std::array<cv::Mat1d, 9> &result,
                                                  std::array<std::vector<cv::Mat>, 2> &Pose,
                                                  double &errorv)
{
    try
    {
        cv::Mat1d LeftCameraMat, LeftDistCoeffMat, RightCameraMat, RightDistCoeffMat;

        // Left and right views must come in matched, equally-sized pairs.
        if (PointsLeft.size() != PointsRight.size())
        {
            throw std::length_error("00564H");
        }
        if (PointsLeft.empty())
        {
            // Guards the err/npoints division below against 0/0 (NaN).
            throw std::invalid_argument("empty corner point lists");
        }

        std::vector<std::vector<cv::Point3f>> ObjectPointsArray;
        getBoardWorldPoints(PointsLeft.size(), this->boardSize, this->squareSize, ObjectPointsArray);

        cv::Mat R, T, E, F;

        // Calibrate each camera on its own first; intrinsics are then held
        // fixed during the stereo pass (CALIB_FIX_INTRINSIC).
        double rmsL, rmsR;
        this->CalibraSingoCamera(PointsLeft, LeftCameraMat, LeftDistCoeffMat, Pose[0], rmsL);
        this->CalibraSingoCamera(PointsRight, RightCameraMat, RightDistCoeffMat, Pose[1], rmsR);

        double repv = cv::stereoCalibrate(
            ObjectPointsArray, PointsLeft, PointsRight,
            LeftCameraMat, LeftDistCoeffMat,
            RightCameraMat, RightDistCoeffMat,
            this->imageSize,
            R, T, E, F, cv::CALIB_FIX_INTRINSIC);

        // Symmetric epipolar error: distance of each point to the epipolar
        // line induced by its counterpart in the other view.
        double err = 0;
        int npoints = 0;
        std::vector<cv::Vec3f> lines[2];
        for (unsigned int i = 0; i < PointsLeft.size(); i++)
        {
            int npt = (int)PointsLeft[i].size(); // corners in this view pair
            cv::Mat imgpt[2];
            {
                imgpt[0] = cv::Mat(PointsLeft[i]);
                cv::undistortPoints(imgpt[0], imgpt[0], LeftCameraMat, LeftDistCoeffMat, cv::Mat(), LeftCameraMat); // remove lens distortion
                cv::computeCorrespondEpilines(imgpt[0], 1, F, lines[0]);                                            // epilines in the right image
            }
            {
                imgpt[1] = cv::Mat(PointsRight[i]);
                cv::undistortPoints(imgpt[1], imgpt[1], RightCameraMat, RightDistCoeffMat, cv::Mat(), RightCameraMat); // remove lens distortion
                cv::computeCorrespondEpilines(imgpt[1], 2, F, lines[1]);                                               // epilines in the left image
            }
            for (int j = 0; j < npt; j++)
            {
                double errij = fabs(PointsLeft[i][j].x * lines[1][j][0] +
                                    PointsLeft[i][j].y * lines[1][j][1] + lines[1][j][2]) +
                               fabs(PointsRight[i][j].x * lines[0][j][0] +
                                    PointsRight[i][j].y * lines[0][j][1] + lines[0][j][2]);
                err += errij; // accumulate point-to-epiline distance
            }
            npoints += npt;
        }
        // Avoid a 0/0 NaN when every view happened to contain zero corners.
        double avgErr = (npoints > 0) ? err / npoints : 0.0;
        spdlog::info("Average reprojection err: " + std::to_string(avgErr));

        cv::Mat R1, R2, P1, P2, Q;
        cv::Rect validRoi[2];
        cv::stereoRectify(LeftCameraMat, LeftDistCoeffMat,
                          RightCameraMat, RightDistCoeffMat,
                          this->imageSize, R, T, R1, R2, P1, P2, Q,
                          0, -1, this->imageSize, &validRoi[0], &validRoi[1]);

        cameraParams[0] = LeftCameraMat;
        cameraParams[1] = LeftDistCoeffMat;
        cameraParams[2] = RightCameraMat;
        cameraParams[3] = RightDistCoeffMat;

        result[0] = R;
        result[1] = T;
        result[2] = E;
        result[3] = F;
        result[4] = R1;
        result[5] = R2;
        result[6] = P1;
        result[7] = P2;
        result[8] = Q;

        errorv = avgErr;

        return STATUS_OK;
    }
    catch (const std::invalid_argument &e)
    {
        // Record the reason instead of failing silently.
        spdlog::error("Caught an exception, Dual Calibration invalid argument: {}", e.what());
        return STATUS_ERROR;
    }
    catch (const std::length_error &e)
    {
        if (std::string(e.what()) == "00564H")
        {
            spdlog::error("Caught an exception 00564H, L camera points and R camera points does not match!");
        }
        return STATUS_ERROR;
    }
    catch (const std::exception &e)
    {
        spdlog::error("Caught an exception 00000H, Dual Calibration logic failed, please check input!");
        return STATUS_ERROR;
    }
}