/*
 * @Author: huangyupei huangyupei2021@ia.ac.cn
 * @Date: 2024-09-20 13:06:22
 * @LastEditors: huangyupei huangyupei2021@ia.ac.cn
 * @LastEditTime: 2025-03-19 23:04:06
 * @FilePath: /SelfCalib_OptiAcoustic/src/camera_pose_estimation_2d2d.cc
 * @Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
 */
#include "camera_pose_estimation.h"

using namespace std;
using namespace cv;


// void find_feature_matches(const Mat &img_1, const Mat &img_2,
//                           std::vector<KeyPoint> &keypoints_1,
//                           std::vector<KeyPoint> &keypoints_2,
//                           std::vector<int> &matches) 
// {
//   //-- 初始化
//   Mat descriptors_1, descriptors_2;
//   // used in OpenCV3
//   Ptr<FeatureDetector> detector = ORB::create();
//   Ptr<DescriptorExtractor> descriptor = ORB::create();
//   // use this if you are in OpenCV2
//   // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
//   // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
//   Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
//   //-- 第一步:检测 Oriented FAST 角点位置
//   detector->detect(img_1, keypoints_1);
//   detector->detect(img_2, keypoints_2);

//   //-- 第二步:根据角点位置计算 BRIEF 描述子
//   descriptor->compute(img_1, keypoints_1, descriptors_1);
//   descriptor->compute(img_2, keypoints_2, descriptors_2);

//   //-- 第三步:对两幅图像中的BRIEF描述子进行匹配，使用 Hamming 距离
//   vector<DMatch> match;
//   //BFMatcher matcher ( NORM_HAMMING );
//   matcher->match(descriptors_1, descriptors_2, match);

//   //-- 第四步:匹配点对筛选
//   double min_dist = 10000, max_dist = 0;

//   //找出所有匹配之间的最小距离和最大距离, 即是最相似的和最不相似的两组点之间的距离
//   for (int i = 0; i < descriptors_1.rows; i++) {
//     double dist = match[i].distance;
//     if (dist < min_dist) min_dist = dist;
//     if (dist > max_dist) max_dist = dist;
//   }

//   printf("-- Max dist : %f \n", max_dist);
//   printf("-- Min dist : %f \n", min_dist);

//   //当描述子之间的距离大于两倍的最小距离时,即认为匹配有误.但有时候最小距离会非常小,设置一个经验值30作为下限.
//   for (int i = 0; i < descriptors_1.rows; i++) {
//     if (match[i].distance <= max(2 * min_dist, 30.0)) {
//       matches.push_back(match[i]);
//     }
//   }

// }


// Convert a pixel coordinate into a normalized camera-plane coordinate using
// the pinhole intrinsics K: x_n = (u - cx) / fx, y_n = (v - cy) / fy.
Point2d pixel2cam(const Point2d &p, const Mat &K) {
    const double fx = K.at<double>(0, 0);
    const double fy = K.at<double>(1, 1);
    const double cx = K.at<double>(0, 2);
    const double cy = K.at<double>(1, 2);
    return Point2d((p.x - cx) / fx, (p.y - cy) / fy);
}

/**
 * @brief Estimate the relative camera pose (R, t) between two views from
 *        matched 2D keypoints via essential-matrix decomposition.
 * @param keypoints_1 Keypoints detected in the first image.
 * @param keypoints_2 Keypoints detected in the second image.
 * @param matches     Match indices; each entry indexes both keypoint vectors
 *                    (the same convention validateEpipolar uses).
 * @param R Output 3x3 rotation from frame 1 to frame 2.
 * @param t Output 3x1 translation (unit norm; monocular scale is unobservable).
 */
void camera_pose_estimation_2d2d(std::vector<KeyPoint> keypoints_1,
                          std::vector<KeyPoint> keypoints_2,
                          std::vector<int> matches,
                          Mat &R, Mat &t) {
    // Assumed camera intrinsics -- keep consistent with validateEpipolar and
    // triangulationCamera, which hard-code the same values.
    double focal_length = 1200, cx = 960, cy = 540;
    cv::Mat K = (cv::Mat_<double>(3, 3) << focal_length, 0, cx, 0, focal_length, cy, 0, 0, 1);

    // BUG FIX: the principal point was hard-coded to (320, 240), contradicting
    // the (960, 540) principal point of K above. It must match K.
    Point2d principal_point(cx, cy);

    //-- Gather the matched pixel coordinates as vector<Point2f>.
    // BUG FIX: index the keypoints through matches[m] instead of the loop
    // counter, matching the vKeys[m] convention used by validateEpipolar.
    vector<Point2f> points1;
    vector<Point2f> points2;
    points1.reserve(matches.size());
    points2.reserve(matches.size());
    for (int m : matches) {
        points1.push_back(keypoints_1[m].pt);
        points2.push_back(keypoints_2[m].pt);
    }

    //-- Fundamental matrix (diagnostic only). FM_RANSAC replaces the legacy
    //   CV_FM_RANSAC constant that was removed in OpenCV 4.
    Mat fundamental_matrix = findFundamentalMat(points1, points2, FM_RANSAC);
    //   cout << "fundamental_matrix is " << endl << fundamental_matrix << endl;

    //-- Essential matrix from the calibrated correspondences.
    Mat essential_matrix = findEssentialMat(points1, points2, focal_length, principal_point, RANSAC);
    //   cout << "essential_matrix is " << endl << essential_matrix << endl;

    //-- Homography (the scene here is not planar, so this is informational only).
    Mat homography_matrix = findHomography(points1, points2, RANSAC, 3);
    //   cout << "homography_matrix is " << endl << homography_matrix << endl;

    //-- Recover R, t from E; the cheirality check selects the valid one of the
    //   four decompositions. (Available since OpenCV 3.)
    recoverPose(essential_matrix, points1, points2, R, t, focal_length, principal_point);
    //   cout << "R is " << endl << R << endl;
    //   cout << "t is " << endl << t << endl;
}


// Sanity-check an estimated relative pose: for every matched pair the epipolar
// constraint y2^T * [t]_x * R * y1 should evaluate to (approximately) zero.
// The residuals are only computed here; the printout is disabled.
void validateEpipolar(Mat R, Mat t, const vector<cv::KeyPoint> vKeys1, const vector<cv::KeyPoint> vKeys2, vector<int> matches) {
    // Build the skew-symmetric matrix [t]_x, so that [t]_x * v == t x v.
    // (E = [t]_x * R up to scale.)
    const double t0 = t.at<double>(0, 0);
    const double t1 = t.at<double>(1, 0);
    const double t2 = t.at<double>(2, 0);
    Mat t_x = (Mat_<double>(3, 3) <<
          0, -t2,  t1,
         t2,   0, -t0,
        -t1,  t0,   0);

    // cout << "t^R=" << endl << t_x * R << endl;

    // Assumed pinhole intrinsics (the same hard-coded values used by the other
    // routines in this file).
    double fx = 1200, fy = 1200, cx = 960, cy = 540;
    cv::Mat K = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);

    // Evaluate the 1x1 residual for every match index.
    for (int idx : matches) {
        const Point2d n1 = pixel2cam(vKeys1[idx].pt, K);
        const Point2d n2 = pixel2cam(vKeys2[idx].pt, K);
        Mat y1 = (Mat_<double>(3, 1) << n1.x, n1.y, 1);
        Mat y2 = (Mat_<double>(3, 1) << n2.x, n2.y, 1);
        Mat residual = y2.t() * t_x * R * y1;
        // cout << "epipolar constraint = " << residual << endl;
    }
}


/**
 * @brief Triangulate 3D points from two calibrated views.
 * @param kp_1    Keypoints of the first (left/reference) image.
 * @param kp_2    Keypoints of the second image.
 * @param matches Match indices; each entry indexes both keypoint vectors
 *                (the same convention validateEpipolar uses).
 * @param R, t    Pose of the second camera w.r.t. the first.
 * @param points  Output: triangulated 3D points, appended in match order,
 *                expressed in the first camera's frame.
 */
void triangulationCamera(const vector<cv::KeyPoint> &kp_1,
                   const vector<cv::KeyPoint> &kp_2,
                   const vector<int> &matches,
                   const Mat &R, const Mat &t,
                   vector<Eigen::Vector3d> &points) {
    // Projection matrix of the first camera: the world frame is placed at the
    // first camera, so its extrinsics are [I | 0].
    cv::Mat T1 = (Mat_<float>(3, 4) <<
    1, 0, 0, 0,
    0, 1, 0, 0,
    0, 0, 1, 0);

    // Projection matrix of the second camera: its extrinsics [R | t].
    cv::Mat T2 = (Mat_<float>(3, 4) <<
    R.at<double>(0, 0), R.at<double>(0, 1), R.at<double>(0, 2), t.at<double>(0, 0),
    R.at<double>(1, 0), R.at<double>(1, 1), R.at<double>(1, 2), t.at<double>(1, 0),
    R.at<double>(2, 0), R.at<double>(2, 1), R.at<double>(2, 2), t.at<double>(2, 0));

    // Assumed camera intrinsics (same hard-coded values as the rest of the file).
    double fx = 1200, fy = 1200, cx = 960, cy = 540;
    cv::Mat K = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);

    // Normalized image coordinates of each matched pair.
    // BUG FIX: index the keypoints through matches[m] instead of the loop
    // counter, matching the vKeys[m] convention used by validateEpipolar.
    vector<Point2f> pts_1, pts_2;
    pts_1.reserve(matches.size());
    pts_2.reserve(matches.size());
    for (int m : matches) {
        pts_1.push_back(pixel2cam(kp_1[m].pt, K));
        pts_2.push_back(pixel2cam(kp_2[m].pt, K));
    }

    // DLT triangulation; pts_4d is 4xN, one homogeneous point per column.
    cv::Mat pts_4d;
    cv::triangulatePoints(T1, T2, pts_1, pts_2, pts_4d);

    // Convert homogeneous coordinates to Euclidean 3D points.
    points.reserve(points.size() + pts_4d.cols);
    for (int i = 0; i < pts_4d.cols; i++) {
        cv::Mat x = pts_4d.col(i);
        x /= x.at<float>(3, 0);   // normalize by the homogeneous coordinate
        points.emplace_back(x.at<float>(0, 0), x.at<float>(1, 0), x.at<float>(2, 0));
    }
}

/**
 * @brief Estimate a camera pose from 3D-2D correspondences (PnP) by
 *        minimizing the reprojection error with g2o.
 * @param points_3d 3D landmarks in the reference camera frame.
 * @param points_2d Corresponding pixel observations; must be the same size and
 *                  order as points_3d.
 * @param pose      Output: the inverse of the optimized vertex estimate
 *                  (T_c1_ci, per the original trace message below).
 */
void camera_pose_estimation_3d2d(const vector<Vector3d>& points_3d, const vector<Vector2d>& points_2d, Sophus::SE3d& pose) {
    // Step 1: set up the g2o graph. Pose is 6-DoF, landmarks are 3-D.
    typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 3>> BlockSolverType;
    typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType;

    // Levenberg-Marquardt descent (GN / LM / DogLeg are the alternatives).
    // Ownership note: g2o takes ownership of these raw pointers through the
    // algorithm -> block-solver -> linear-solver chain; no explicit delete.
    BlockSolverType::LinearSolverType* linearSolver = new LinearSolverType();
    BlockSolverType* solver_ptr = new BlockSolverType(linearSolver);
    auto solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);

    g2o::SparseOptimizer optimizer;  // the graph model
    optimizer.setAlgorithm(solver);  // attach the solver

    // Step 2: add the single camera-pose vertex.
    VertexPose* vertex_pose = new VertexPose();
    vertex_pose->setId(0);
    vertex_pose->setEstimate(Sophus::SE3d());     // identity initial guess
    vertex_pose->setFixed(false);
    optimizer.addVertex(vertex_pose);

    // Assumed camera intrinsics (same hard-coded values as the rest of the file).
    double fx = 1200, fy = 1200, cx = 960, cy = 540;
    cv::Mat K = (cv::Mat_<double>(3, 3) << fx, 0, cx, 0, fy, cy, 0, 0, 1);

    Eigen::Matrix3d K_eigen;
    K_eigen << K.at<double>(0, 0), K.at<double>(0, 1), K.at<double>(0, 2),
        K.at<double>(1, 0), K.at<double>(1, 1), K.at<double>(1, 2),
        K.at<double>(2, 0), K.at<double>(2, 1), K.at<double>(2, 2);

    // Step 3: one reprojection edge per 3D-2D correspondence.
    // (Removed an unused `index` local and per-iteration value copies.)
    for (size_t i = 0; i < points_2d.size(); i++)
    {
        EdgeProjection* edge = new EdgeProjection(points_3d[i], K_eigen);
        edge->setId(static_cast<int>(i) + 1);   // edge ids start at 1; vertex uses 0
        edge->setVertex(0, vertex_pose);        // connect to the pose vertex
        edge->setMeasurement(points_2d[i]);     //! observed pixel used for the error
        edge->setInformation(Matrix2d::Identity());
        optimizer.addEdge(edge);
    }

    // Step 4: run the optimization.
    optimizer.setVerbose(false);
    optimizer.initializeOptimization();
    optimizer.optimize(20);
    pose = vertex_pose->estimate().inverse();
    // cout << "T_c1_ci pose estimated by g2o = \n" <<pose.matrix() << endl;
}


/**
 * @brief Local bundle adjustment over camera poses and triangulated 3D points.
 *
 * Vertices: the first camera pose (fixed at identity as the gauge), the
 * remaining camera poses stored as T_ci_c0 (inverse of the input poses), and
 * every 3D point. Edges: one reprojection edge per (frame, point) observation;
 * frame i is assumed to observe every point, in the same order as vPointsCamera
 * -- TODO(review): confirm this observation layout against the caller.
 *
 * @param vPointsCamera  In/out: 3D points, overwritten with optimized values.
 * @param vCameraPoses   In/out: camera poses; entries 1..N-1 are updated,
 *                       entry 0 stays fixed.
 * @param vCameraMeasAll Per-frame 2D measurements; vCameraMeasAll[i][j] is the
 *                       observation of point j in frame i.
 */
void LocalCamBA(vector<Vector3d> &vPointsCamera, vector<Sophus::SE3d> &vCameraPoses, vector<vector<Vector2d>> vCameraMeasAll) {
    typedef g2o::BlockSolver<g2o::BlockSolverTraits<-1, -1>> BlockSolverType;
    typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType;

    // Solver setup modeled on ORB-SLAM3. Ownership note: g2o takes ownership
    // of these raw pointers through the solver chain; no explicit delete.
    BlockSolverType::LinearSolverType* linearSolver = new LinearSolverType();
    BlockSolverType* solver_ptr = new BlockSolverType(linearSolver);
    auto solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);

    g2o::SparseOptimizer optimizer;
    optimizer.setAlgorithm(solver);
    optimizer.setVerbose(false);        // suppress per-iteration output

    // Step 1: add the vertices. Two kinds: triangulated 3D points and the
    // per-frame camera poses.
    vector<VertexPose *> vertex_camPoses;
    // The first frame's pose is the only fixed vertex (identity). It still has
    // to be added so its measurements constrain the 3D points.
    VertexPose *vT_c0 = new VertexPose();
    vT_c0->setId(0);
    vT_c0->setEstimate(Sophus::SE3d());
    vT_c0->setFixed(true);
    optimizer.addVertex(vT_c0);
    vertex_camPoses.emplace_back(vT_c0);

    // Free pose vertices: frames 1..N-1, parameterized as T_ci_c0 (expressed
    // in the first camera's frame, hence the inverse of the stored pose).
    int num_camPose = vCameraPoses.size();
    for (int i = 1; i < num_camPose; i++) {
        VertexPose *vT_ci_c0 = new VertexPose();
        vT_ci_c0->setId(i);                               // ids 1..N-1
        vT_ci_c0->setEstimate(vCameraPoses[i].inverse());
        vT_ci_c0->setFixed(false);
        optimizer.addVertex(vT_ci_c0);
        vertex_camPoses.emplace_back(vT_ci_c0);
    }

    // Free 3D-point vertices: the points recovered by initialization.
    // Their ids start after the pose ids, at num_camPose.
    int num_points = vPointsCamera.size();
    vector<VertexPoint *> vertex_points;
    for (int i = 0; i < num_points; i++) {
        VertexPoint *vPoint = new VertexPoint();
        vPoint->setId(i + num_camPose);
        vPoint->setEstimate(vPointsCamera[i]);
        vPoint->setMarginalized(false);
        optimizer.addVertex(vPoint);
        vertex_points.emplace_back(vPoint);
    }

    // Step 2: add the reprojection-error edges.
    // Chi-square threshold for 2 DoF at 95% significance: 5.991. Residuals
    // above it are treated as likely outliers by the robust kernel.
    const float deltaMono = sqrt(5.991);
    for (int i = 0; i < num_camPose; i++) {   // every frame, including the fixed first one
        // Per-frame measurements. PERF FIX: bind by const reference instead of
        // copying the whole measurement vector each iteration.
        const vector<Eigen::Vector2d> &vCameraMeasi = vCameraMeasAll[i];

        for (size_t j = 0; j < vCameraMeasi.size(); j++) {
            EdgeLocalCamBAProjection *e = new EdgeLocalCamBAProjection();
            e->setVertex(0, vertex_camPoses[i]);  // pose vertex of frame i
            e->setVertex(1, vertex_points[j]);    // 3D point observed by this edge
            e->setMeasurement(vCameraMeasi[j]);
            e->setInformation(Eigen::Matrix2d::Identity());

            auto rk = new g2o::RobustKernelHuber();
            e->setRobustKernel(rk);
            rk->setDelta(deltaMono);

            optimizer.addEdge(e);
        }
    }

    // Step 3: run the optimization.
    optimizer.initializeOptimization();
    optimizer.optimize(40);

    // Step 4: read back the results.
    // Poses: invert to return to the stored T_c0_ci convention (frame 0 fixed).
    for (int i = 1; i < num_camPose; i++) {
        vCameraPoses[i] = vertex_camPoses[i]->estimate().inverse();
    }

    // 3D points.
    for (int i = 0; i < num_points; i++) {
        vPointsCamera[i] = vertex_points[i]->estimate();
    }
}