/*
 * @Author: huangyupei huangyupei2021@ia.ac.cn
 * @Date: 2024-09-19 10:21:56
 * @LastEditors: huangyupei huangyupei2021@ia.ac.cn
 * @LastEditTime: 2025-06-22 19:46:17
 * @FilePath: /SelfCalib_OptiAcoustic/MotionBasedExtrinCalib-v2.cpp
 * @Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
 */
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <cmath>

#include <eigen3/Eigen/Core>
#include <eigen3/Eigen/Dense>
#include <opencv2/opencv.hpp>
#include <opencv2/core/eigen.hpp>

#include "CameraTwoViewReconstruction.h"
#include "readImageFeatures.h"
#include "camera_pose_estimation.h"
#include "acoustic_BA_g2o.h"
#include "hand_eye_calibration.h"
#include "extrinsicRefinement.h"

using namespace std;
using namespace cv;
using namespace Eigen;
using namespace Sophus;
using namespace ExtrinRefine;


double calcReprojError(Sophus::SE3d T_s1_s2, vector<Vector3d> vPointsSonar, vector<Vector2d> vSonarMeas1, vector<Vector2d> vSonarMeas2) {
    double error = 0.0;
    for (int i = 0; i < vPointsSonar.size(); i++) {
        auto reproj = SonarProject(g2o::SE3Quat(), vPointsSonar[i]);
        double err = pow(vSonarMeas1[i][0] - reproj[0], 2) + pow(vSonarMeas1[i][1] - reproj[1], 2);
        error += err;
        
        g2o::SE3Quat T_s1_s2_g2o(T_s1_s2.unit_quaternion(), T_s1_s2.translation());
        reproj = SonarProject(T_s1_s2_g2o, vPointsSonar[i]);
        err = pow(vSonarMeas2[i][0] - reproj[0], 2) + pow(vSonarMeas2[i][1] - reproj[1], 2);
        error += err;
    }

    return error;
}

// Ensure that `filename` exists on disk: if it cannot be opened for
// reading, create it as an empty file and report the outcome on
// stdout/stderr. Existing files are left untouched.
void checkAndCreateFile(const std::string& filename) {
    if (std::ifstream(filename).good()) {
        return;  // file already present, nothing to do
    }
    // File is missing (or unreadable): create an empty one.
    std::ofstream created(filename);
    if (created.is_open()) {
        std::cout << "文件 " << filename << " 不存在，已创建。\n";
    } else {
        std::cerr << "无法创建文件: " << filename << std::endl;
    }
}

// Append one pose to `filename` as a single text line:
// the 9 rotation-matrix entries in row-major order, followed by the
// 3 translation components, all space-separated.
void savePosesToFile(const string& filename, const Sophus::SE3d& pose) {
    ofstream file(filename, ios::app);  // open in append mode
    if (!file.is_open()) {
        cerr << "无法打开文件: " << filename << endl;
        return;
    }

    const Matrix3d R = pose.rotationMatrix();   // rotation part
    const Vector3d t = pose.translation();      // translation part

    // Rotation matrix, row-major.
    for (int row = 0; row < 3; ++row) {
        for (int col = 0; col < 3; ++col) {
            file << R(row, col) << " ";
        }
    }
    // Translation vector.
    for (int k = 0; k < 3; ++k) {
        file << t(k) << " ";
    }
    file << "\n";
    // The stream is closed by its destructor on scope exit.
}


/**
 * Pipeline entry point: estimates the camera->sonar extrinsic transform
 * T_sc from synchronized monocular-camera and imaging-sonar feature
 * sequences (7 frames each, read from MATLAB simulation output):
 *   Step 1 - camera trajectory + 3D points (2-view init, PnP, local BA);
 *   Step 2 - sonar trajectory + 3D points (2-view acoustic BA, acoustic
 *            PnP, local BA);
 *   Step 3 - hand-eye calibration for the extrinsic (and scale) initial
 *            value;
 *   Step 4 - refinement via reprojection-error BA and trajectory-
 *            consistency optimization, saving T_sc after each stage.
 */
int main(int argc, char** argv) {
    // Step 1: obtain the camera motion sequence and the camera-recovered 3D
    // points (expressed in the first camera's coordinate frame).
    // Camera poses: vector<Sophus::SE3d>; recovered points: vector<Eigen::Vector3d>.
    vector<Sophus::SE3d> vCameraPoses;
    vCameraPoses.emplace_back(Sophus::SE3d());  // frame 1 is the reference (identity)

    // Step 1.1: initialize the camera motion from the first two monocular images.
    string fielname_prefix = "/mnt/d/MATLAB_Files/SelfCalibration/Sequence018";         // MATLAB simulation data directory
    string camera_filename1 = fielname_prefix + "/image_features-1.txt";
    string camera_filename2 = fielname_prefix + "/image_features-2.txt";
    
    vector<cv::KeyPoint> vKeys1, vKeys2;

    // NOTE(review): the file reads run inside assert(), so they disappear
    // when compiled with NDEBUG -- consider checking return values explicitly.
    assert(readCameraFeaturesFromTXT(camera_filename1, vKeys1) != -1);
    assert(readCameraFeaturesFromTXT(camera_filename2, vKeys2) != -1);

    int num_points = vKeys1.size();
    if (vKeys1.size() > vKeys2.size()) num_points = vKeys2.size();

    // In the simulation, correspondences are one-to-one by construction:
    // feature i in the left image matches feature i in the right image.
    vector<int> vInitMatches(num_points, 0);
    for (int i = 0; i < num_points; i++) {
        vInitMatches[i] = i;
    }

    // Monocular motion estimation (implementation adapted from the
    // "14 Lectures on Visual SLAM" reference code).
    Mat R, t;
    camera_pose_estimation_2d2d(vKeys1, vKeys2, vInitMatches, R, t);
    // ~ R and t are the inter-frame rotation/translation; note that this
    // transform (R21, t21) expresses frame 1 in the coordinates of frame 2.
    validateEpipolar(R, t, vKeys1, vKeys2, vInitMatches);

    // Convert the cv::Mat rotation and translation into a Sophus::SE3d.
    Eigen::Matrix3d eigen_R;
    Eigen::Vector3d eigen_t;
    cv::cv2eigen(R, eigen_R);
    cv::cv2eigen(t, eigen_t);
    Eigen::Vector3d unit_t = eigen_t.normalized();    // normalize the raw translation (monocular scale is unknown)
    Sophus::SE3d T_c1_c2 = Sophus::SE3d(eigen_R, unit_t).inverse();
    // cout << "Camera Motion T_c1_c2: \n" << T_c1_c2.matrix() << endl;
    // ~ We now have the second camera pose T_c1_c2 [note: the translation
    // length is provisionally fixed to 1].
    vCameraPoses.emplace_back(T_c1_c2);

    // Step 1.2: triangulate to obtain the map points.
    Sophus::SE3d T_c2_c1 = T_c1_c2.inverse();
    Mat R_c2_c1, t_c2_c1;
    cv::eigen2cv(T_c2_c1.rotationMatrix(), R_c2_c1);
    cv::eigen2cv(T_c2_c1.translation(), t_c2_c1);

    vector<Eigen::Vector3d> vPointsCamera;
    // The rotation/translation convention here is counter-intuitive but
    // matches OpenCV's triangulation usage.
    triangulationCamera(vKeys1, vKeys2, vInitMatches, R_c2_c1, t_c2_c1, vPointsCamera);         
    // ~ We now have the second camera pose T_c1_c2 and the recovered 3D
    // points vPointsCamera (world frame = first camera frame).

    // Step 1.3: for every subsequent frame, solve the camera pose from the
    // 3D-2D (PnP) correspondences of its image feature measurements.
    string camera_filename;
    for (int i = 3; i <= 7; i++) {
        camera_filename = fielname_prefix + "/image_features-" + to_string(i) + ".txt";

        vector<cv::KeyPoint> vCameraKeysi;
        assert(readCameraFeaturesFromTXT(camera_filename, vCameraKeysi) != -1);
        vector<Eigen::Vector2d> vCameraMeasi;
        for (auto keypoint: vCameraKeysi) {
            vCameraMeasi.emplace_back(Vector2d(keypoint.pt.x, keypoint.pt.y));
        }

        Sophus::SE3d T_c1_ci = Sophus::SE3d();      // initial value: identity

        // Estimate camera pose i from the 3D points and their 2D projections
        // in image i; a single pose vertex is optimized and the error is the
        // 3D-point reprojection error.
        camera_pose_estimation_3d2d(vPointsCamera, vCameraMeasi, T_c1_ci);
        // cout << "after opt, T_c1_c" << i << ": \n" << T_c1_ci.matrix() << endl;
        vCameraPoses.emplace_back(T_c1_ci);
    }

    // Gather the 2D measurements of camera images 1..7 for the local BA below.
    vector<vector<Vector2d>> vCameraMeasAll;
    for (int i = 1; i <= 7; i++) {      // camera images 1 through 7
        // Read the camera image feature measurements.
        string camera_filename = fielname_prefix + "/image_features-" + to_string(i) + ".txt";
        vector<cv::KeyPoint> vCameraKeysi;
        assert(readCameraFeaturesFromTXT(camera_filename, vCameraKeysi) != -1);
        
        vector<Eigen::Vector2d> vCameraMeasi;
        for (auto keypoint: vCameraKeysi) {
            vCameraMeasi.emplace_back(Vector2d(keypoint.pt.x, keypoint.pt.y));
        }

        vCameraMeasAll.emplace_back(vCameraMeasi);
    }

    // Local bundle adjustment over all camera poses and 3D points.
    LocalCamBA(vPointsCamera, vCameraPoses, vCameraMeasAll);
    
    // ~ -------------------------- Sonar trajectory computation ---------------------------

    // Step 2: obtain the sonar motion sequence and the sonar-recovered 3D
    // points (expressed in the first sonar's coordinate frame).
    // Sonar poses: vector<Sophus::SE3d>; recovered points: vector<Eigen::Vector3d>.
    vector<Sophus::SE3d> vSonarPoses;
    vSonarPoses.emplace_back(Sophus::SE3d());
    vector<vector<Vector2d>> vSonarMeasAll;

    // Step 2.1: initialize the sonar motion and 3D points from the first two
    // sonar images. The BA framework follows bundle_adjustment_g2o.cpp from
    // "14 Lectures on Visual SLAM" ch9; the g2o usage and the CMakeLists.txt
    // linking of g2o follow ORB-SLAM3.
    Sophus::SE3d T_s1_s2 = Sophus::SE3d();      // initial value: identity
    
    // Read the range/bearing measurements of sonar frames 1 and 2 from txt files.
    string sonar_filename1 = fielname_prefix + "/sonar_features-1.txt";
    string sonar_filename2 = fielname_prefix + "/sonar_features-2.txt";
    
    vector<Eigen::Vector2d> vSonarMeas1, vSonarMeas2;
    vSonarMeas1 = getSonarMeasXY(sonar_filename1);
    vSonarMeas2 = getSonarMeasXY(sonar_filename2);

    vSonarMeasAll.emplace_back(vSonarMeas1);
    vSonarMeasAll.emplace_back(vSonarMeas2);

    // Initialization from the first two sonar frames' measurements.
    vector<Eigen::Vector3d> vPointsSonar;
    int num_acoustic_points = vSonarMeas1.size();           // number of 3D points
    vPointsSonar.reserve(num_acoustic_points);                   // pre-allocate

    // Seed each 3D point with an initial elevation value.
    for (int i = 0; i < num_acoustic_points; i++) {
        vPointsSonar.emplace_back(Eigen::Vector3d(vSonarMeas1[i][0], vSonarMeas1[i][1], 0.1));           // 3D point in Cartesian coordinates
        // vPointsSonar.emplace_back(Eigen::Vector3d(vSonarMeas1[i][0], vSonarMeas1[i][1], -0.1));       // initial value when points use spherical coordinates
    }

    bool priorZ_positive = false;

    SolveAcousticBA(T_s1_s2, vPointsSonar, vSonarMeas1, vSonarMeas2, priorZ_positive);    
    // ~ We now have the second sonar pose T_s1_s2 and the recovered 3D points vPointsSonar.
    vSonarPoses.emplace_back(T_s1_s2);

    // Step 2.2: for every subsequent sonar frame, solve the sonar pose from
    // the 3D-2D (acoustic PnP) correspondences of its feature measurements.
    // Inputs are sonar_features-3..7.txt.

    string sonar_filename;
    for (int i = 3; i <= 7; i++) {
        sonar_filename = fielname_prefix + "/sonar_features-" + to_string(i) + ".txt";

        vector<Eigen::Vector2d> vSonarMeasi;
        vSonarMeasi = getSonarMeasXY(sonar_filename);

        Sophus::SE3d T_s1_si = Sophus::SE3d();      // initial value: identity

        // Estimate the sonar pose from the 3D points and their 2D projections.
        // The pose vertex type is reused; a dedicated unary edge connects one
        // pose vertex and measures the 3D-point reprojection error.
        // cout << "before opt, T_s1_si: \n" << T_s1_si.matrix() << endl;
        AcousticPnpBA(T_s1_si, vPointsSonar, vSonarMeasi);
        // cout << "after opt, T_s1_s" << i << ": \n" << T_s1_si.matrix() << endl;
        vSonarPoses.emplace_back(T_s1_si);
        vSonarMeasAll.emplace_back(vSonarMeasi);
    }

    // Local bundle adjustment over all sonar poses and 3D points.
    LocalSonarBA(vPointsSonar, vSonarPoses, vSonarMeasAll);

    // ------------------------------------------------------------------------------------
    // Step 3: hand-eye calibration for the extrinsic initial value.
    // The modules above produced the camera trajectory vCameraPoses and the
    // sonar trajectory vSonarPoses.
    std::vector<Eigen::Matrix4d> cam_list, sonar_list;
    calcRelativeMotion(vCameraPoses, vSonarPoses, cam_list, sonar_list);
    // providePoseList(cam_list, sonar_list);

    Eigen::Matrix3d R_sc;
    Eigen::Vector3d t_sc;
    // cout << "----------start first hand-eye calib----------" << endl;
    double scale;
    handEyeCalibration(cam_list, sonar_list, R_sc, t_sc, scale);
    
    // cout << "----------finish first hand-eye calib---------" << endl;
    // cout << "scale: " << scale << endl;    
    
    // ------------------------------------------------------------------------------------
    // Step 4: refine the extrinsic further.
    // Step 4.1: optimize the extrinsic and camera 3D points via the camera
    // reprojection error. First re-triangulate the camera 3D points.
    unit_t *= scale;                                   // apply the rough scale estimated by the hand-eye calibration stage
    T_c2_c1 = Sophus::SE3d(eigen_R, unit_t);
    cv::eigen2cv(T_c2_c1.rotationMatrix(), R_c2_c1);
    cv::eigen2cv(T_c2_c1.translation(), t_c2_c1);
    vector<Eigen::Vector3d> vPointsCameraUpdate; 
    
    // Triangulate to obtain map points at the recovered metric scale.
    triangulationCamera(vKeys1, vKeys2, vInitMatches, R_c2_c1, t_c2_c1, vPointsCameraUpdate); 
    
    // Rescale the camera trajectory accordingly.
    for (auto& pose: vCameraPoses) {
        // Take the current pose's translation vector,
        Eigen::Vector3d translation = pose.translation();
        // scale it,
        translation *= scale;
        // and write it back into the pose.
        pose.translation() = translation;
    }    
    
    // cout << "\n--------------------------------" << endl;
    
    Sophus::SE3d T_sc(R_sc, t_sc);
    // cout << "before refinement, T_sc: \n" << T_sc.matrix() << endl;
    // ~ Save the result after the hand-eye calibration stage.
    string result_filename = fielname_prefix + "/results/ours_cam1sonar5.txt";
    checkAndCreateFile(result_filename);
    savePosesToFile(result_filename, T_sc);  

    // Step 4.2: BA-optimize the extrinsic and camera motion by minimizing
    // the acoustic landmark reprojection error.
    // ExtrinsicRefineUseCam(T_sc, vPointsCameraUpdate, vSonarPoses, vCameraMeasAll);   
    ExtrinsicRefineUseSonar(T_sc, vPointsSonar, vCameraPoses, vSonarMeasAll);
    
    // cout << "after refinement, T_sc:\n" << T_sc.matrix() << endl;
    // ~ Save the result after the reprojection-error minimization.
    savePosesToFile(result_filename, T_sc);
    
    // Step 4.3: trajectory-consistency optimization. The only optimized
    // variable is the extrinsic T_sc; the cost enforces
    // T_s1_si * T_s_c = T_sc * T_c1_ci.
    TrajConsistencyOptimization(vCameraPoses, vSonarPoses, T_sc);
    // ~ Save the extrinsic once more after trajectory-consistency optimization.
    savePosesToFile(result_filename, T_sc);
    cout << "位姿已保存到 " << result_filename << " 文件。\n";    

    return 0;
}