#include <algorithm>
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <gtest/gtest.h>
#include <opencv2/opencv.hpp>
#include <opengv/relative_pose/CentralRelativeAdapter.hpp>
#include <opengv/relative_pose/methods.hpp>

#include "poseEstimate.hpp"

using namespace std;
using namespace cv;
using namespace Eigen;
using Eigen::Quaterniond;

/// Shared fixture for the pose-estimation benchmarks below.
///
/// SetUp() collects and sorts the dataset image paths, opens the timestamp
/// CSV (skipping its header) and the trajectory output file. TearDown()
/// closes both streams, evaluates the written trajectory against ground
/// truth, then truncates the output file so each TEST_F starts clean.
class PoseEstimationTestFixture : public ::testing::Test {
protected:
    // EuRoC V1_01 dataset layout — hard-coded absolute paths; adjust for
    // the machine the tests run on.
    string imageFolderPath = "/home/lee/dataset/V101/mav0/cam0/data";
    string csvPath = "/home/lee/dataset/V101/mav0/cam0/data.csv";
    string outputFilePath = "/home/lee/dataset/V101/mav0/cam0/poseEstimate.txt";
    string groundTruthPath = "/home/lee/dataset/V101/mav0/state_groundtruth_estimate0/V1_01_easy.txt";
    int nfeatures = 300;  // feature budget per frame passed to find_feature_matches()

    vector<String> files;   // sorted image paths from imageFolderPath
    ifstream timefile;      // per-frame timestamps (CSV, header skipped)
    ofstream outfile;       // estimated trajectory written by savePose()
    PoseEstimator estimator;

    void SetUp() override {
        glob(imageFolderPath, files, false);
        // Need at least one consecutive pair of frames to estimate a pose.
        ASSERT_GE(files.size(), 2u) << "Not enough images in the folder";
        sort(files.begin(), files.end());

        timefile.open(csvPath);
        ASSERT_TRUE(timefile.is_open()) << "Failed to open CSV file: " << csvPath;

        outfile.open(outputFilePath);
        ASSERT_TRUE(outfile.is_open()) << "Failed to open output file: " << outputFilePath;

        // Discard the CSV header row so the first read yields a timestamp.
        string line;
        getline(timefile, line);
    }

    void TearDown() override {
        // Flush/close the trajectory before scoring it against ground truth.
        outfile.close();
        timefile.close();

        estimator.getRMSE(groundTruthPath, outputFilePath);

        // Truncate the output so the next test does not append to stale data.
        std::ofstream clearFile(outputFilePath, std::ofstream::out | std::ofstream::trunc);
        clearFile.close();
    }
};

// ---------------------------- Essential matrix (本质矩阵)

// Frame-to-frame pose via the essential matrix with cv::LMEDS; reports
// total and per-pair timing.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_LMEDS) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::LMEDS);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "LMEDS时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "LMEDS平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::RANSAC; reports
// total and per-pair timing.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_RANSAC) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::RANSAC);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "RANSAC时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "RANSAC平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_DEFAULT;
// reports total and per-pair timing.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_DEFAULT) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_DEFAULT);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_DEFAULT时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_DEFAULT平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_ACCURATE;
// reports total and per-pair timing.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_ACCURATE) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_ACCURATE);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_ACCURATE时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_ACCURATE平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_PROSAC;
// reports total and per-pair timing.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_PROSAC) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_PROSAC);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_PROSAC时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_PROSAC平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_FAST;
// reports total and per-pair timing.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_FAST) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_FAST);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_FAST时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_FAST平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_MAGSAC;
// reports total and per-pair timing.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_MAGSAC) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_MAGSAC);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_MAGSAC时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_MAGSAC平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// ----------------------------- Fundamental matrix (基础矩阵)

// Frame-to-frame pose using the cv::FM_7POINT flag; reports total and
// per-pair timing.
// NOTE(review): cv::FM_7POINT is a findFundamentalMat() method flag, but it
// is passed to pose_CV_essential(); cv::findEssentialMat() only documents
// LMEDS/RANSAC/USAC_* methods — confirm pose_CV_essential() intentionally
// maps FM_* flags (e.g. to findFundamentalMat()).
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_7POINT) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::FM_7POINT);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "FM_7POINT时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "FM_7POINT平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose using the cv::FM_8POINT flag; reports total and
// per-pair timing.
// NOTE(review): cv::FM_8POINT is a findFundamentalMat() method flag, but it
// is passed to pose_CV_essential(); cv::findEssentialMat() only documents
// LMEDS/RANSAC/USAC_* methods — confirm pose_CV_essential() intentionally
// maps FM_* flags (e.g. to findFundamentalMat()).
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_8POINT) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::FM_8POINT);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "FM_8POINT时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "FM_8POINT平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose using the cv::FM_RANSAC flag; reports total and
// per-pair timing.
// NOTE(review): cv::FM_RANSAC is a findFundamentalMat() method flag passed
// to pose_CV_essential(); in current OpenCV releases FM_RANSAC shares its
// numeric value with cv::RANSAC, so this may behave identically to the
// plain RANSAC test above — confirm the intent.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_FMRANSAC) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::FM_RANSAC);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "FM_RANSAC时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "FM_RANSAC平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose using the cv::FM_LMEDS flag; reports total and
// per-pair timing.
// NOTE(review): cv::FM_LMEDS is a findFundamentalMat() method flag passed
// to pose_CV_essential(); in current OpenCV releases FM_LMEDS shares its
// numeric value with cv::LMEDS, so this may behave identically to the
// plain LMEDS test above — confirm the intent.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_FMLMEDS) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::FM_LMEDS);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "FM_LMEDS时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "FM_LMEDS平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_DEFAULT.
// NOTE(review): this duplicates BasicPoseEstimation_cv_DEFAULT (same flag);
// consider removing one or switching to a value-parameterized test.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_USAC_DEFAULT) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_DEFAULT);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_DEFAULT时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_DEFAULT平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose using the cv::USAC_FM_8PTS flag; reports total and
// per-pair timing.
// NOTE(review): cv::USAC_FM_8PTS targets fundamental-matrix estimation in
// the USAC framework but is passed to pose_CV_essential() — confirm
// pose_CV_essential() supports it.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_USAC_FM_8PTS) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_FM_8PTS);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_FM_8PTS时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_FM_8PTS平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_ACCURATE.
// NOTE(review): this duplicates BasicPoseEstimation_cv_ACCURATE (same flag);
// consider removing one or switching to a value-parameterized test.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_USAC_ACCURATE) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_ACCURATE);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_ACCURATE时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_ACCURATE平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_PROSAC.
// NOTE(review): this duplicates BasicPoseEstimation_cv_PROSAC (same flag);
// consider removing one or switching to a value-parameterized test.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_USAC_PROSAC) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_PROSAC);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_PROSAC时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_PROSAC平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_FAST.
// NOTE(review): this duplicates BasicPoseEstimation_cv_FAST (same flag);
// consider removing one or switching to a value-parameterized test.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_USAC_FAST) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_FAST);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_FAST时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_FAST平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via the essential matrix with cv::USAC_MAGSAC.
// NOTE(review): this duplicates BasicPoseEstimation_cv_MAGSAC (same flag);
// consider removing one or switching to a value-parameterized test.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_cv_USAC_MAGSAC) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        Mat R_cv, t_cv;
        estimator.pose_CV_essential(keypoints_1, keypoints_2, matches, R_cv, t_cv, cv::USAC_MAGSAC);

        // Convert rotation to the quaternion form expected by savePose().
        Eigen::Matrix3d R;
        estimator.convertCvMatToEigen(R_cv, R);
        Eigen::Quaterniond q(R);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "USAC_MAGSAC时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "USAC_MAGSAC平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// ----------------------------- openGV

// Frame-to-frame pose via OpenGV's eight-point solver; reports total and
// per-pair timing.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_gv_eightpt) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        // pose_GV_eightpt returns Eigen (rotation, translation).
        auto [R, t] = estimator.pose_GV_eightpt(keypoints_1, keypoints_2, matches);
        Quaterniond q(R);
        // savePose() takes the translation as a cv::Mat.
        cv::Mat t_cv;
        estimator.convertEigenToCvMat(t, t_cv);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "eightpt时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "eightpt平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}

// Frame-to-frame pose via OpenGV's eigensolver method; reports total and
// per-pair timing.
TEST_F(PoseEstimationTestFixture, BasicPoseEstimation_gv_eigensolver) {
    const auto start_time = std::chrono::high_resolution_clock::now();

    int successful_pairs = 0;
    for (size_t i = 0; i + 1 < files.size(); ++i) {
        Mat img_1 = imread(files[i], IMREAD_COLOR);
        Mat img_2 = imread(files[i + 1], IMREAD_COLOR);
        // imread() returns an empty Mat on failure instead of throwing;
        // fail fast with a clear message rather than crashing downstream.
        ASSERT_FALSE(img_1.empty()) << "Failed to load image: " << files[i];
        ASSERT_FALSE(img_2.empty()) << "Failed to load image: " << files[i + 1];

        vector<KeyPoint> keypoints_1, keypoints_2;
        vector<DMatch> matches;
        estimator.find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches, nfeatures);

        // pose_GV_eigensolver returns Eigen (rotation, translation).
        auto [R, t] = estimator.pose_GV_eigensolver(keypoints_1, keypoints_2, matches);
        Quaterniond q(R);
        // savePose() takes the translation as a cv::Mat.
        cv::Mat t_cv;
        estimator.convertEigenToCvMat(t, t_cv);
        estimator.savePose(timefile, outfile, t_cv, q);
        ++successful_pairs;
    }

    const auto end_time = std::chrono::high_resolution_clock::now();
    const auto total_time = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
    // Guard against division by zero if no pair was processed.
    const double average_time = successful_pairs > 0 ? static_cast<double>(total_time) / successful_pairs : 0.0;
    std::cout << "eigensolver时间消耗：" << total_time << " 毫秒" << std::endl;
    std::cout << "eigensolver平均每两帧计算位姿的时间：" << average_time << " 毫秒" << std::endl;
}
