#include "kdtree/kdtree.h"
#include "initializer.h"

#include "base/database.h"
#include "feature/extraction.h"
#include "feature/matching.h"
#include "util/misc.h"

#include <cstdlib>
#include <iostream>
#include <fstream>

#include <Eigen/Core>
#include <Eigen/Geometry>

#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/io/ply_io.h>

#include <opencv2/opencv.hpp>
#include <opencv2/core/eigen.hpp>

#include <omp.h>

using namespace colmap;
using namespace std;
using namespace Eigen;
// using namespace cv;

double computeQuaternionAngle(const Eigen::Quaterniond& q1, const Eigen::Quaterniond& q2) 
{
    // Returns the smallest rotation angle (radians, in [0, pi]) between the two
    // orientations q1 and q2.

    // Compute the relative rotation quaternion
    Eigen::Quaterniond delta_q = q1.conjugate() * q2;
    delta_q.normalize(); // Ensure it is a unit quaternion

    // Clamp w into [-1, 1]: even after normalization, floating-point rounding can
    // leave w marginally outside the domain of acos, which would produce NaN.
    double w = delta_q.w();
    if (w > 1.0) {
        w = 1.0;
    } else if (w < -1.0) {
        w = -1.0;
    }

    // Extract the angle from the quaternion: theta = 2*acos(w), in [0, 2*pi].
    double angle = 2 * std::acos(w);
    if (angle > M_PI) {
        // q and -q represent the same rotation; report the smaller equivalent angle.
        angle = 2 * M_PI - angle;
    }
    return angle;
}


bool createDatabaseFromProject(const std::string& idx_path, const std::string& calib_path, const std::string& database_path)
{
    // Populates a COLMAP database with cameras and prior-posed images built from
    // a trajectory ("idx") file and a multi-camera calibration file.
    //
    // @param idx_path       CSV trajectory, one line per pose:
    //                       "timestamp(us),qw,qx,qy,qz,tx,ty,tz" (lidar pose in world).
    // @param calib_path     OpenCV FileStorage with CameraNum and, per camera i:
    //                       K{i}, D{i}, T{i} (camera-to-lidar), ImageSize{i}.
    // @param database_path  COLMAP database to create/update in place.
    // @return               true on success; false if either input file cannot be opened.

    // Minimum translation between consecutive kept poses (presumably meters —
    // TODO confirm the trajectory's unit).
    const double min_dist = 2.0;

    Database database(database_path);

    // Read idx file
    std::vector<Eigen::Matrix4d> vPoses;       // kept poses Twl (lidar-to-world)
    std::vector<std::string> vTimestamps;      // timestamps matching vPoses

    std::ifstream fin_idx(idx_path);
    if (!fin_idx.is_open())
    {
        std::cerr << "无法打开文件 " << idx_path << std::endl;
        return false;
    }

    std::string line;
    Vector3d last_t(0,0,0);
    while (std::getline(fin_idx, line))
    {
        // Skip comment lines and lines too short to hold a full pose record.
        if (line.length() < 30 || line[0] == '#')
            continue;

        long long int time;
        double qw, qx, qy, qz, tx, ty, tz;
        // Lines that do not match the expected 8-field CSV format are ignored.
        if (sscanf(line.c_str(), "%lld,%lf,%lf,%lf,%lf,%lf,%lf,%lf",
                   &time, &qw, &qx, &qy, &qz, &tx, &ty, &tz) != 8)
        {
            continue;
        }


        // NOTE(review): the quaternion parsed from text is not re-normalized
        // before being converted to a rotation matrix — assumed already unit.
        Eigen::Quaterniond q(qw, qx, qy, qz);
        Eigen::Vector3d t(tx, ty, tz);
        Eigen::Matrix4d Twc = Eigen::Matrix4d::Identity();

        /* downsample */
        // Keep a pose only if it moved at least min_dist from the last kept pose.
        // NOTE(review): last_t starts at the origin, so a trajectory starting
        // within min_dist of (0,0,0) has its first pose(s) dropped — confirm intended.
        double dist = (t-last_t).norm();
        if(dist<min_dist)continue;
        last_t = t;

        Twc.block<3, 3>(0, 0) = q.matrix();
        Twc.block<3, 1>(0, 3) = t;
        vPoses.push_back(Twc);
        vTimestamps.push_back(std::to_string(time));
    }
    fin_idx.close();

    // Read camera intrinsic parameters
    cv::FileStorage fs_calib(calib_path, cv::FileStorage::READ);
    if (!fs_calib.isOpened())
    {
        std::cerr << "无法打开文件 " << calib_path << std::endl;
        return false;
    }

    int camera_num;
    fs_calib["CameraNum"] >> camera_num;

    std::vector<cv::Mat> vKs(camera_num), vDs(camera_num), vTs(camera_num);
    std::vector<std::vector<int>> vSizes(camera_num);

    for (int i = 0; i < camera_num; ++i)
    {
        fs_calib["K" + std::to_string(i)] >> vKs[i];
        fs_calib["D" + std::to_string(i)] >> vDs[i];
        fs_calib["T" + std::to_string(i)] >> vTs[i];    // camera to lidar
        fs_calib["ImageSize" + std::to_string(i)] >> vSizes[i];


        // Camera ids are 1-based (COLMAP convention); id 0 is reserved.
        Camera camera;
        camera.SetCameraId(i + 1);
        camera.SetModelIdFromName("OPENCV_FISHEYE");
        // Assumes ImageSize is stored as [width, height] — TODO confirm.
        camera.SetWidth(vSizes[i][0]);
        camera.SetHeight(vSizes[i][1]);

        // OPENCV_FISHEYE parameter order: fx, fy, cx, cy, k1, k2, k3, k4.
        vector<double> params{vKs[i].at<double>(0, 0), vKs[i].at<double>(1, 1), 
                                vKs[i].at<double>(0, 2), vKs[i].at<double>(1, 2),
                                vDs[i].at<double>(0), vDs[i].at<double>(1), 
                                vDs[i].at<double>(2), vDs[i].at<double>(3),};
        camera.SetParams(params);

        // Upsert: update an existing camera row, otherwise insert with this id.
        if(database.ExistsCamera(camera.CameraId()))
            database.UpdateCamera(camera);
        else
            database.WriteCamera(camera, true);

    }
    fs_calib.release();


    // Write images: one DB image per (kept pose, camera), with the prior pose
    // stored as world-to-camera (Tcw) in Qvec/TvecPrior.
    image_t image_id = 1;
    for (size_t i = 0; i < vPoses.size(); ++i)
    {
        Eigen::Matrix4d Twl = vPoses[i];
        std::string time = vTimestamps[i];
        for (int j = 0; j < camera_num; ++j)
        {
            // Twc = Twl * Tlc chains camera->lidar->world; invert for COLMAP's Tcw.
            Eigen::Matrix4d Tlc;
            cv2eigen(vTs[j], Tlc);
            Eigen::Matrix4d Twc = Twl * Tlc;
            Eigen::Matrix4d Tcw = Twc.inverse();

            Eigen::Quaterniond q(Tcw.block<3, 3>(0, 0).matrix());
            Eigen::Vector3d t(Tcw.block<3, 1>(0, 3));

            Image image;
            image.SetImageId(image_id++);
            image.SetCameraId(j + 1);
            // Image name encodes timestamp and camera index; must match files on disk.
            image.SetName(time + "_Cam_" + std::to_string(j) + ".jpg");
            image.SetRegistered(true);

            image.QvecPrior() = {q.w(), q.x(), q.y(), q.z()};
            image.TvecPrior() = {t(0), t(1), t(2)};

            // Upsert by image id, mirroring the camera handling above.
            if(database.ExistsImage(image.ImageId()))
                database.UpdateImage(image);
            else
                database.WriteImage(image, true);
        }
    }

    std::cout << "Write database complete." << std::endl;

    return true;
}

void extractFeatures(const std::string& image_dir, const std::string& database_path)
{
    const int thread_num = 15;

    colmap::SiftExtractionOptions sift_options;
    sift_options.use_gpu = false; // Use CPU
    sift_options.estimate_affine_shape = false;
    sift_options.max_image_size = 4000;
    sift_options.max_num_features = 10000;

    Database database(database_path);
    database.ClearKeypoints();
    database.ClearDescriptors();
    std::vector<Image> images = database.ReadAllImages();

#pragma omp parallel for schedule(dynamic) num_threads(thread_num)
    for(int i=0; i<images.size(); i++)
    {
        if(omp_get_thread_num()==1)
            cout<<(float)i*100.0/images.size()<<endl;

        Image& image = images[i];
        string image_path = image_dir+image.Name();
        // Load image
        Bitmap bitmap;
        if (!bitmap.Read(image_path, false))
        {
            std::cerr << "无法读取图像 " << image_path << std::endl;
            continue;
        }
        
        double scale, scale_inv;
        if (static_cast<int>(bitmap.Width()) > sift_options.max_image_size ||
            static_cast<int>(bitmap.Width()) > sift_options.max_image_size) 
        {
            // Fit the down-sampled version exactly into the max dimensions.
            scale = static_cast<double>(sift_options.max_image_size) /
                std::max(bitmap.Width(), bitmap.Height());
            scale_inv = 1.0/scale;  
            const int new_width =
                static_cast<int>(bitmap.Width() * scale);
            const int new_height =
                static_cast<int>(bitmap.Height() * scale);

            bitmap.Rescale(new_width, new_height);
        }

        FeatureKeypoints keypoints;
        FeatureDescriptors descriptors;

        if (!ExtractSiftFeaturesCPU(sift_options, bitmap, &keypoints, &descriptors))
        {
            std::cerr << "特征提取失败 " << image_path << std::endl;
            continue;
        }

        for (auto& keypoint : keypoints)
            keypoint.Rescale(scale_inv, scale_inv);

 #pragma omp critical
        {
            cout<<image.Name()<<" "<<image.ImageId()<<" "<<keypoints.size()<<" "<<descriptors.size()<<endl;

            database.WriteKeypoints(image.ImageId(), keypoints);
            database.WriteDescriptors(image.ImageId(), descriptors);
        }
    }

    cout<<"Extraction successful."<<endl;
}

void matchFeatures(const std::string& database_path, double radius, double theta) 
{
    // Matches SIFT features between all image pairs whose camera centers lie
    // within `radius` of each other and whose orientations differ by less than
    // `theta` radians. Verified matches are written back into the database.
    //
    // @param database_path  COLMAP database with images carrying pose priors.
    // @param radius         max distance between camera centers (trajectory units).
    // @param theta          max relative rotation angle in radians.

    // Load the database
    Database database(database_path);

    // Load image ids and corresponding camera poses
    std::vector<image_t> image_ids;
    std::vector<Eigen::Vector4d> qvecs;
    std::vector<Eigen::Vector3d> tvecs;

    for (const auto& image : database.ReadAllImages()) {
        image_ids.push_back(image.ImageId());

        // Priors are stored as world-to-camera (qvec, tvec).
        Eigen::Vector4d qvec = image.QvecPrior();
        Eigen::Vector3d tvec = image.TvecPrior();

        // Invert to obtain camera orientation and center in world coordinates.
        Eigen::Quaterniond q(qvec(0), qvec(1), qvec(2), qvec(3));
        Eigen::Quaterniond q_inv = q.inverse();
        Eigen::Vector4d qvec_inv(q_inv.w(), q_inv.x(), q_inv.y(), q_inv.z());
        Eigen::Vector3d tvec_inv = -(q_inv * tvec);

        // Store the inverted values
        qvecs.push_back(qvec_inv);
        tvecs.push_back(tvec_inv);
    }

    // Dump camera centers (debug output) and pack them into a FLANN matrix.
    ofstream fout_camcent("cam_cent.txt");
    int rows = tvecs.size();
    int cols = 3;
    auto data_ptr = std::make_unique<float[]>(rows * cols);
    flann::Matrix<float> data(data_ptr.get(), rows, cols);
    for (size_t i = 0; i < tvecs.size(); ++i) {
        data[i][0] = tvecs[i][0];
        data[i][1] = tvecs[i][1];
        data[i][2] = tvecs[i][2];
        fout_camcent<<tvecs[i][0]<<" "<<tvecs[i][1]<<" "<<tvecs[i][2]<<endl;
    }

    const int tree_num = 1;
    flann::KDTreeIndexParams indexParams(tree_num);
    flann::Index<flann::L2<float>> flannIndex(data, indexParams);
    flannIndex.buildIndex();

    ofstream fout_candi("candi.txt");
    ofstream fout_query("query.txt");

    std::vector<std::pair<image_t, image_t>> candidate_pairs;
    for (size_t idx = 0; idx < tvecs.size(); ++idx) 
    {
        Vector3f tvec = tvecs[idx].cast<float>();
        float query[3]={tvec[0], tvec[1], tvec[2]};
        flann::Matrix<float> queryMat(&query[0], 1, cols);

        // FLANN's L2 radius search takes a SQUARED radius; SearchParams(-1)
        // requests an exact (exhaustive-leaf) search.
        std::vector<std::vector<int>> indices;
        std::vector<std::vector<float>> dists;
        flannIndex.radiusSearch(queryMat, indices, dists, radius*radius, flann::SearchParams(-1));

        // Iterate over the neighbors of the single query row. (The previous code
        // used radiusSearch's return value — the total neighbor count over all
        // queries — as the loop bound instead of indices[0].size().)
        for (size_t j = 0; j < indices[0].size(); ++j) 
        {
            int nn_idx = indices[0][j];

            // Skip the trivial self-match: every center is within radius of
            // itself, and an (i, i) pair would trigger a useless matching pass.
            if (nn_idx == static_cast<int>(idx)) continue;

            // Calculate the angle between the quaternion rotations
            Eigen::Quaterniond q1(qvecs[idx]);
            Eigen::Quaterniond q2(qvecs[nn_idx]);
            double angle = computeQuaternionAngle(q1, q2);

            // Check if the angle is below the threshold
            if (angle < theta) {
                candidate_pairs.emplace_back(image_ids[idx], image_ids[nn_idx]);
                fout_candi << tvecs[nn_idx].transpose() << std::endl;
            }
        }
    }


    cout<<"candidate_pairs.size "<<candidate_pairs.size()<<endl;


    // Perform feature matching
    SiftMatchingOptions options;
    options.use_gpu = false;
    const size_t cache_size = 5 * 50;  // Adjust the cache size as needed
    FeatureMatcherCache cache(cache_size, &database);
    SiftFeatureMatcher matcher(options, &database, &cache);
    printf("%s: %d\n", __FUNCTION__, __LINE__);
    cache.Setup();
    matcher.Setup();
    printf("%s: %d\n", __FUNCTION__, __LINE__);
    matcher.Match(candidate_pairs);
    printf("%s: %d\n", __FUNCTION__, __LINE__);

}


// A 2D-2D feature correspondence between two images together with the 3D
// point (with normal) associated with both observations.
struct Match {
    Eigen::Vector2d image1_point;   // keypoint location in image 1 (pixels)
    Eigen::Vector2d image2_point;   // keypoint location in image 2 (pixels)
    pcl::PointNormal point;         // corresponding 3D point from the point cloud
};


// Project a 3D point onto the image plane of the given camera.
// Returns false when the point has non-positive depth (behind the camera);
// on success, writes the pixel coordinates into image_point.
bool ProjectPointToImage(const Camera& camera, const Eigen::Matrix3x4d& projection_matrix, 
                         const pcl::PointNormal& point, Eigen::Vector2d& image_point) {
    const Eigen::Vector4d world_point(point.x, point.y, point.z, 1.0);
    const Eigen::Vector3d cam_point = projection_matrix * world_point;
    // Points at or behind the camera plane cannot be projected.
    if (cam_point.z() <= 0) {
        return false;
    }
    // Perspective division, then apply the camera's intrinsic mapping.
    const Eigen::Vector2d normalized = cam_point.hnormalized();
    image_point = camera.WorldToImage(normalized);
    return true;
}

// Check whether a pixel coordinate falls inside the camera's image bounds.
bool IsPointInImage(const Eigen::Vector2d& image_point, const Camera& camera) {
    const bool inside_x = image_point.x() >= 0 && image_point.x() < camera.Width();
    const bool inside_y = image_point.y() >= 0 && image_point.y() < camera.Height();
    return inside_x && inside_y;
}

// Check whether `point` lies within `radius` (inclusive) of `target`.
bool IsPointInRadius(const Eigen::Vector2d& point, const Eigen::Vector2d& target, double radius) {
    const double distance = (point - target).norm();
    return distance <= radius;
}

void matchFeaturesPointcloud(const std::string& database_path, const std::string& points_path, 
    image_t image_id1, image_t image_id2, float radius, 
    FeatureMatches& matches, std::vector<Eigen::Vector3d>& points3d) 
{
    // Lifts the verified 2D-2D inlier matches between two images to 3D: every
    // cloud point is projected into both images, and a point is assigned to a
    // matched keypoint when both reprojections fall within `radius` pixels of
    // the keypoint pair; for each keypoint, the cloud point closest to camera 1
    // (resp. camera 2) wins. Debug visualizations are written to img1/img2.jpg
    // and the selected 3D points to points3D.txt.
    //
    // @param database_path  COLMAP database with keypoints, priors, two-view geometry.
    // @param points_path    PLY file of pcl::PointNormal points (world frame).
    // @param image_id1/2    the image pair whose inlier matches are lifted.
    // @param radius         max pixel distance between projection and keypoint.
    // @param matches        [out] keypoint-index pairs that received a 3D point.
    // @param points3d       [out] the 3D point chosen for each emitted match.

    // euclideanDistance is defined later in this file with no prior declaration;
    // declare it locally so this definition compiles regardless of order.
    float euclideanDistance(const std::vector<float>&, const std::vector<float>&);

    // TODO(review): hard-coded dataset path — should be a parameter or config value.
    string image_dir("/shared_folder/data/colmap-pcd/backpack/2024-07-09_14-23-22/camera/");

    pcl::PointCloud<pcl::PointNormal>::Ptr points(new pcl::PointCloud<pcl::PointNormal>);
    pcl::io::loadPLYFile<pcl::PointNormal>(points_path, *points);
    std::cout << "load " << points->size() << " pts." << std::endl;

    // Open the database.
    Database database(database_path);

    auto image1 = database.ReadImage(image_id1);
    auto image2 = database.ReadImage(image_id2);


    const Camera& camera1 = database.ReadCamera(image1.CameraId());
    const Camera& camera2 = database.ReadCamera(image2.CameraId());


    // Use the pose priors as the working poses for projection.
    image1.Qvec() = image1.QvecPrior();
    image1.Tvec() = image1.TvecPrior();
    image2.Qvec() = image2.QvecPrior();
    image2.Tvec() = image2.TvecPrior();
    Eigen::Matrix3x4d projection_matrix1 = image1.ProjectionMatrix();
    Eigen::Matrix3x4d projection_matrix2 = image2.ProjectionMatrix();

    TwoViewGeometry tvg = database.ReadTwoViewGeometry(image_id1, image_id2);
    std::cout << "image1: " << image_id1 << ", image2: " << image_id2 << ", matches: " << tvg.inlier_matches.size() << std::endl;

    FeatureKeypoints keypts1 = database.ReadKeypoints(image_id1);
    FeatureKeypoints keypts2 = database.ReadKeypoints(image_id2);


    // Matched keypoint coordinates, index-aligned across the two images:
    // pt2ds_1[k] in image 1 corresponds to pt2ds_2[k] in image 2.
    std::vector<std::vector<float>> pt2ds_1, pt2ds_2;
    std::vector<int> pt2d_ids_1, pt2d_ids_2;
    for (const auto& match : tvg.inlier_matches) 
    {
        float u1 = keypts1[match.point2D_idx1].x;
        float v1 = keypts1[match.point2D_idx1].y;
        float u2 = keypts2[match.point2D_idx2].x;
        float v2 = keypts2[match.point2D_idx2].y;
        pt2ds_1.push_back(std::vector<float>{u1, v1});
        pt2ds_2.push_back(std::vector<float>{u2, v2});
        pt2d_ids_1.push_back(match.point2D_idx1);   // pt2d index in keypoints
        pt2d_ids_2.push_back(match.point2D_idx2);   // pt2d index in keypoints
    }


    KDTree tree1(pt2ds_1);
    KDTree tree2(pt2ds_2);  // NOTE(review): currently unused — id2 mirrors id1 below

    std::string image1_path = image_dir + image1.Name();
    std::string image2_path = image_dir + image2.Name();
    cv::Mat image1_data = cv::imread(image1_path);
    cv::Mat image2_data = cv::imread(image2_path);


    // Draw the matched keypoints (green) for visual inspection.
    for (size_t ipt = 0; ipt < pt2ds_1.size(); ipt++)
    {
        cv::circle(image1_data, cv::Point(pt2ds_1[ipt][0], pt2ds_1[ipt][1]), 8, cv::Scalar(0, 255, 0), -1);
        cv::circle(image2_data, cv::Point(pt2ds_2[ipt][0], pt2ds_2[ipt][1]), 8, cv::Scalar(0, 255, 0), -1);
    }

    // Per-keypoint best (closest-to-camera) cloud point bookkeeping.
    std::vector<float> min_dist1(pt2ds_1.size(), std::numeric_limits<float>::max());
    std::vector<float> min_dist2(pt2ds_2.size(), std::numeric_limits<float>::max());
    std::vector<Eigen::Vector3d> pt3ds(pt2ds_1.size());

    // Camera centers in world coordinates: C = -R^T * t.
    Eigen::Vector3d camera_center1 = -image1.RotationMatrix().transpose() * image1.Tvec();
    Eigen::Vector3d camera_center2 = -image2.RotationMatrix().transpose() * image2.Tvec();


    for (const auto& point : points->points) 
    {

        Eigen::Vector2d proj_point1, proj_point2;
        // Only consider cloud points visible (positive depth) in BOTH images.
        if (ProjectPointToImage(camera1, projection_matrix1, point, proj_point1) 
            && ProjectPointToImage(camera2, projection_matrix2, point, proj_point2))
        {
            // Reject projections outside either image.
            if(proj_point1.x()<0 || proj_point1.x()>= camera1.Width() 
            || proj_point1.y()<0 || proj_point1.y()>= camera1.Height())continue;

            if(proj_point2.x()<0 || proj_point2.x()>= camera2.Width() 
            || proj_point2.y()<0 || proj_point2.y()>= camera2.Height())continue;

            // Nearest matched keypoint in image 1; because pt2ds_1/pt2ds_2 are
            // index-aligned per match, the same index gives the corresponding
            // keypoint in image 2.
            int id1, id2;
            std::vector<float> query1{proj_point1.cast<float>()(0), proj_point1.cast<float>()(1)};
            std::vector<float> nearest1 = tree1.findNearestPoint(query1, id1);
            id2 = id1;

            std::vector<float> query2{proj_point2.cast<float>()(0), proj_point2.cast<float>()(1)};
            std::vector<float> nearest2 = pt2ds_2[id2];

            float distance_to_camera1 = (camera_center1 - Eigen::Vector3d(point.x, point.y, point.z)).norm();
            float distance_to_camera2 = (camera_center2 - Eigen::Vector3d(point.x, point.y, point.z)).norm();

            float dist1 = euclideanDistance(query1, nearest1);
            float dist2 = euclideanDistance(query2, nearest2);

            // Accept only if the projection is close to the keypoint in BOTH views,
            // then keep the cloud point nearest to each camera (occlusion handling).
            if (dist1 < radius && dist2 < radius) 
            {
                if (distance_to_camera1 < min_dist1[id1]) 
                {
                    min_dist1[id1] = distance_to_camera1;
                    pt3ds[id1] = Eigen::Vector3d(point.x, point.y, point.z);
                }

                if (distance_to_camera2 < min_dist2[id2]) 
                {
                    min_dist2[id2] = distance_to_camera2;
                    pt3ds[id2] = Eigen::Vector3d(point.x, point.y, point.z);
                }
            }
        }
    }


    // Emit one match + 3D point per keypoint that received a cloud point,
    // drawing its reprojections in red for inspection.
    std::ofstream fout("points3D.txt");
    for (size_t ipt = 0; ipt < min_dist1.size(); ipt++)
    {
        if (min_dist1[ipt] < std::numeric_limits<float>::max()) 
        {
            pcl::PointNormal point;
            point.x = pt3ds[ipt].x(), point.y = pt3ds[ipt].y(), point.z = pt3ds[ipt].z();

            Eigen::Vector2d proj_point1, proj_point2;
            ProjectPointToImage(camera1, projection_matrix1, point, proj_point1);
            ProjectPointToImage(camera2, projection_matrix2, point, proj_point2);

            cv::circle(image1_data, cv::Point(proj_point1.x(), proj_point1.y()), 8, cv::Scalar(0, 0, 255), -1);
            cv::circle(image2_data, cv::Point(proj_point2.x(), proj_point2.y()), 8, cv::Scalar(0, 0, 255), -1);

            matches.emplace_back(pt2d_ids_1[ipt], pt2d_ids_2[ipt]);
            points3d.push_back(pt3ds[ipt]); // Use the closest point based on camera distance
            fout << pt3ds[ipt].x() << " " << pt3ds[ipt].y() << " " << pt3ds[ipt].z() << std::endl;
        }
    }

    cv::imwrite("img1.jpg", image1_data);
    cv::imwrite("img2.jpg", image2_data);

    std::cout << "matches: " << matches.size() << std::endl;
}

// Euclidean (L2) distance between two equal-length float vectors.
// @throws std::invalid_argument if the vectors differ in length.
float euclideanDistance(const std::vector<float>& point1, const std::vector<float>& point2) 
{
    if (point1.size() != point2.size()) {
        throw std::invalid_argument("Vectors must be of the same length");
    }

    // Accumulate squared component differences, then take the root.
    float squared_sum = 0.0f;
    for (std::size_t k = 0; k < point1.size(); ++k) {
        const float delta = point1[k] - point2[k];
        squared_sum += delta * delta;
    }

    return std::sqrt(squared_sum);
}


void saveToIdx(colmap::Reconstruction& model, const std::string& idx_path)
{
    // Exports the registered image poses of a reconstruction to an idx file
    // (inverting COLMAP's world-to-camera convention to camera-in-world), and
    // dumps the sparse 3D points with colors to "points3d.txt".
    //
    // @param model     reconstruction holding registered images and 3D points.
    // @param idx_path  output path for the pose file.
    std::vector<image_t> image_ids = model.RegImageIds();
    std::sort(image_ids.begin(), image_ids.end());

    std::ofstream fout_idx(idx_path);
    fout_idx<<"# timestamp(us),qw,qx,qy,qz,X,Y,Z"<<endl;
    for(size_t iid=0; iid<image_ids.size(); iid++)
    {
        const Image& image = model.Image(image_ids[iid]);
        // COLMAP stores Tcw (world-to-camera); invert to get the camera pose.
        Eigen::Quaterniond q(image.Qvec(0), image.Qvec(1), image.Qvec(2), image.Qvec(3));
        Eigen::Vector3d t(image.Tvec(0), image.Tvec(1), image.Tvec(2));

        Eigen::Quaterniond q_inv = q.inverse();
        Eigen::Vector3d t_inv = -q_inv.toRotationMatrix()*t;

        // # timestamp(us),qw,qx,qy,qz,X,Y,Z
        fout_idx<<image.Name()<<","
                <<q_inv.w()<<","<<q_inv.x()<<","<<q_inv.y()<<","<<q_inv.z()<<","
                <<t_inv.x()<<","<<t_inv.y()<<","<<t_inv.z()<<endl;
    }

    std::ofstream fout_points3d("points3d.txt");
    // Bind by const reference: the previous code copied the whole point map and
    // then copied every (id, Point3D) element per loop iteration.
    const auto& points3d = model.Points3D();
    for(const auto& point3d : points3d)
    {
        fout_points3d<<point3d.second.XYZ().transpose()<<" "<<point3d.second.Color().cast<int>().transpose() <<endl;
    }

}