#include <iostream>
#include <string>
#include <vector>

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/xfeatures2d.hpp"

#include "DBoW3/DBoW3.h"

#include "data_io.h"
#include "icp.h"
#include "type.h"
#include "util.h"
#include "visual_odometry.h"

using namespace cv;
using namespace cv::xfeatures2d;
using namespace std;

/***************************************************
 * This section demonstrates how to train a vocabulary
 * from the ten images under the data/ directory.
 * ************************************************/

// int main(int argc, char** argv)
// {
//   vector<pcl::PointCloud<pcl::PointXYZRGB>::Ptr> pcs;
//   struct imageType image_data;
//   readConfig();
//   readData(pcs, image_data);
//   image_data.init();

//   // create vocabulary
//   cout << "creating vocabulary ... " << endl;
//   DBoW3::Vocabulary vocab;
//   vocab.create(image_data.descriptors);
//   cout << "vocabulary info: " << vocab << endl;
//   vocab.save("vocab_sift.yml.gz");
//   cout << "done" << endl;

//   return 0;
// }

int main(int argc, char** argv)
{
  // read the images and database
  cout << "reading database" << endl;
  DBoW3::Vocabulary vocab("./vocab_sift.yml.gz");
  // DBoW3::Vocabulary vocab("./vocabulary.yml.gz");
  // DBoW3::Vocabulary vocab("./vocab_larger.yml.gz");  // use large vocab if you want:
  if (vocab.empty())
  {
    cerr << "Vocabulary does not exist." << endl;
    return 1;
  }
  cout << "reading images... " << endl;
  vector<pcl::PointCloud<pcl::PointXYZRGB>::Ptr> pcs;
  struct imageType image_data;
  readConfig();
  readData(pcs, image_data);
  image_data.init();

  // NOTE: in this case we are comparing images with a vocabulary generated by themselves, this may lead to overfit.
  cout << "detecting features ... " << endl;
  // Ptr<Feature2D> detector = ORB::create();
  Ptr<SIFT> detector = SIFT::create(1000);
  vector<Mat> descriptors = image_data.descriptors;
  // for (Mat& image : image_data.imgs)
  // {
  //   vector<KeyPoint> keypoints;
  //   Mat descriptor;
  //   detector->detectAndCompute(image, Mat(), keypoints, descriptor);
  //   descriptors.push_back(descriptor);
  // }

  // we can compare the images directly or we can compare one image to a database
  // images :
  cout << "comparing images with images " << endl;
  for (int i = 0; i < image_data.imgs.size(); i++)
  {
    DBoW3::BowVector v1;
    vocab.transform(descriptors[i], v1);
    for (int j = i; j < image_data.imgs.size(); j++)
    {
      DBoW3::BowVector v2;
      vocab.transform(descriptors[j], v2);
      double score = vocab.score(v1, v2);
      cout << "image " << i << " vs image " << j << " : " << score << endl;
    }
    cout << endl;
  }

  // or with database
  cout << "comparing images with database " << endl;
  DBoW3::Database db(vocab, false, 0);
  for (int i = 0; i < descriptors.size(); i++)
    db.add(descriptors[i]);
  cout << "database info: " << db << endl;
  for (int i = 0; i < descriptors.size(); i++)
  {
    DBoW3::QueryResults ret;
    db.query(descriptors[i], ret, 10);  // max result=4
    cout << "searching for image " << i << " returns " << ret << endl;
    const float ratio_test = 0.8;
    const int frame_distance_threshold = 2;
    if ((ret[1].Score * ratio_test > ret[2].Score) && (i - int(ret[1].Id) > frame_distance_threshold))
    {
      cout << "Found loop!" << endl;
      cout << "vetex 1 :" << i << ", vetex 2:" << int(ret[1].Id) << endl;
    }
    cout << endl;
  }
  cout << "done." << endl;
}

// int main(int argv, char **argc)
// {
//   vector<pcl::PointCloud<pcl::PointXYZRGB>::Ptr> pcs;
//   struct imageType image_data;
//   readConfig();
//   readData(pcs, image_data);
//   image_data.init();

//   vector<vector<int>> loops;
//   loop_closure(image_data, loops);
//   ofstream outfile("./result.txt");

//   for (int i = 0; i < loops.size(); i++)
//   {
//     vector<Mat> imgs, depths;
//     vector<pcl::PointCloud<pcl::PointXYZRGB>::Ptr> pcs_two;

//     pcs_two.push_back(pcs[loops[i][0]]);
//     pcs_two.push_back(pcs[loops[i][1]]);

//     imgs.push_back(image_data.imgs[loops[i][0]]);
//     imgs.push_back(image_data.imgs[loops[i][1]]);
//     depths.push_back(image_data.depths[loops[i][0]]);
//     depths.push_back(image_data.depths[loops[i][1]]);

//     struct imageType image_data_two;
//     image_data_two.imgs = imgs;
//     image_data_two.depths = depths;
//     image_data_two.init();
//     vector<Matrix4f> T_init_two = calVisualOdometry(image_data_two);
//     vector<Matrix4f> T_result_two;
//     ndtRegistration(pcs_two, T_init_two, T_result_two);

//     Matrix3f rotation_matrix = T_result_two[0].topLeftCorner(3, 3).cast<double>();
//     Quaterniond q(rotation_matrix);
//     outfile << "EDGE: " << loops[i][0] << " " << loops[i][1] << " " << T_result_two[0].topRightCorner(3,
//     1).transpose()
//             << " " << q.x() << " " << q.y() << " " << q.z() << " " << q.w() << endl;
//     ;
//   }
// }
