/************************************************************
 *
 * Copyright (c) 2021, University of California, Los Angeles
 *
 * Authors: Kenny J. Chen, Brett T. Lopez
 * Contact: kennyjchen@ucla.edu, btlopez@ucla.edu
 *
 ***********************************************************/

#include "lci_slam/vilens/lidar/lidar_processor.h"

namespace vilens {

// Definition of the class-wide abort flag; initialized to false and intended
// to be set asynchronously to request shutdown (see the commented-out
// abortTimerCB below).
std::atomic<bool> lidarProcessor::abort_(false);

/**
 * Constructor
 **/

// Construct the lidar processor: load parameters from the config, zero all
// state (pose, IMU bias/measurement, odometry message), allocate the point
// cloud buffers, configure the GICP/filter objects, and gather CPU info for
// the runtime statistics display.
lidarProcessor::lidarProcessor(const config::Ptr& cfg) {
    this->getParams(cfg);

    // Worker-thread stop flags; raised in stop().
    this->stop_publish_thread = false;
    this->stop_publish_keyframe_thread = false;
    this->stop_metrics_thread = false;
    this->stop_debug_thread = false;

    this->dlo_initialized = false;
    this->imu_calibrated = false;
    this->normals_initialized = false;

    // this->icp_sub = this->nh.subscribe("pointcloud", 1,
    // &lidarProcessor::icpCB, this); this->imu_sub = this->nh.subscribe("imu",
    // 1, &lidarProcessor::imuCB, this);

    // Identity odometry message.
    this->odom.pose.pose.position.x = 0.;
    this->odom.pose.pose.position.y = 0.;
    this->odom.pose.pose.position.z = 0.;
    this->odom.pose.pose.orientation.w = 1.;
    this->odom.pose.pose.orientation.x = 0.;
    this->odom.pose.pose.orientation.y = 0.;
    this->odom.pose.pose.orientation.z = 0.;
    this->odom.pose.covariance = {0.};

    // Identity transforms: global (T), scan-to-scan (T_s2s) and its previous
    // value used for propagation.
    this->T = Eigen::Matrix4f::Identity();
    this->T_s2s = Eigen::Matrix4f::Identity();
    this->T_s2s_prev = Eigen::Matrix4f::Identity();

    this->pose_s2s = Eigen::Vector3d(0., 0., 0.);
    this->rotq_s2s = Eigen::Quaterniond(1., 0., 0., 0.);

    this->pose = Eigen::Vector3d(0., 0., 0.);
    this->rotq = Eigen::Quaterniond(1., 0., 0., 0.);

    this->imu_SE3 = Eigen::Matrix4d::Identity();

    // Zero IMU bias and the latest IMU measurement.
    this->imu_bias.gyro.x = 0.;
    this->imu_bias.gyro.y = 0.;
    this->imu_bias.gyro.z = 0.;
    this->imu_bias.accel.x = 0.;
    this->imu_bias.accel.y = 0.;
    this->imu_bias.accel.z = 0.;

    this->imu_meas.stamp = 0.;
    this->imu_meas.ang_vel.x = 0.;
    this->imu_meas.ang_vel.y = 0.;
    this->imu_meas.ang_vel.z = 0.;
    this->imu_meas.lin_accel.x = 0.;
    this->imu_meas.lin_accel.y = 0.;
    this->imu_meas.lin_accel.z = 0.;

    this->imu_buffer.set_capacity(this->imu_buffer_size_);
    this->first_imu_time = 0.;

    // Point cloud buffers.
    this->original_scan =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);
    this->current_scan =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);
    this->current_scan_t =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);

    this->keyframe_cloud =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);
    this->keyframes_cloud =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);
    this->keyframes_map =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);
    this->num_keyframes = 0;

    this->submap_cloud =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);
    this->submap_hasChanged = true;
    this->submap_kf_idx_prev.clear();

    this->source_cloud = nullptr;
    this->target_cloud = nullptr;

    // Hull objects used for submap keyframe selection.
    this->convex_hull.setDimension(3);
    this->concave_hull.setDimension(3);
    this->concave_hull.setAlpha(this->keyframe_thresh_dist_);
    this->concave_hull.setKeepInformation(true);

    // Scan-to-scan GICP configuration.
    this->gicp_s2s.setCorrespondenceRandomness(
        this->gicps2s_k_correspondences_);
    this->gicp_s2s.setMaxCorrespondenceDistance(this->gicps2s_max_corr_dist_);
    this->gicp_s2s.setMaximumIterations(this->gicps2s_max_iter_);
    this->gicp_s2s.setTransformationEpsilon(this->gicps2s_transformation_ep_);
    this->gicp_s2s.setEuclideanFitnessEpsilon(
        this->gicps2s_euclidean_fitness_ep_);
    this->gicp_s2s.setRANSACIterations(this->gicps2s_ransac_iter_);
    this->gicp_s2s.setRANSACOutlierRejectionThreshold(
        this->gicps2s_ransac_inlier_thresh_);

    // Scan-to-map GICP configuration.
    this->gicp.setCorrespondenceRandomness(this->gicps2m_k_correspondences_);
    this->gicp.setMaxCorrespondenceDistance(this->gicps2m_max_corr_dist_);
    this->gicp.setMaximumIterations(this->gicps2m_max_iter_);
    this->gicp.setTransformationEpsilon(this->gicps2m_transformation_ep_);
    this->gicp.setEuclideanFitnessEpsilon(this->gicps2m_euclidean_fitness_ep_);
    this->gicp.setRANSACIterations(this->gicps2m_ransac_iter_);
    this->gicp.setRANSACOutlierRejectionThreshold(
        this->gicps2m_ransac_inlier_thresh_);

    // Install (empty) search methods with force_no_recompute so the KdTrees
    // can be shared/swapped manually between the two GICP objects.
    pcl::Registration<PointType, PointType>::KdTreeReciprocalPtr temp;
    this->gicp_s2s.setSearchMethodSource(temp, true);
    this->gicp_s2s.setSearchMethodTarget(temp, true);
    this->gicp.setSearchMethodSource(temp, true);
    this->gicp.setSearchMethodTarget(temp, true);

    // Crop box removes points NEAR the sensor (negative filter).
    this->crop.setNegative(true);
    this->crop.setMin(Eigen::Vector4f(-this->crop_size_, -this->crop_size_,
                                      -this->crop_size_, 1.0));
    this->crop.setMax(Eigen::Vector4f(this->crop_size_, this->crop_size_,
                                      this->crop_size_, 1.0));

    this->vf_scan.setLeafSize(this->vf_scan_res_, this->vf_scan_res_,
                              this->vf_scan_res_);
    this->vf_submap.setLeafSize(this->vf_submap_res_, this->vf_submap_res_,
                                this->vf_submap_res_);
    this->vf_map.setLeafSize(this->vf_map_res_, this->vf_map_res_,
                             this->vf_map_res_);

    this->metrics.spaciousness.push_back(0.);

    // CPU brand string lives in cpuid leaves 0x80000002-0x80000004.
    char CPUBrandString[0x40];
    unsigned int CPUInfo[4] = {0, 0, 0, 0};
    __cpuid(0x80000000, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]);
    unsigned int nExIds = CPUInfo[0];
    memset(CPUBrandString, 0, sizeof(CPUBrandString));
    for (unsigned int i = 0x80000000; i <= nExIds; ++i) {
        __cpuid(i, CPUInfo[0], CPUInfo[1], CPUInfo[2], CPUInfo[3]);
        if (i == 0x80000002)
            memcpy(CPUBrandString, CPUInfo, sizeof(CPUInfo));
        else if (i == 0x80000003)
            memcpy(CPUBrandString + 16, CPUInfo, sizeof(CPUInfo));
        else if (i == 0x80000004)
            memcpy(CPUBrandString + 32, CPUInfo, sizeof(CPUInfo));
    }

    this->cpu_type = CPUBrandString;
    boost::trim(this->cpu_type);

    FILE* file;
    struct tms timeSample;
    char line[128];

    // Baseline CPU-time samples for the usage statistics.
    this->lastCPU = times(&timeSample);
    this->lastSysCPU = timeSample.tms_stime;
    this->lastUserCPU = timeSample.tms_utime;

    // Count logical processors from /proc/cpuinfo.
    // BUGFIX: guard against fopen() failing — previously a NULL FILE* was
    // passed straight to fgets()/fclose(), which is undefined behavior.
    file = fopen("/proc/cpuinfo", "r");
    this->numProcessors = 0;
    if (file != NULL) {
        while (fgets(line, 128, file) != NULL) {
            if (strncmp(line, "processor", 9) == 0)
                this->numProcessors++;
        }
        fclose(file);
    }

    ROS_INFO("DLO Odom Node Initialized");
}

/**
 * Destructor
 **/

// Trivial destructor; members clean themselves up (RAII).
lidarProcessor::~lidarProcessor() = default;

// Advertise the ROS topics used for visualization.
// @param nh  node handle on which the publishers are created.
// The keyframe topics pass `true` as the third argument (latched in ROS1),
// so late subscribers receive the last published message.
void lidarProcessor::registerRosVisualizer(ros::NodeHandle& nh) {
    this->odom_pub = nh.advertise<nav_msgs::Odometry>("odom", 1);
    this->kf_pub = nh.advertise<nav_msgs::Odometry>("kfs", 1, true);
    this->keyframe_pub =
        nh.advertise<sensor_msgs::PointCloud2>("keyframe", 1, true);
}

/**
 * Odom Node Parameters
 **/

// Copy all tunable parameters from the shared config object into member
// fields. Called once from the constructor; pure assignment, no validation.
// @param cfg  shared pointer to the loaded configuration.
void lidarProcessor::getParams(const config::Ptr& cfg) {
    // Frames
    this->odom_frame = cfg->lidar.odom_frame;
    this->child_frame = cfg->lidar.child_frame;

    // Keyframe Threshold
    this->keyframe_thresh_dist_ = cfg->lidar.keyframe_thresh_D;
    this->keyframe_thresh_rot_ = cfg->lidar.keyframe_thresh_R;

    // Submap
    this->submap_knn_ = cfg->lidar.submap_keyframe_knn;
    this->submap_kcv_ = cfg->lidar.submap_keyframe_kcv;
    this->submap_kcc_ = cfg->lidar.submap_keyframe_kcc;

    // Crop Box Filter
    this->crop_use_ = cfg->lidar.preprocessing_cropboxfilter_use;
    this->crop_size_ = cfg->lidar.preprocessing_cropboxfilter_size;

    // Voxel Grid Filter
    this->vf_scan_use_ = cfg->lidar.preprocessing_voxelfilter_scan_use;
    this->vf_scan_res_ = cfg->lidar.preprocessing_voxelfilter_scan_res;
    this->vf_submap_use_ = cfg->lidar.preprocessing_voxelfilter_submap_use;
    this->vf_submap_res_ = cfg->lidar.preprocessing_voxelfilter_submap_res;
    this->vf_map_res_ = cfg->lidar.preprocessing_voxelfilter_map_res;

    // Adaptive Parameters
    this->adaptive_params_use_ = cfg->lidar.adaptive_params;

    // GICP: scan-to-scan (s2s) and scan-to-map (s2m) parameter sets.
    this->gicp_min_num_points_ = cfg->lidar.gicp_minnumpoints;
    this->gicps2s_k_correspondences_ = cfg->lidar.gicp_s2s_kcorrespondences;
    this->gicps2s_max_corr_dist_ =
        cfg->lidar.gicp_s2s_maxcorrespondencedistance;
    this->gicps2s_max_iter_ = cfg->lidar.gicp_s2s_maxiterations;
    this->gicps2s_transformation_ep_ =
        cfg->lidar.gicp_s2s_transformationepsilon;
    this->gicps2s_euclidean_fitness_ep_ =
        cfg->lidar.gicp_s2s_euclideanfitnessepsilon;
    this->gicps2s_ransac_iter_ = cfg->lidar.gicp_s2s_ransac_iterations;
    this->gicps2s_ransac_inlier_thresh_ =
        cfg->lidar.gicp_s2s_ransac_outlierRejectionThresh;
    this->gicps2m_k_correspondences_ = cfg->lidar.gicp_s2m_kcorrespondences;
    this->gicps2m_max_corr_dist_ =
        cfg->lidar.gicp_s2m_maxcorrespondencedistance;
    this->gicps2m_max_iter_ = cfg->lidar.gicp_s2m_maxiterations;
    this->gicps2m_transformation_ep_ =
        cfg->lidar.gicp_s2m_transformationepsilon;
    this->gicps2m_euclidean_fitness_ep_ =
        cfg->lidar.gicp_s2m_euclideanfitnessepsilon;
    this->gicps2m_ransac_iter_ = cfg->lidar.gicp_s2m_ransac_iterations;
    this->gicps2m_ransac_inlier_thresh_ =
        cfg->lidar.gicp_s2m_ransac_outlierRejectionThresh;

    this->relPose = cfg->lidar.relPose;

    // this->publish_keyframe_thread =
    //     std::thread(&lidarProcessor::publishKeyframes, this);
}

/**
 * Start Odom Thread
 **/

// Announce node startup: log, clear the terminal, and print the banner.
void lidarProcessor::start() {
    ROS_INFO("Starting DLO Odometry Node");

    // ANSI escape: clear screen and move cursor to the top-left corner.
    printf("\033[2J\033[1;1H");

    std::cout << std::endl;
    std::cout << "==== Direct LiDAR Odometry v" << this->version_ << " ===="
              << std::endl;
    std::cout << std::endl;
}

/**
 * Stop Odom Thread
 **/

// Shut the node down: raise each worker thread's stop flag, join the thread
// if it is joinable, then shut down ROS. Flags are set one-by-one just
// before the corresponding join so each thread is signalled before we wait
// on it.
void lidarProcessor::stop() {
    ROS_WARN("Stopping DLO Odometry Node");

    this->stop_publish_thread = true;
    if (this->publish_thread.joinable()) {
        this->publish_thread.join();
    }

    this->stop_publish_keyframe_thread = true;
    if (this->publish_keyframe_thread.joinable()) {
        this->publish_keyframe_thread.join();
    }

    this->stop_metrics_thread = true;
    if (this->metrics_thread.joinable()) {
        this->metrics_thread.join();
    }

    this->stop_debug_thread = true;
    if (this->debug_thread.joinable()) {
        this->debug_thread.join();
    }

    ros::shutdown();
}

/**
 * Abort Timer Callback
 **/

// void lidarProcessor::abortTimerCB(const ros::TimerEvent& e) {
//   if (abort_) {
//     stop();
//   }
// }

/**
 * Publish to ROS
 **/

// Publish the current odometry estimate as both a nav_msgs::Odometry
// message and a TF transform. Spawned as a detached thread from process().
void lidarProcessor::publishToROS() {
    this->publishPose();
    this->publishTransform();
}

/**
 * Publish Pose
 **/

void lidarProcessor::publishPose() {
    // Sign flip check
    static Eigen::Quaterniond q_diff{1., 0., 0., 0.};
    static Eigen::Quaterniond q_last{1., 0., 0., 0.};

    q_diff = q_last.conjugate() * this->rotq;

    // If q_diff has negative real part then there was a sign flip
    if (q_diff.w() < 0) {
        this->rotq.w() = -this->rotq.w();
        this->rotq.vec() = -this->rotq.vec();
    }

    q_last = this->rotq;

    this->odom.pose.pose.position.x = this->pose[0];
    this->odom.pose.pose.position.y = this->pose[1];
    this->odom.pose.pose.position.z = this->pose[2];

    this->odom.pose.pose.orientation.w = this->rotq.w();
    this->odom.pose.pose.orientation.x = this->rotq.x();
    this->odom.pose.pose.orientation.y = this->rotq.y();
    this->odom.pose.pose.orientation.z = this->rotq.z();

    this->odom.header.stamp = this->scan_stamp;
    this->odom.header.frame_id = this->odom_frame;
    this->odom.child_frame_id = this->child_frame;
    this->odom_pub.publish(this->odom);
}

/**
 * Publish Transform
 **/

void lidarProcessor::publishTransform() {
    static tf2_ros::TransformBroadcaster br;
    geometry_msgs::TransformStamped transformStamped;

    transformStamped.header.stamp = this->scan_stamp;
    transformStamped.header.frame_id = this->odom_frame;
    transformStamped.child_frame_id = this->child_frame;

    transformStamped.transform.translation.x = this->pose[0];
    transformStamped.transform.translation.y = this->pose[1];
    transformStamped.transform.translation.z = this->pose[2];

    transformStamped.transform.rotation.w = this->rotq.w();
    transformStamped.transform.rotation.x = this->rotq.x();
    transformStamped.transform.rotation.y = this->rotq.y();
    transformStamped.transform.rotation.z = this->rotq.z();

    br.sendTransform(transformStamped);
}

/**
 * Publish Keyframe Pose and Scan
 **/

// Publish the latest keyframe: its pose on the keyframe odometry topic and
// its point cloud on the keyframe cloud topic.
void lidarProcessor::publishKeyframe() {
    // Publish keyframe pose
    this->kf.header.stamp = this->scan_stamp;
    this->kf.header.frame_id = this->odom_frame;
    this->kf.child_frame_id = this->child_frame;

    this->kf.pose.pose.position.x = this->pose[0];
    this->kf.pose.pose.position.y = this->pose[1];
    this->kf.pose.pose.position.z = this->pose[2];

    this->kf.pose.pose.orientation.w = this->rotq.w();
    this->kf.pose.pose.orientation.x = this->rotq.x();
    this->kf.pose.pose.orientation.y = this->rotq.y();
    this->kf.pose.pose.orientation.z = this->rotq.z();

    this->kf_pub.publish(this->kf);

    // Publish keyframe scan — only when the cloud's bookkeeping is
    // consistent (point count matches width*height), i.e. the cloud is not
    // mid-update by another thread.
    if (this->keyframe_cloud->points.size() ==
        this->keyframe_cloud->width * this->keyframe_cloud->height) {
        sensor_msgs::PointCloud2 keyframe_cloud_ros;
        pcl::toROSMsg(*this->keyframe_cloud, keyframe_cloud_ros);
        keyframe_cloud_ros.header.stamp = this->scan_stamp;
        keyframe_cloud_ros.header.frame_id = this->odom_frame;
        this->keyframe_pub.publish(keyframe_cloud_ros);
    }
}

/**
 * Publish Keyframes Pose and Scan
 **/

// Background loop that periodically publishes the accumulated keyframe map
// and the latest keyframe pose. Snapshots shared state under mtx_keyframe,
// voxel-filters the growing map, and sleeps ~50 ms between iterations.
void lidarProcessor::publishKeyframes() {
    pcl::PointCloud<PointType>::Ptr keyframe_cloud_temp(
        new pcl::PointCloud<PointType>());
    nav_msgs::Odometry kf_temp;

    // BUGFIX: previously `while (1)` — the stop flag was never checked, so
    // stop() could block forever on join(). Exit when the flag is raised.
    while (!this->stop_publish_keyframe_thread) {
        {
            // Copy shared state out under the lock so publishing and
            // filtering happen without holding it.
            mtx_keyframe.lock();
            *keyframe_cloud_temp = *(this->keyframe_cloud);
            kf_temp = this->kf;
            mtx_keyframe.unlock();
        }

        // Fold the latest keyframe into the map and re-voxelize it.
        *this->keyframes_map += *keyframe_cloud_temp;
        this->vf_map.setInputCloud(this->keyframes_map);
        this->vf_map.filter(*this->keyframes_map);
        this->kf_pub.publish(kf_temp);

        // Publish keyframes — only when the cloud bookkeeping is consistent
        // (point count matches width*height).
        if (this->keyframes_map->points.size() ==
            this->keyframes_map->width * this->keyframes_map->height) {
            sensor_msgs::PointCloud2 keyframe_cloud_ros;
            pcl::toROSMsg(*this->keyframes_map, keyframe_cloud_ros);
            keyframe_cloud_ros.header.stamp = this->scan_stamp;
            keyframe_cloud_ros.header.frame_id = this->odom_frame;
            this->keyframe_pub.publish(keyframe_cloud_ros);
        }

        usleep(1e6 * 0.05);
    }
}

/**
 * Preprocessing
 **/

// Prepare the current scan for registration: keep a raw copy, strip NaN
// returns, then apply the optional crop-box and voxel-grid filters in place.
void lidarProcessor::preprocessPoints() {
    // Preserve the unfiltered scan.
    *this->original_scan = *this->current_scan;

    // Remove NaN points (mark non-dense first so PCL actually filters).
    std::vector<int> nan_indices;
    this->current_scan->is_dense = false;
    pcl::removeNaNFromPointCloud(*this->current_scan, *this->current_scan,
                                 nan_indices);

    // Optional crop box around the sensor origin.
    if (this->crop_use_) {
        this->crop.setInputCloud(this->current_scan);
        this->crop.filter(*this->current_scan);
    }

    // Optional voxel-grid downsampling.
    if (this->vf_scan_use_) {
        this->vf_scan.setInputCloud(this->current_scan);
        this->vf_scan.filter(*this->current_scan);
    }
}

/**
 * Initialize Input Target
 **/

// Seed the registration pipeline with the first scan: set it as the S2S
// target, create the first keyframe from it, and populate the keyframe
// message/state.
void lidarProcessor::initializeInputTarget() {
    this->prev_frame_stamp = this->curr_frame_stamp;

    // Use the current scan as the first registration target.
    // FIX: previously a fresh cloud was allocated here and immediately
    // discarded by this reassignment — the allocation was pure waste.
    this->target_cloud = this->current_scan;
    this->gicp_s2s.setInputTarget(this->target_cloud);

    // Initialize keyframes: express the first scan in the global frame.
    pcl::PointCloud<PointType>::Ptr first_keyframe(
        new pcl::PointCloud<PointType>);
    pcl::transformPointCloud(*this->target_cloud, *first_keyframe, this->T);

    // Voxelize the keyframe used for the submap, if enabled.
    if (this->vf_submap_use_) {
        this->vf_submap.setInputCloud(first_keyframe);
        this->vf_submap.filter(*first_keyframe);
    }

    // Keep history of keyframes (pose + cloud pairs).
    this->keyframes.push_back(
        std::make_pair(std::make_pair(this->pose, this->rotq), first_keyframe));

    *this->keyframes_cloud += *first_keyframe;

    {
        // mtx_keyframe.lock();
        *this->keyframe_cloud = *first_keyframe;
        // *this->keyframes_map += *first_keyframe;

        this->kf.header.stamp = this->scan_stamp;
        this->kf.header.frame_id = this->odom_frame;
        this->kf.child_frame_id = this->child_frame;

        this->kf.pose.pose.position.x = this->pose[0];
        this->kf.pose.pose.position.y = this->pose[1];
        this->kf.pose.pose.position.z = this->pose[2];

        this->kf.pose.pose.orientation.w = this->rotq.w();
        this->kf.pose.pose.orientation.x = this->rotq.x();
        this->kf.pose.pose.orientation.y = this->rotq.y();
        this->kf.pose.pose.orientation.z = this->rotq.z();

        // mtx_keyframe.unlock();
    }

    ++this->num_keyframes;
}

/**
 * Set Input Sources
 **/

// Register the current scan as the source cloud for both GICP objects,
// building the KdTree only once (in the S2S object) and sharing it with the
// S2M object. The call order here is load-bearing.
void lidarProcessor::setInputSources() {
    // set the input source for the S2S gicp
    // this builds the KdTree of the source cloud
    // this does not build the KdTree for s2m because force_no_update is true
    this->gicp_s2s.setInputSource(this->current_scan);

    // set pcl::Registration input source for S2M gicp using custom NanoGICP
    // function
    this->gicp.registerInputSource(this->current_scan);

    // now set the KdTree of S2M gicp using previously built KdTree
    this->gicp.source_kdtree_ = this->gicp_s2s.source_kdtree_;
    // clear stale source covariances so they are recomputed (or copied from
    // the S2S result in getNextPose)
    this->gicp.source_covs_.clear();
}

/**
 * Gravity Alignment
 **/

// Estimate the initial orientation by aligning the measured gravity
// direction (averaged IMU linear acceleration over one second) with the
// world +Z axis, then seed all pose transforms with that rotation.
// NOTE(review): the loop below busy-spins re-reading this->imu_meas; it
// relies on another thread updating that member concurrently — confirm the
// IMU callback runs in parallel.
void lidarProcessor::gravityAlign() {
    // Accumulate the acceleration vector for one second.
    Eigen::Vector3d lin_accel = Eigen::Vector3d::Zero();
    const double then = ros::Time::now().toSec();
    int n = 0;
    while ((ros::Time::now().toSec() - then) < 1.) {
        lin_accel[0] += this->imu_meas.lin_accel.x;
        lin_accel[1] += this->imu_meas.lin_accel.y;
        lin_accel[2] += this->imu_meas.lin_accel.z;
        ++n;
    }
    if (n > 0) {
        lin_accel /= n;
    }

    // Normalize, guarding against a zero accumulated vector (previously a
    // zero norm produced NaNs).
    const double lin_norm = lin_accel.norm();
    if (lin_norm > 0.) {
        lin_accel /= lin_norm;
    }

    // Gravity reference direction (assumed to point downwards, i.e. the
    // measured specific force points up along +Z when level).
    Eigen::Vector3d grav;
    grav << 0, 0, 1;

    // Rotation taking the measured direction onto the gravity reference.
    Eigen::Quaterniond grav_q =
        Eigen::Quaterniond::FromTwoVectors(lin_accel, grav);
    grav_q.normalize();

    // Seed the global and scan-to-scan transforms with this orientation.
    this->rotq = grav_q;
    this->T.block(0, 0, 3, 3) = this->rotq.toRotationMatrix().cast<float>();
    this->T_s2s.block(0, 0, 3, 3) = this->rotq.toRotationMatrix().cast<float>();
    this->T_s2s_prev.block(0, 0, 3, 3) =
        this->rotq.toRotationMatrix().cast<float>();

    // Report roll/pitch in degrees (yaw is unobservable from gravity alone;
    // the previously computed-but-unused yaw variable has been removed).
    auto euler = grav_q.toRotationMatrix().eulerAngles(2, 1, 0);
    double pitch = euler[1] * (180.0 / M_PI);
    double roll = euler[2] * (180.0 / M_PI);

    std::cout << "done" << std::endl;
    std::cout << "  Roll [deg]: " << roll << std::endl;
    std::cout << "  Pitch [deg]: " << pitch << std::endl << std::endl;
}

/**
 * Initialize 6DOF
 **/

// One-time DLO initialization: wait for IMU calibration (when the IMU is in
// use), optionally gravity-align the initial orientation, then mark the
// pipeline ready.
void lidarProcessor::initializeDLO() {
    // Still calibrating the IMU — try again on the next call.
    if (this->imu_use_ && !this->imu_calibrated) {
        return;
    }

    // Gravity alignment requires a calibrated IMU.
    if (this->imu_use_ && this->imu_calibrated && this->gravity_align_) {
        std::cout << "Aligning to gravity... ";
        std::cout.flush();
        this->gravityAlign();
    }

    this->dlo_initialized = true;
    std::cout << "DLO initialized! Starting localization..." << std::endl;
}

// Main per-scan entry point: validate the incoming cloud, preprocess it,
// run S2S + S2M registration to get the next pose, update keyframes and the
// trajectory, and kick off the (detached) publishing/debug threads.
// @param timeStamp  scan acquisition time in seconds.
// @param rawPoints  the incoming lidar scan (taken as the current scan).
void lidarProcessor::process(const double& timeStamp,
                             pcl::PointCloud<PointType>::Ptr& rawPoints) {
    double then = ros::Time::now().toSec();
    this->scan_stamp = ros::Time().fromSec(timeStamp);
    this->curr_frame_stamp = timeStamp;

    // If there are too few points in the pointcloud, try again
    this->current_scan = rawPoints;
    if (this->current_scan->points.size() < this->gicp_min_num_points_) {
        LOG(WARNING) << "Low number of points!";
        return;
    }

    // DLO Initialization procedures (IMU calib, gravity align)
    if (!this->dlo_initialized) {
        this->dlo_initialized = true;
        std::cout << "DLO initialized! Starting localization..." << std::endl;
        return;
    }

    // TODO: motion-distortion (de-skew) compensation. BUGFIX: the original
    // code had a dangling `this->` expression here (a syntax error); the
    // intended de-skew call was never written.

    // Preprocess points
    this->preprocessPoints();

    // Compute Metrics (detached worker; result is consumed asynchronously)
    this->metrics_thread = std::thread(&lidarProcessor::computeMetrics, this);
    this->metrics_thread.detach();

    // Set Adaptive Parameters
    if (this->adaptive_params_use_) {
        this->setAdaptiveParams();
    }

    // Set initial frame as target
    if (this->target_cloud == nullptr) {
        this->initializeInputTarget();
        return;
    }

    // Set source frame
    this->source_cloud = this->current_scan;

    // Set new frame as input source for both gicp objects
    this->setInputSources();

    // Get the next pose via IMU + S2S + S2M
    this->getNextPose();

    // Update current keyframe poses and map
    this->updateKeyframes();

    // Update trajectory
    this->trajectory.push_back(std::make_pair(this->pose, this->rotq));

    // Update next time stamp
    this->prev_frame_stamp = this->curr_frame_stamp;

    // Update some statistics
    this->comp_times.push_back(ros::Time::now().toSec() - then);

    // Publish stuff to ROS
    this->publish_thread = std::thread(&lidarProcessor::publishToROS, this);
    this->publish_thread.detach();

    // Debug statements and publish custom DLO message
    this->debug_thread = std::thread(&lidarProcessor::debug, this);
    this->debug_thread.detach();
}

// /**
//  * IMU Callback
//  **/

// void lidarProcessor::imuCB(const sensor_msgs::Imu::ConstPtr& imu) {

//   if (!this->imu_use_) {
//     return;
//   }

//   double ang_vel[3], lin_accel[3];

//   // Get IMU samples
//   ang_vel[0] = imu->angular_velocity.x;
//   ang_vel[1] = imu->angular_velocity.y;
//   ang_vel[2] = imu->angular_velocity.z;

//   lin_accel[0] = imu->linear_acceleration.x;
//   lin_accel[1] = imu->linear_acceleration.y;
//   lin_accel[2] = imu->linear_acceleration.z;

//   if (this->first_imu_time == 0.) {
//     this->first_imu_time = imu->header.stamp.toSec();
//   }

//   // IMU calibration procedure - do for three seconds
//   if (!this->imu_calibrated) {

//     static int num_samples = 0;
//     static bool print = true;

//     if ((imu->header.stamp.toSec() - this->first_imu_time) <
//     this->imu_calib_time_) {

//       num_samples++;

//       this->imu_bias.gyro.x += ang_vel[0];
//       this->imu_bias.gyro.y += ang_vel[1];
//       this->imu_bias.gyro.z += ang_vel[2];

//       this->imu_bias.accel.x += lin_accel[0];
//       this->imu_bias.accel.y += lin_accel[1];
//       this->imu_bias.accel.z += lin_accel[2];

//       if(print) {
//         std::cout << "Calibrating IMU for " << this->imu_calib_time_ << "
//         seconds... "; std::cout.flush(); print = false;
//       }

//     } else {

//       this->imu_bias.gyro.x /= num_samples;
//       this->imu_bias.gyro.y /= num_samples;
//       this->imu_bias.gyro.z /= num_samples;

//       this->imu_bias.accel.x /= num_samples;
//       this->imu_bias.accel.y /= num_samples;
//       this->imu_bias.accel.z /= num_samples;

//       this->imu_calibrated = true;

//       std::cout << "done" << std::endl;
//       std::cout << "  Gyro biases [xyz]: " << this->imu_bias.gyro.x << ", "
//       << this->imu_bias.gyro.y << ", " << this->imu_bias.gyro.z << std::endl
//       << std::endl;

//     }

//   } else {

//     // Apply the calibrated bias to the new IMU measurements
//     this->imu_meas.stamp = imu->header.stamp.toSec();

//     this->imu_meas.ang_vel.x = ang_vel[0] - this->imu_bias.gyro.x;
//     this->imu_meas.ang_vel.y = ang_vel[1] - this->imu_bias.gyro.y;
//     this->imu_meas.ang_vel.z = ang_vel[2] - this->imu_bias.gyro.z;

//     this->imu_meas.lin_accel.x = lin_accel[0];
//     this->imu_meas.lin_accel.y = lin_accel[1];
//     this->imu_meas.lin_accel.z = lin_accel[2];

//     // Store into circular buffer
//     this->mtx_imu.lock();
//     this->imu_buffer.push_front(this->imu_meas);
//     this->mtx_imu.unlock();

//   }

// }

/**
 * Get Next Pose
 **/

// Compute the next pose estimate via two-stage registration:
//   1. scan-to-scan (S2S) GICP between the current and previous scans,
//      propagated into the global frame;
//   2. scan-to-submap (S2M) GICP against the keyframe submap, warm-started
//      with the propagated S2S transform.
// The order of operations (covariance reuse, source/target swap) is
// load-bearing — do not reorder.
void lidarProcessor::getNextPose() {
    //
    // FRAME-TO-FRAME PROCEDURE
    //

    // Align using IMU prior if available
    pcl::PointCloud<PointType>::Ptr aligned(new pcl::PointCloud<PointType>);

    this->gicp_s2s.align(*aligned);

    // if first iteration, store target covariance into keyframe normals
    if (!this->normals_initialized) {
        this->keyframe_normals.push_back(this->gicp_s2s.getTargetCovariances());
        this->normals_initialized = true;
    }

    // Get the local S2S transform
    Eigen::Matrix4f T_S2S = this->gicp_s2s.getFinalTransformation();

    // Get the global S2S transform
    this->propagateS2S(T_S2S);

    // reuse covariances from s2s for s2m
    this->gicp.source_covs_ = this->gicp_s2s.source_covs_;

    // Swap source and target (which also swaps KdTrees internally) for next S2S
    this->gicp_s2s.swapSourceAndTarget();

    //
    // FRAME-TO-SUBMAP
    //

    // Get current global submap
    this->getSubmapKeyframes();

    if (this->submap_hasChanged) {
        // Set the current global submap as the target cloud
        // (skipped when unchanged to avoid rebuilding the target KdTree)
        this->gicp.setInputTarget(this->submap_cloud);

        // Set target cloud's normals as submap normals
        this->gicp.setTargetCovariances(this->submap_normals);
    }

    // Align with current submap with global S2S transformation as initial guess
    this->gicp.align(*aligned, this->T_s2s);

    // Get final transformation in global frame
    this->T = this->gicp.getFinalTransformation();

    // Set next target cloud as current source cloud
    *this->target_cloud = *this->source_cloud;
}

/**
 * Propagate S2S Alignment
 **/

// Accumulate the relative scan-to-scan transform into the global S2S pose
// and refresh the derived translation / rotation members.
// @param T  relative transform from the latest S2S alignment.
void lidarProcessor::propagateS2S(Eigen::Matrix4f T) {
    this->T_s2s = this->T_s2s_prev * T;
    this->T_s2s_prev = this->T_s2s;

    // Split the homogeneous transform into translation and rotation parts.
    this->pose_s2s = this->T_s2s.block<3, 1>(0, 3).cast<double>();
    this->rotSO3_s2s = this->T_s2s.block<3, 3>(0, 0).cast<double>();

    // Convert to a quaternion and normalize to absorb any numeric drift
    // accumulated in the rotation matrix.
    Eigen::Quaterniond q(this->rotSO3_s2s);
    q.normalize();
    this->rotq_s2s = q;
}

/**
 * Propagate S2M Alignment
 **/

void lidarProcessor::propagateS2M() {
    this->pose << this->T(0, 3), this->T(1, 3), this->T(2, 3);
    this->rotSO3 << this->T(0, 0), this->T(0, 1), this->T(0, 2), this->T(1, 0),
        this->T(1, 1), this->T(1, 2), this->T(2, 0), this->T(2, 1),
        this->T(2, 2);

    Eigen::Quaterniond q(this->rotSO3);

    // Normalize quaternion
    double norm =
        sqrt(q.w() * q.w() + q.x() * q.x() + q.y() * q.y() + q.z() * q.z());
    q.w() /= norm;
    q.x() /= norm;
    q.y() /= norm;
    q.z() /= norm;
    this->rotq = q;
}

/**
 * Transform Current Scan
 **/

// Express the current (preprocessed) scan in the global frame using the
// latest pose estimate this->T, storing the result in current_scan_t.
void lidarProcessor::transformCurrentScan() {
    this->current_scan_t.reset(new pcl::PointCloud<PointType>);
    pcl::transformPointCloud(*this->current_scan, *this->current_scan_t,
                             this->T);
}

/**
 * Compute Metrics
 **/

// Aggregate per-scan environment metrics (currently only spaciousness).
// Runs on a detached thread spawned from process().
void lidarProcessor::computeMetrics() { this->computeSpaciousness(); }

/**
 * Compute Spaciousness of Current Scan
 **/

// Estimate the "spaciousness" of the environment as the low-pass-filtered
// median point range of the current scan, appended to metrics.spaciousness.
void lidarProcessor::computeSpaciousness() {
    // Compute the range of every point.
    // BUGFIX: the loop previously used `i <= points.size()`, reading one
    // element past the end of the cloud (undefined behavior).
    std::vector<float> ds;
    ds.reserve(this->current_scan->points.size());
    for (std::size_t i = 0; i < this->current_scan->points.size(); i++) {
        const auto& p = this->current_scan->points[i];
        ds.push_back(std::sqrt(p.x * p.x + p.y * p.y + p.z * p.z));
    }

    // Guard against an empty scan (nth_element/indexing on an empty vector
    // would be undefined behavior).
    if (ds.empty()) {
        return;
    }

    // Median via partial selection (cheaper than a full sort).
    std::nth_element(ds.begin(), ds.begin() + ds.size() / 2, ds.end());
    float median_curr = ds[ds.size() / 2];

    // First-order low-pass filter to smooth scan-to-scan jitter.
    static float median_prev = median_curr;
    float median_lpf = 0.95 * median_prev + 0.05 * median_curr;
    median_prev = median_lpf;

    this->metrics.spaciousness.push_back(median_lpf);
}

/**
 * Convex Hull of Keyframes
 **/

void lidarProcessor::computeConvexHull() {
    // at least 4 keyframes for convex hull
    if (this->num_keyframes < 4) {
        return;
    }

    // create a pointcloud with points at keyframes
    pcl::PointCloud<PointType>::Ptr cloud =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);

    for (const auto& k : this->keyframes) {
        PointType pt;
        pt.x = k.first.first[0];
        pt.y = k.first.first[1];
        pt.z = k.first.first[2];
        cloud->push_back(pt);
    }

    // calculate the convex hull of the point cloud
    this->convex_hull.setInputCloud(cloud);

    // get the indices of the keyframes on the convex hull
    pcl::PointCloud<PointType>::Ptr convex_points =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);
    this->convex_hull.reconstruct(*convex_points);

    pcl::PointIndices::Ptr convex_hull_point_idx =
        pcl::PointIndices::Ptr(new pcl::PointIndices);
    this->convex_hull.getHullPointIndices(*convex_hull_point_idx);

    this->keyframe_convex.clear();
    for (int i = 0; i < convex_hull_point_idx->indices.size(); ++i) {
        this->keyframe_convex.push_back(convex_hull_point_idx->indices[i]);
    }
}

/**
 * Concave Hull of Keyframes
 **/

void lidarProcessor::computeConcaveHull() {
    // at least 5 keyframes for concave hull
    if (this->num_keyframes < 5) {
        return;
    }

    // create a pointcloud with points at keyframes
    pcl::PointCloud<PointType>::Ptr cloud =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);

    for (const auto& k : this->keyframes) {
        PointType pt;
        pt.x = k.first.first[0];
        pt.y = k.first.first[1];
        pt.z = k.first.first[2];
        cloud->push_back(pt);
    }

    // calculate the concave hull of the point cloud
    this->concave_hull.setInputCloud(cloud);

    // get the indices of the keyframes on the concave hull
    pcl::PointCloud<PointType>::Ptr concave_points =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);
    this->concave_hull.reconstruct(*concave_points);

    pcl::PointIndices::Ptr concave_hull_point_idx =
        pcl::PointIndices::Ptr(new pcl::PointIndices);
    this->concave_hull.getHullPointIndices(*concave_hull_point_idx);

    this->keyframe_concave.clear();
    for (int i = 0; i < concave_hull_point_idx->indices.size(); ++i) {
        this->keyframe_concave.push_back(concave_hull_point_idx->indices[i]);
    }
}

/**
 * Update keyframes
 **/

// Decide whether the current registered scan should become a new keyframe,
// based on its translational and rotational offset to the closest existing
// keyframe, and if so append it to the keyframe containers.
void lidarProcessor::updateKeyframes() {
    // transform point cloud into the odometry frame
    // (presumably fills current_scan_t, which is consumed below — the
    // helper's body is not visible here)
    this->transformCurrentScan();

    // calculate difference in pose and rotation to all poses in trajectory
    float closest_d = std::numeric_limits<float>::infinity();
    int closest_idx = 0;
    int keyframes_idx = 0;

    int num_nearby = 0;

    for (const auto& k : this->keyframes) {
        // calculate distance between current pose and pose in keyframes
        float delta_d = sqrt(pow(this->pose[0] - k.first.first[0], 2) +
                             pow(this->pose[1] - k.first.first[1], 2) +
                             pow(this->pose[2] - k.first.first[2], 2));

        // count keyframes near the current pose (within 1.5x the keyframe
        // distance threshold); used below to allow rotation-only keyframes
        // in sparsely covered areas
        if (delta_d <= this->keyframe_thresh_dist_ * 1.5) {
            ++num_nearby;
        }

        // track the closest keyframe seen so far
        if (delta_d < closest_d) {
            closest_d = delta_d;
            closest_idx = keyframes_idx;
        }

        keyframes_idx++;
    }

    // get closest pose and corresponding rotation
    Eigen::Vector3d closest_pose = this->keyframes[closest_idx].first.first;
    Eigen::Quaterniond closest_pose_r =
        this->keyframes[closest_idx].first.second;

    // calculate distance between current pose and closest pose from above
    float dd = sqrt(pow(this->pose[0] - closest_pose[0], 2) +
                    pow(this->pose[1] - closest_pose[1], 2) +
                    pow(this->pose[2] - closest_pose[2], 2));

    // relative rotation between the current orientation and the closest
    // keyframe's orientation
    Eigen::Quaterniond dq = this->rotq * (closest_pose_r.inverse());

    // rotation angle (axis-angle magnitude) of dq, converted to degrees
    float theta_rad =
        atan2(sqrt(pow(dq.x(), 2) + pow(dq.y(), 2) + pow(dq.z(), 2)), dq.w());
    float theta_deg = theta_rad * (180.0 / M_PI);

    // decide whether a new keyframe is warranted; the three checks below
    // are order-dependent (each may override the previous decision)
    bool newKeyframe = false;

    // trigger on either sufficient distance OR sufficient rotation
    if (abs(dd) > this->keyframe_thresh_dist_ ||
        abs(theta_deg) > this->keyframe_thresh_rot_) {
        newKeyframe = true;
    }
    // ...but veto if still within the distance threshold (this overrides a
    // rotation-only trigger from the first check)
    if (abs(dd) <= this->keyframe_thresh_dist_) {
        newKeyframe = false;
    }
    // ...unless the rotation is large AND the area has at most one nearby
    // keyframe: then accept a rotation-only keyframe after all
    if (abs(dd) <= this->keyframe_thresh_dist_ &&
        abs(theta_deg) > this->keyframe_thresh_rot_ && num_nearby <= 1) {
        newKeyframe = true;
    }

    if (newKeyframe) {
        ++this->num_keyframes;

        // voxelization for submap (downsample in place before storing)
        if (this->vf_submap_use_) {
            this->vf_submap.setInputCloud(this->current_scan_t);
            this->vf_submap.filter(*this->current_scan_t);
        }

        // update keyframe vector: (pose, orientation) plus the transformed scan
        this->keyframes.push_back(std::make_pair(
            std::make_pair(this->pose, this->rotq), this->current_scan_t));

        // update keyframe normals (covariances from the scan-to-scan GICP),
        // kept index-aligned with this->keyframes
        this->keyframe_normals.push_back(this->gicp_s2s.getSourceCovariances());

        *this->keyframes_cloud += *this->current_scan_t;

        // publish-facing keyframe state; guarded by mtx_keyframe because it
        // is read from the detached publisher threads
        {
            mtx_keyframe.lock();
            *this->keyframe_cloud = *this->current_scan_t;
            // *this->keyframes_map += *current_scan_t;

            this->kf.header.stamp = this->scan_stamp;
            this->kf.header.frame_id = this->odom_frame;
            this->kf.child_frame_id = this->child_frame;

            this->kf.pose.pose.position.x = this->pose[0];
            this->kf.pose.pose.position.y = this->pose[1];
            this->kf.pose.pose.position.z = this->pose[2];

            this->kf.pose.pose.orientation.w = this->rotq.w();
            this->kf.pose.pose.orientation.x = this->rotq.x();
            this->kf.pose.pose.orientation.y = this->rotq.y();
            this->kf.pose.pose.orientation.z = this->rotq.z();

            mtx_keyframe.unlock();
        }
    }
}

/**
 * Set Adaptive Parameters
 **/

// Adapt the keyframe spacing to the environment: wider (more spacious)
// surroundings place keyframes further apart.
void lidarProcessor::setAdaptiveParams() {
    // latest spaciousness metric drives the keyframe distance threshold
    const auto spaciousness = this->metrics.spaciousness.back();

    if (spaciousness > 20.0) {
        this->keyframe_thresh_dist_ = 10.0;
    } else if (spaciousness > 10.0) {
        this->keyframe_thresh_dist_ = 5.0;
    } else if (spaciousness > 5.0) {
        this->keyframe_thresh_dist_ = 1.0;
    } else {
        this->keyframe_thresh_dist_ = 0.5;
    }

    // the concave hull alpha tracks the keyframe spacing
    this->concave_hull.setAlpha(this->keyframe_thresh_dist_);
}

/**
 * Push Submap Keyframe Indices
 **/
// Select the indices of the (at most) k smallest distances in `dists` and
// append them to submap_kf_idx_curr. A max-heap of size k is maintained so
// that after one pass its top is the k-th smallest distance; every index
// whose distance is <= that value is pushed (ties can yield more than k).
//
// @param dists distances from the current pose to each candidate keyframe
// @param k     number of nearest neighbors to keep (ties included)
void lidarProcessor::pushSubmapIndices(std::vector<float> dists, int k) {
    // Guard: with no candidates (or a non-positive k) the pq.top() call
    // below would be undefined behavior on an empty heap.
    if (dists.empty() || k <= 0) {
        return;
    }

    // maintain max heap of at most k elements
    std::priority_queue<float> pq;
    const size_t k_sz = static_cast<size_t>(k);  // avoid signed/unsigned mix

    for (auto d : dists) {
        if (pq.size() < k_sz) {
            pq.push(d);
        } else if (pq.top() > d) {
            // heap is full and d is smaller than the current k-th smallest:
            // replace the largest retained element
            pq.push(d);
            pq.pop();
        }
    }

    // the kth smallest element sits at the top of the max heap
    float kth_element = pq.top();

    // keep every index whose distance is <= the kth smallest element
    for (size_t i = 0; i < dists.size(); ++i) {
        if (dists[i] <= kth_element)
            this->submap_kf_idx_curr.push_back(static_cast<int>(i));
    }
}

/**
 * Get Submap using Nearest Neighbor Keyframes
 **/

// Build the local submap for scan-to-map registration by concatenating the
// clouds (and normals) of: the K nearest keyframes overall, the K nearest
// keyframes on the convex hull, and the K nearest on the concave hull.
// Sets submap_hasChanged so callers know whether to rebuild the kd-tree.
void lidarProcessor::getSubmapKeyframes() {
    // reinitialize submap cloud and normals
    this->submap_cloud =
        pcl::PointCloud<PointType>::Ptr(new pcl::PointCloud<PointType>);
    this->submap_normals.clear();

    // clear vector of keyframe indices to use for submap
    this->submap_kf_idx_curr.clear();

    //
    // TOP K NEAREST NEIGHBORS FROM ALL KEYFRAMES
    //

    // distance from the current pose to every keyframe pose
    std::vector<float> ds;
    ds.reserve(this->keyframes.size());
    for (const auto& k : this->keyframes) {
        float d = sqrt(pow(this->pose[0] - k.first.first[0], 2) +
                       pow(this->pose[1] - k.first.first[1], 2) +
                       pow(this->pose[2] - k.first.first[2], 2));
        ds.push_back(d);
    }

    // get indices for top K nearest neighbor keyframe poses
    this->pushSubmapIndices(ds, this->submap_knn_);

    //
    // TOP K NEAREST NEIGHBORS FROM CONVEX HULL
    //

    // get convex hull indices
    this->computeConvexHull();

    // get distances for each keyframe on convex hull
    std::vector<float> convex_ds;
    for (const auto& c : this->keyframe_convex) {
        convex_ds.push_back(ds[c]);
    }

    // get indices for top kNN for convex hull
    // NOTE(review): pushSubmapIndices records positions WITHIN convex_ds,
    // not the keyframe indices keyframe_convex[i] they correspond to —
    // verify this aliasing is intended before relying on hull contributions
    this->pushSubmapIndices(convex_ds, this->submap_kcv_);

    //
    // TOP K NEAREST NEIGHBORS FROM CONCAVE HULL
    //

    // get concave hull indices
    this->computeConcaveHull();

    // get distances for each keyframe on concave hull
    std::vector<float> concave_ds;
    for (const auto& c : this->keyframe_concave) {
        concave_ds.push_back(ds[c]);
    }

    // get indices for top kNN for concave hull (same aliasing caveat as above)
    this->pushSubmapIndices(concave_ds, this->submap_kcc_);

    //
    // BUILD SUBMAP
    //

    // sort and deduplicate the selected keyframe indices; the list stays
    // sorted after unique/erase, so no second sort is needed afterwards
    std::sort(this->submap_kf_idx_curr.begin(), this->submap_kf_idx_curr.end());
    auto last = std::unique(this->submap_kf_idx_curr.begin(),
                            this->submap_kf_idx_curr.end());
    this->submap_kf_idx_curr.erase(last, this->submap_kf_idx_curr.end());
    for (auto k : this->submap_kf_idx_curr) {
        // concatenate the selected keyframe cloud into the submap
        *this->submap_cloud += *this->keyframes[k].second;

        // grab corresponding submap cloud's normals
        this->submap_normals.insert(std::end(this->submap_normals),
                                    std::begin(this->keyframe_normals[k]),
                                    std::end(this->keyframe_normals[k]));
    }

    // sort the previous submap index list so the comparison below checks
    // set equality regardless of ordering
    std::sort(this->submap_kf_idx_prev.begin(), this->submap_kf_idx_prev.end());

    // the target kd-tree only needs rebuilding if the submap changed
    if (this->submap_kf_idx_curr == this->submap_kf_idx_prev) {
        this->submap_hasChanged = false;
    } else {
        this->submap_hasChanged = true;
    }

    this->submap_kf_idx_prev = this->submap_kf_idx_curr;
}

/**
 * Debug Statements
 **/

// Gather run statistics (trajectory length, timing, RAM, CPU) and print a
// status dashboard to the terminal. Intended to run on a detached thread.
void lidarProcessor::debug() {
    // Total length traversed: sum consecutive trajectory displacements,
    // skipping steps below 5 cm so pose jitter does not inflate the total
    double length_traversed = 0.;
    Eigen::Vector3d p_curr = Eigen::Vector3d(0., 0., 0.);
    Eigen::Vector3d p_prev = Eigen::Vector3d(0., 0., 0.);
    for (const auto& t : this->trajectory) {
        // seed the previous point on the first iteration (an actual pose at
        // the exact origin would also re-seed here)
        if (p_prev == Eigen::Vector3d(0., 0., 0.)) {
            p_prev = t.first;
            continue;
        }
        p_curr = t.first;
        double l =
            sqrt(pow(p_curr[0] - p_prev[0], 2) + pow(p_curr[1] - p_prev[1], 2) +
                 pow(p_curr[2] - p_prev[2], 2));

        if (l >= 0.05) {
            length_traversed += l;
            p_prev = p_curr;
        }
    }

    // if (length_traversed == 0) {
    //     this->publish_keyframe_thread =
    //         std::thread(&lidarProcessor::publishKeyframes, this);
    //     this->publish_keyframe_thread.detach();
    // }

    // Average computation time over all processed scans, in seconds
    double avg_comp_time =
        std::accumulate(this->comp_times.begin(), this->comp_times.end(), 0.0) /
        this->comp_times.size();

    // RAM Usage: parse this process's /proc/self/stat (Linux-only); only
    // vsize (bytes) and rss (pages) are used, the leading fields are read
    // into throwaway strings to reach them
    double vm_usage = 0.0;
    double resident_set = 0.0;
    std::ifstream stat_stream(
        "/proc/self/stat", std::ios_base::in);  // get info from proc directory
    std::string pid, comm, state, ppid, pgrp, session, tty_nr;
    std::string tpgid, flags, minflt, cminflt, majflt, cmajflt;
    std::string utime, stime, cutime, cstime, priority, nice;
    std::string num_threads, itrealvalue, starttime;
    unsigned long vsize;
    long rss;
    stat_stream >> pid >> comm >> state >> ppid >> pgrp >> session >> tty_nr >>
        tpgid >> flags >> minflt >> cminflt >> majflt >> cmajflt >> utime >>
        stime >> cutime >> cstime >> priority >> nice >> num_threads >>
        itrealvalue >> starttime >> vsize >> rss;  // don't care about the rest
    stat_stream.close();
    long page_size_kb = sysconf(_SC_PAGE_SIZE) /
                        1024;  // for x86-64 is configured to use 2MB pages
    vm_usage = vsize / 1024.0;       // bytes -> KB
    resident_set = rss * page_size_kb;  // pages -> KB

    // CPU Usage: fraction of CPU time consumed by this process since the
    // previous call, normalized by core count (delta-based, so the first
    // call and clock wraparound report -1)
    struct tms timeSample;
    clock_t now;
    double cpu_percent;
    now = times(&timeSample);
    if (now <= this->lastCPU || timeSample.tms_stime < this->lastSysCPU ||
        timeSample.tms_utime < this->lastUserCPU) {
        // overflow / first sample: value is unreliable
        cpu_percent = -1.0;
    } else {
        cpu_percent = (timeSample.tms_stime - this->lastSysCPU) +
                      (timeSample.tms_utime - this->lastUserCPU);
        cpu_percent /= (now - this->lastCPU);
        cpu_percent /= this->numProcessors;
        cpu_percent *= 100.;
    }
    this->lastCPU = now;
    this->lastSysCPU = timeSample.tms_stime;
    this->lastUserCPU = timeSample.tms_utime;
    this->cpu_percents.push_back(cpu_percent);
    double avg_cpu_usage = std::accumulate(this->cpu_percents.begin(),
                                           this->cpu_percents.end(), 0.0) /
                           this->cpu_percents.size();

    // Print to terminal (ANSI escape: clear screen and home the cursor)
    printf("\033[2J\033[1;1H");

    std::cout << std::endl
              << "==== Direct LiDAR Odometry v" << this->version_
              << " ====" << std::endl;

    std::cout << std::endl
              << this->cpu_type << " x " << this->numProcessors << std::endl;

    std::cout << std::endl << std::setprecision(4) << std::fixed;
    std::cout << "Position    [xyz]  :: " << this->pose[0] << " "
              << this->pose[1] << " " << this->pose[2] << std::endl;
    std::cout << "Orientation [wxyz] :: " << this->rotq.w() << " "
              << this->rotq.x() << " " << this->rotq.y() << " "
              << this->rotq.z() << std::endl;
    std::cout << "Distance Traveled  :: " << length_traversed << " meters"
              << std::endl;
    std::cout << "Distance to Origin :: "
              << sqrt(pow(this->pose[0], 2) + pow(this->pose[1], 2) +
                      pow(this->pose[2], 2))
              << " meters" << std::endl;

    std::cout << std::endl << std::right << std::setprecision(2) << std::fixed;
    std::cout << "Computation Time :: " << std::setfill(' ') << std::setw(6)
              << this->comp_times.back() * 1000.
              << " ms    // Avg: " << std::setw(5) << avg_comp_time * 1000.
              << std::endl;
    std::cout << "Cores Utilized   :: " << std::setfill(' ') << std::setw(6)
              << (cpu_percent / 100.) * this->numProcessors
              << " cores // Avg: " << std::setw(5)
              << (avg_cpu_usage / 100.) * this->numProcessors << std::endl;
    std::cout << "CPU Load         :: " << std::setfill(' ') << std::setw(6)
              << cpu_percent << " %     // Avg: " << std::setw(5)
              << avg_cpu_usage << std::endl;
    // NOTE(review): resident_set/vm_usage are in KB here, so dividing by
    // 1000 (not 1024) prints approximate MB — confirm intended
    std::cout << "RAM Allocation   :: " << std::setfill(' ') << std::setw(6)
              << resident_set / 1000. << " MB    // VSZ: " << vm_usage / 1000.
              << " MB" << std::endl;
}

/////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////

// Main per-scan entry point. Validates the scan and IMU batch, runs the
// one-time initialization path, then preprocesses the scan, estimates the
// new pose, updates keyframes/trajectory, and kicks off the detached
// publish/debug threads.
//
// @param timeStamp scan timestamp in seconds
// @param rawPoints raw input point cloud for this scan
// @param imuMeas   IMU measurements covering this scan interval
void lidarProcessor::process(const double& timeStamp,
                             pcl::PointCloud<PointType>::Ptr& rawPoints,
                             imuMeasurements::Ptr& imuMeas) {
    double then = ros::Time::now().toSec();  // wall-clock start, for comp_times
    this->scan_stamp = ros::Time().fromSec(timeStamp);
    this->curr_frame_stamp = timeStamp;

    // If there are too few points in the pointcloud, skip this scan
    this->current_scan = rawPoints;
    if (this->current_scan->points.size() < this->gicp_min_num_points_) {
        LOG(WARNING) << "Low number of points!";
        return;
    }

    // Reject scans whose IMU coverage is insufficient
    this->current_imu_meas = imuMeas;
    if (!this->current_imu_meas->discretization()) {
        LOG(WARNING) << "imu measurement is not enough!";
        return;
    }

    // DLO Initialization procedures (IMU calib, gravity align); retried on
    // each scan until gravity alignment succeeds
    if (!this->dlo_initialized) {
        this->initializeDLO2();
        return;
    }

    // Preprocess points
    this->preprocessPoints();

    // Compute Metrics on a detached thread (results consumed on later scans)
    this->metrics_thread = std::thread(&lidarProcessor::computeMetrics, this);
    this->metrics_thread.detach();

    // Set Adaptive Parameters
    if (this->adaptive_params_use_) {
        this->setAdaptiveParams();
    }

    // Set initial frame as target
    if (this->target_cloud == nullptr) {
        this->initializeInputTarget2();
        return;
    }

    // Set source frame: re-point the shared pointer directly. (The previous
    // code allocated a fresh cloud here and immediately overwrote the
    // pointer with current_scan, discarding the allocation.)
    this->source_cloud = this->current_scan;

    this->getNextPose2();

    // Update current keyframe poses and map
    this->updateKeyframes2();

    // Update trajectory
    this->trajectory.push_back(std::make_pair(this->pose, this->rotq));

    // Update next time stamp
    this->prev_frame_stamp = this->curr_frame_stamp;

    // Update some statistics
    this->comp_times.push_back(ros::Time::now().toSec() - then);

    // Publish stuff to ROS
    this->publish_thread = std::thread(&lidarProcessor::publishToROS, this);
    this->publish_thread.detach();

    // Debug statements and publish custom DLO message
    this->debug_thread = std::thread(&lidarProcessor::debug, this);
    this->debug_thread.detach();
}

// One-time odometry initialization: succeeds only once gravity alignment
// has produced an initial orientation; until then, callers keep retrying.
void lidarProcessor::initializeDLO2() {
    std::cout << "Aligning to gravity... ";
    const bool gravity_aligned = this->gravityAlign2();
    if (!gravity_aligned) {
        // not enough IMU data yet; try again on the next scan
        return;
    }

    this->dlo_initialized = true;
    std::cout << "DLO initialized! Starting localization..." << std::endl;
}

// Estimate the pose of the current scan: predict forward with IMU
// preintegration, then refine with a Ceres problem combining a point-to-
// plane ICP term against the keyframe submap, an IMU preintegration
// factor, and a prior on the initial pose.
void lidarProcessor::getNextPose2() {
    // working copy of the current scan (points are sampled from it below)
    pcl::PointCloud<PointType>::Ptr aligned(new pcl::PointCloud<PointType>);
    *aligned = *this->current_scan;

    // Current lidar pose -> IMU frame via the lidar-IMU extrinsic relPose
    Eigen::Matrix4d Transformation_init_lidar = Eigen::Matrix4d::Identity();
    Transformation_init_lidar.block<3, 3>(0, 0) = this->rotSO3;
    Transformation_init_lidar.block<3, 1>(0, 3) = this->pose;
    Eigen::Matrix4d Transformation_init_imu =
        Transformation_init_lidar * relPose.inverse();

    // Propagate the IMU state over the scan interval to predict pose/velocity
    this->current_imu_meas->preintegration();
    Eigen::Vector3d P_init = Transformation_init_imu.block<3, 1>(0, 3),
                    P_predict;
    Eigen::Vector3d V_init = this->velocity, V_predict;
    Eigen::Matrix3d R_init = Transformation_init_imu.block<3, 3>(0, 0),
                    R_predict;
    Eigen::Vector3d gw(0, 0, 9.81);  // gravity in the world frame
    this->current_imu_meas->integration(P_init, V_init, R_init, gw, P_predict,
                                        V_predict, R_predict);

    Eigen::Matrix4d Transformation_predict_imu = Eigen::Matrix4d::Identity();
    Transformation_predict_imu.block<3, 3>(0, 0) = R_predict;
    Transformation_predict_imu.block<3, 1>(0, 3) = P_predict;

    // predicted lidar pose (back through the extrinsic); also written to the
    // member state as the initial guess before optimization
    Eigen::Matrix4d Transformation_predict_lidar =
        Transformation_predict_imu * relPose;
    this->pose = Transformation_predict_lidar.block<3, 1>(0, 3);
    this->rotSO3 = Transformation_predict_lidar.block<3, 3>(0, 0);
    this->rotq = Eigen::Quaterniond(this->rotSO3);

    // select the local submap around the predicted pose
    this->getSubmapKeyframes();

    if (this->submap_hasChanged) {
        // Set the current global submap as the target cloud
        this->kdtreeSubMap.setInputCloud(this->submap_cloud);
    }

    icpEstimation::Ptr estimation(new icpEstimation(2));

    ceres::Problem problemIcp;
    ceres::LossFunction* loss_function;
    ceres::Solver::Options problemIcpOptions;
    ceres::Solver::Summary summary;
    std::vector<ceres::ResidualBlockId> residual_block_ids;
    {
        {  // Vector2Double: seed the raw parameter arrays from the IMU states
            estimation->imuPos[0] = P_init;
            estimation->imuPos[1] = P_predict;
            estimation->imuRot[0] = R_init;
            estimation->imuRot[1] = R_predict;
            estimation->imuVel[0] = V_init;
            estimation->imuVel[1] = V_predict;
            // NOTE(review): accel/gyro bias slots are seeded with POSITIONS
            // (P_init/P_predict), not bias estimates — looks like a
            // copy-paste; verify against icpEstimation::vector2double
            estimation->imuAccBias[0] = P_init;
            estimation->imuAccBias[1] = P_predict;
            estimation->imuGyrBias[0] = P_init;
            estimation->imuGyrBias[1] = P_predict;
            estimation->relPose = relPose;

            estimation->vector2double();
        }

        // problem options: tight iteration/time budget for real-time use
        problemIcpOptions.linear_solver_type = ceres::DENSE_SCHUR;
        // options.num_threads = 2;
        problemIcpOptions.trust_region_strategy_type = ceres::DOGLEG;
        problemIcpOptions.max_num_iterations = 5;
        // options.use_explicit_schur_complement = true;
        // options.minimizer_progress_to_stdout = true;
        // options.use_nonmonotonic_steps = true;
        problemIcpOptions.max_solver_time_in_seconds = 0.04;

        // robust loss to downweight bad point-to-plane correspondences
        loss_function = new ceres::HuberLoss(1.0);

        // IMU poses at scan begin/end: 7-dof (position + quaternion) blocks
        // with an on-manifold parameterization, plus 9-dof speed/bias blocks
        ceres::LocalParameterization* local_parameterization1 =
            new PoseLocalParameterization();
        problemIcp.AddParameterBlock(estimation->para_Pose[0], 7,
                                     local_parameterization1);
        problemIcp.AddParameterBlock(estimation->para_SpeedBias[0], 9);
        ceres::LocalParameterization* local_parameterization2 =
            new PoseLocalParameterization();
        problemIcp.AddParameterBlock(estimation->para_Pose[1], 7,
                                     local_parameterization2);
        problemIcp.AddParameterBlock(estimation->para_SpeedBias[1], 9);

        // if (1) {
        //     problem.SetParameterBlockConstant(para_Pose[0]);
        // }

        // lidar-IMU extrinsic (relPose): added but held constant below
        ceres::LocalParameterization* local_parameterization_ex =
            new PoseLocalParameterization();
        problemIcp.AddParameterBlock(estimation->para_Ex_Pose, 7,
                                     local_parameterization_ex);
        if (1) {
            problemIcp.SetParameterBlockConstant(estimation->para_Ex_Pose);
        }
    }

    // outer re-association loop: re-find correspondences after each solve
    int iterNum = 2;
    int maxResidualNum = 300;  // cap on sampled points per iteration
    for (int iterCount = 0; iterCount < iterNum; iterCount++) {
        auto Indexs =
            getRamdomIndex((int)aligned->points.size(), maxResidualNum, 0);
        for (auto index : Indexs) {
            const auto& pointOri = aligned->points[index];
            // per-point time ratio for continuous-time interpolation;
            // fixed at 0 here (no intra-scan motion interpolation)
            const double alpha_timestamp = 0;

            // transform the sampled point by the current pose estimate and
            // search its 5 nearest neighbors in the submap
            PointType pointSel;
            pointTransform(pointOri, pointSel, Transformation_predict_lidar);

            std::vector<int> pointSearchInd;
            std::vector<float> pointSearchSqDis;
            // TicToc t_search;
            this->kdtreeSubMap.nearestKSearch(pointSel, 5, pointSearchInd,
                                              pointSearchSqDis);
            // t_search_nearest += t_search.toc();

            // fit a plane to the 5 neighbors by solving A*n = -1
            Eigen::Matrix<double, 5, 3> matA0;
            Eigen::Matrix<double, 5, 1> matB0 =
                -1 * Eigen::Matrix<double, 5, 1>::Ones();
            // only accept correspondences whose farthest neighbor is < 1 m
            if (pointSearchSqDis[4] < 1.0) {
                for (int j = 0; j < 5; j++) {
                    matA0(j, 0) =
                        this->submap_cloud->points[pointSearchInd[j]].x;
                    matA0(j, 1) =
                        this->submap_cloud->points[pointSearchInd[j]].y;
                    matA0(j, 2) =
                        this->submap_cloud->points[pointSearchInd[j]].z;
                }
                // find the norm of plane
                Eigen::Vector3d reference_normal =
                    matA0.colPivHouseholderQr().solve(matB0);
                double negative_OA_dot_norm = 1 / reference_normal.norm();
                reference_normal.normalize();
                Eigen::Vector3d reference_point = Eigen::Vector3d::Zero();

                // Here n(pa, pb, pc) is unit norm of plane
                bool planeValid = true;
                for (int j = 0; j < 5; j++) {
                    // if OX * n > 0.2, then plane is not fit well (a
                    // neighbor lies too far off the fitted plane)
                    if (fabs(reference_normal(0) *
                                 this->submap_cloud->points[pointSearchInd[j]]
                                     .x +
                             reference_normal(1) *
                                 this->submap_cloud->points[pointSearchInd[j]]
                                     .y +
                             reference_normal(2) *
                                 this->submap_cloud->points[pointSearchInd[j]]
                                     .z +
                             negative_OA_dot_norm) > 0.2) {
                        planeValid = false;
                        break;
                    }

                    // accumulate the neighbor centroid (only used when the
                    // plane is valid, i.e. all 5 neighbors were summed)
                    reference_point += Eigen::Vector3d(
                        this->submap_cloud->points[pointSearchInd[j]].x,
                        this->submap_cloud->points[pointSearchInd[j]].y,
                        this->submap_cloud->points[pointSearchInd[j]].z);
                }

                Eigen::Vector3d curr_point(pointOri.x, pointOri.y, pointOri.z);
                reference_point = reference_point / 5.0;

                if (planeValid) {
                    // one scalar point-to-plane residual tied to the begin
                    // pose, end pose, and (constant) extrinsic blocks
                    auto cost_function =
                        new ceres::AutoDiffCostFunction<CTPointToPlaneImuFactor,
                                                        1, 7, 7, 7>(
                            new CTPointToPlaneImuFactor(
                                reference_point, curr_point, reference_normal,
                                alpha_timestamp));

                    auto block_id = problemIcp.AddResidualBlock(
                        cost_function, loss_function, estimation->para_Pose[0],
                        estimation->para_Pose[1], estimation->para_Ex_Pose);
                    residual_block_ids.push_back(block_id);
                }
            }
        }

        {
            // IMU preintegration factor linking the begin/end states; only
            // added when the preintegrated interval is sane (< 10 s)
            if (this->current_imu_meas->preIntegration->sum_dt < 10.0) {
                ImuFactor* cost_function =
                    new ImuFactor(this->current_imu_meas->preIntegration);
                problemIcp.AddResidualBlock(
                    cost_function, NULL, estimation->para_Pose[0],
                    estimation->para_SpeedBias[0], estimation->para_Pose[1],
                    estimation->para_SpeedBias[1]);
            }

            // prior anchoring the begin pose to its pre-optimization value
            PriorPoseFactor* cost_function =
                new PriorPoseFactor(P_init, Eigen::Quaterniond(R_init));
            problemIcp.AddResidualBlock(cost_function, NULL,
                                        estimation->para_Pose[0]);
        }

        ceres::Solve(problemIcpOptions, &problemIcp, &summary);

        // copy the optimized raw parameters back into the Eigen states
        estimation->double2vector();

        Transformation_predict_imu = Eigen::Matrix4d::Identity();
        Transformation_predict_imu.block<3, 3>(0, 0) = estimation->imuRot[1];
        Transformation_predict_imu.block<3, 1>(0, 3) = estimation->imuPos[1];

        // lidar pose for the next re-association pass
        Transformation_predict_lidar =
            Transformation_predict_imu * estimation->relPose;

        // deviation of the solution from the IMU prediction
        double diff_trans = (estimation->imuPos[0] - P_init).norm() +
                            (estimation->imuPos[1] - P_predict).norm();
        double diff_rot = AngularDistance(estimation->imuRot[0], R_init) +
                          AngularDistance(estimation->imuRot[1], R_predict);

        // NOTE(review): the loop BREAKS when the update exceeds the
        // thresholds (divergence guard), i.e. large corrections stop the
        // re-association early rather than continuing — confirm intended
        double TH_trans = 1;
        double TH_rot = 5;
        if (diff_trans > TH_trans || diff_rot > TH_rot) {
            break;
        }
    }
}

// Seed the registration pipeline with the first scan: use it as the initial
// target cloud and create the first keyframe at the initial pose.
void lidarProcessor::initializeInputTarget2() {
    this->prev_frame_stamp = this->curr_frame_stamp;

    // Use the current scan directly as the first target. (The previous code
    // allocated a fresh cloud and immediately overwrote the shared pointer
    // with current_scan, discarding the allocation.)
    this->target_cloud = this->current_scan;

    // initialize keyframes: first scan transformed by the initial pose T
    pcl::PointCloud<PointType>::Ptr first_keyframe(
        new pcl::PointCloud<PointType>);
    pcl::transformPointCloud(*this->target_cloud, *first_keyframe, this->T);

    // keep history of keyframes
    this->keyframes.push_back(
        std::make_pair(std::make_pair(this->pose, this->rotq), first_keyframe));

    *this->keyframes_cloud += *first_keyframe;

    {
        // NOTE(review): unlike updateKeyframes(), kf is written here with
        // the mtx_keyframe locking commented out — confirm no concurrent
        // reader can run during initialization
        // mtx_keyframe.lock();
        *this->keyframe_cloud = *first_keyframe;
        // *this->keyframes_map += *first_keyframe;

        this->kf.header.stamp = this->scan_stamp;
        this->kf.header.frame_id = this->odom_frame;
        this->kf.child_frame_id = this->child_frame;

        this->kf.pose.pose.position.x = this->pose[0];
        this->kf.pose.pose.position.y = this->pose[1];
        this->kf.pose.pose.position.z = this->pose[2];

        this->kf.pose.pose.orientation.w = this->rotq.w();
        this->kf.pose.pose.orientation.x = this->rotq.x();
        this->kf.pose.pose.orientation.y = this->rotq.y();
        this->kf.pose.pose.orientation.z = this->rotq.z();

        // mtx_keyframe.unlock();
    }

    ++this->num_keyframes;
}

// Variant of updateKeyframes() used by the process() pipeline: decides
// whether the current registered scan becomes a new keyframe based on its
// distance/rotation offset to the closest existing keyframe.
// NOTE(review): unlike updateKeyframes(), this variant does NOT append to
// keyframe_normals, yet getSubmapKeyframes() indexes keyframe_normals by
// keyframe index — verify normals are populated elsewhere on this path.
void lidarProcessor::updateKeyframes2() {
    // transform point cloud into the odometry frame
    // (presumably fills current_scan_t, consumed below)
    this->transformCurrentScan();

    // calculate difference in pose and rotation to all poses in trajectory
    float closest_d = std::numeric_limits<float>::infinity();
    int closest_idx = 0;
    int keyframes_idx = 0;

    int num_nearby = 0;

    for (const auto& k : this->keyframes) {
        // calculate distance between current pose and pose in keyframes
        float delta_d = sqrt(pow(this->pose[0] - k.first.first[0], 2) +
                             pow(this->pose[1] - k.first.first[1], 2) +
                             pow(this->pose[2] - k.first.first[2], 2));

        // count keyframes near the current pose (within 1.5x the distance
        // threshold); used below for the rotation-only keyframe rule
        if (delta_d <= this->keyframe_thresh_dist_ * 1.5) {
            ++num_nearby;
        }

        // track the closest keyframe seen so far
        if (delta_d < closest_d) {
            closest_d = delta_d;
            closest_idx = keyframes_idx;
        }

        keyframes_idx++;
    }

    // get closest pose and corresponding rotation
    Eigen::Vector3d closest_pose = this->keyframes[closest_idx].first.first;
    Eigen::Quaterniond closest_pose_r =
        this->keyframes[closest_idx].first.second;

    // calculate distance between current pose and closest pose from above
    float dd = sqrt(pow(this->pose[0] - closest_pose[0], 2) +
                    pow(this->pose[1] - closest_pose[1], 2) +
                    pow(this->pose[2] - closest_pose[2], 2));

    // relative rotation to the closest keyframe's orientation
    Eigen::Quaterniond dq = this->rotq * (closest_pose_r.inverse());

    // rotation angle (axis-angle magnitude) of dq, in degrees
    float theta_rad =
        atan2(sqrt(pow(dq.x(), 2) + pow(dq.y(), 2) + pow(dq.z(), 2)), dq.w());
    float theta_deg = theta_rad * (180.0 / M_PI);

    // decide on a new keyframe; the three checks are order-dependent
    // (each may override the previous decision)
    bool newKeyframe = false;

    // trigger on sufficient distance OR sufficient rotation
    if (abs(dd) > this->keyframe_thresh_dist_ ||
        abs(theta_deg) > this->keyframe_thresh_rot_) {
        newKeyframe = true;
    }
    // ...but veto if still within the distance threshold
    if (abs(dd) <= this->keyframe_thresh_dist_) {
        newKeyframe = false;
    }
    // ...unless rotated enough AND at most one keyframe is nearby
    if (abs(dd) <= this->keyframe_thresh_dist_ &&
        abs(theta_deg) > this->keyframe_thresh_rot_ && num_nearby <= 1) {
        newKeyframe = true;
    }

    if (newKeyframe) {
        ++this->num_keyframes;

        // voxelization for submap (downsample in place before storing)
        if (this->vf_submap_use_) {
            this->vf_submap.setInputCloud(this->current_scan_t);
            this->vf_submap.filter(*this->current_scan_t);
        }

        // update keyframe vector
        this->keyframes.push_back(std::make_pair(
            std::make_pair(this->pose, this->rotq), this->current_scan_t));

        *this->keyframes_cloud += *this->current_scan_t;

        // publish-facing keyframe state; guarded by mtx_keyframe because it
        // is read from the detached publisher threads
        {
            mtx_keyframe.lock();
            *this->keyframe_cloud = *this->current_scan_t;
            // *this->keyframes_map += *current_scan_t;

            this->kf.header.stamp = this->scan_stamp;
            this->kf.header.frame_id = this->odom_frame;
            this->kf.child_frame_id = this->child_frame;

            this->kf.pose.pose.position.x = this->pose[0];
            this->kf.pose.pose.position.y = this->pose[1];
            this->kf.pose.pose.position.z = this->pose[2];

            this->kf.pose.pose.orientation.w = this->rotq.w();
            this->kf.pose.pose.orientation.x = this->rotq.x();
            this->kf.pose.pose.orientation.y = this->rotq.y();
            this->kf.pose.pose.orientation.z = this->rotq.z();

            mtx_keyframe.unlock();
        }
    }
}

// Motion-compensate a scan in place: rotate/translate each point from its
// capture time to the scan-end frame, assuming constant motion Tbe over
// the sweep. The per-point time offset is carried in the curvature field.
//
// @param pcl_in_out cloud undistorted in place
// @param dt_be      time span from scan begin to scan end
// @param Tbe        rigid motion of the sensor over the sweep (begin->end)
void lidarProcessor::UndistortPcl(const pcl::PointCloud<PointType>::Ptr &pcl_in_out,
                              double dt_be, const Sophus::SE3d &Tbe) 
{
  const Eigen::Vector3d &tbe = Tbe.translation();
  Eigen::Vector3d rso3_be = Tbe.so3().log();  // rotation as an so(3) vector
  for (auto &pt : pcl_in_out->points) {
    // time offset of this point from scan begin (stored in curvature)
    float dt_bi = pt.curvature;

    // NOTE(review): exact float comparison, and the point is still
    // undistorted below (no `continue`) — confirm both are intended
    if (dt_bi == 0) laserCloudtmp->push_back(pt);
    // NOTE(review): no guard against dt_be == 0 (division below)
    double ratio_bi = dt_bi / dt_be;
    /// Rotation from i-e: remaining fraction of the sweep after this point
    double ratio_ie = 1 - ratio_bi;

    // interpolate the rotation on the so(3) manifold
    Eigen::Vector3d rso3_ie = ratio_ie * rso3_be;
    SO3d Rie = SO3d::exp(rso3_ie);

    /// Transform to the 'end' frame, using only the rotation
    /// Note: Compensation direction is INVERSE of Frame's moving direction
    /// So if we want to compensate a point at timestamp-i to the frame-e
    /// P_compensate = R_ei * Pi + t_ei
    Eigen::Vector3d tie = ratio_ie * tbe;
    // Eigen::Vector3d tei = Eigen::Vector3d::Zero();
    Eigen::Vector3d v_pt_i(pt.x, pt.y, pt.z);
    Eigen::Vector3d v_pt_comp_e = Rie.inverse() * (v_pt_i - tie);

    /// Undistorted point
    pt.x = v_pt_comp_e.x();
    pt.y = v_pt_comp_e.y();
    pt.z = v_pt_comp_e.z();
  }
}

bool lidarProcessor::gravityAlign2() {
    // get average acceleration vector for 1 second and normalize
    Eigen::Vector3d lin_accel = Eigen::Vector3d::Zero();
    const double then = ros::Time::now().toSec();
    int n = 0;
    for (int i = 0; i < this->current_imu_meas->imus.size(); i++) {
        lin_accel[0] += this->current_imu_meas->imus[i].linear_acceleration.x;
        lin_accel[1] += this->current_imu_meas->imus[i].linear_acceleration.y;
        lin_accel[2] += this->current_imu_meas->imus[i].linear_acceleration.z;
        ++n;
    }
    lin_accel[0] /= n;
    lin_accel[1] /= n;
    lin_accel[2] /= n;

    // normalize
    double lin_norm = sqrt(pow(lin_accel[0], 2) + pow(lin_accel[1], 2) +
                           pow(lin_accel[2], 2));
    lin_accel[0] /= lin_norm;
    lin_accel[1] /= lin_norm;
    lin_accel[2] /= lin_norm;

    // define gravity vector (assume point downwards)
    Eigen::Vector3d grav;
    grav << 0, 0, 1;

    // calculate angle between the two vectors
    Eigen::Quaterniond grav_q =
        Eigen::Quaterniond::FromTwoVectors(lin_accel, grav);

    // normalize
    double grav_norm = sqrt(grav_q.w() * grav_q.w() + grav_q.x() * grav_q.x() +
                            grav_q.y() * grav_q.y() + grav_q.z() * grav_q.z());
    grav_q.w() /= grav_norm;
    grav_q.x() /= grav_norm;
    grav_q.y() /= grav_norm;
    grav_q.z() /= grav_norm;

    // set gravity aligned orientation
    Eigen::Matrix4d T_imu = Eigen::Matrix4d::identity();
    T_imu.block<3,3>(0,0) = grav_q.toRotationMatrix();
    
    this->T = T_imu * relPose;
    this->rotq = Eigen::Quaterniond(this->T.block<3,3>(0,0));
    this->rotSO3 = this->T.block<3,3>(0,0);

    // rpy
    auto euler = this->rotq.toRotationMatrix().eulerAngles(2, 1, 0);
    double yaw = euler[0] * (180.0 / M_PI);
    double pitch = euler[1] * (180.0 / M_PI);
    double roll = euler[2] * (180.0 / M_PI);

    std::cout << "done" << std::endl;
    std::cout << "  Roll [deg]: " << roll << std::endl;
    std::cout << "  Pitch [deg]: " << pitch << std::endl << std::endl;

    return true;
}

}  // namespace vilens