/*
 * Software License Agreement (BSD License)
 *
 *  Point Cloud Library (PCL) - www.pointclouds.org
 *  Copyright (c) 2011, Willow Garage, Inc.
 *
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of Willow Garage, Inc. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <iostream>
#include <algorithm>

#include <pcl/common/time.h>
#include <pcl/gpu/kinfu/kinfu.h> // internal.h now included inside kinfu.h

#include <Eigen/Core>
#include <Eigen/SVD>
#include <Eigen/Cholesky>
#include <Eigen/Geometry>
#include <Eigen/LU>

#ifdef HAVE_OPENCV
  #include <opencv2/opencv.hpp>
  #include <opencv2/gpu/gpu.hpp>
  #include <pcl/gpu/utils/timers_opencv.hpp>
#endif

#include <pcl/common/transforms.h>
#include <Eigen/Geometry>

using namespace std;
using namespace pcl::device;
using namespace pcl::gpu;

using Eigen::AngleAxisf;
using Eigen::Array3f;
using Eigen::Vector3i;
using Eigen::Vector3f;

// File-scope debug helpers. `stop` can short-circuit tracking (the check in
// operator() is currently commented out) and `cpt` is a counter that is not
// referenced anywhere in this file.
bool stop = false;
int cpt = 0;


namespace pcl
{
  namespace gpu
  {
    /// Converts a rotation matrix to its axis-angle (Rodrigues) vector.
    /// Defined elsewhere in the KinFu GPU module; used below to measure
    /// how much the camera rotated between frames.
    Eigen::Vector3f rodrigues2(const Eigen::Matrix3f& matrix);
  }
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @brief Constructs the tracker: allocates the TSDF volume, sets default
/// intrinsics/thresholds, allocates per-level GPU buffers and resets state.
/// @param rows depth image height in pixels.
/// @param cols depth image width in pixels.
pcl::gpu::KinfuTracker::KinfuTracker (int rows, int cols) : rows_(rows), cols_(cols), global_time_(0), max_icp_distance_(0), integration_metric_threshold_(0.f)
{
  const Vector3f volume_size = Vector3f::Constant (VOLUME_SIZE);
  const Vector3i volume_resolution(VOLUME_X, VOLUME_Y, VOLUME_Z);
 
 cpt_update = 0;
   
  tsdf_volume_ = TsdfVolume::Ptr( new TsdfVolume(volume_resolution) );
  tsdf_volume_->setSize(volume_size);
  
  setDepthIntrinsics (525.f, 525.f); // default values, can be overwritten
  
  // Initial camera pose: identity rotation, translation placing the camera
  // at the volume centre pulled back along Z by 20% of half the volume depth.
  init_Rcam_ = Eigen::Matrix3f::Identity ();// * AngleAxisf(-30.f/180*3.1415926, Vector3f::UnitX());
  init_tcam_ = volume_size * 0.5f - Vector3f (0, 0, volume_size (2) / 2 * 1.2f);

  // ICP iteration counts per pyramid level (coarse level gets fewest below,
  // since the loop in operator() walks levels from LEVELS-1 down to 0).
  const int iters[] = {10, 5, 4};
  std::copy (iters, iters + LEVELS, icp_iterations_);

  const float default_distThres = 0.10f; //meters
  // NOTE(review): 3.14159254f looks like a typo for pi (3.14159265f) — the
  // float difference is a fraction of an ulp in the sine, so it is harmless,
  // but worth confirming/correcting upstream.
  const float default_angleThres = sin (20.f * 3.14159254f / 180.f);
  const float default_tranc_dist = 0.03f; //meters

  setIcpCorespFilteringParams (default_distThres, default_angleThres);
  tsdf_volume_->setTsdfTruncDist (default_tranc_dist);

  allocateBufffers (rows, cols);

  rmats_.reserve (30000);
  tvecs_.reserve (30000);

  reset ();
  
  // Shifting / large-scale bookkeeping. Note these are (re)set AFTER reset(),
  // so reset() itself does not restore them (see the TODO inside reset()).
  hasShifted_ = false;
  updateCumulativeCloud_ = false;
  volume_shifting_enabled_ = true;
  last_scan = false;
  cubeCenterPoint = Eigen::Vector3f(0.f, 0.f, 0.f);
  cubeReferencePoint = Eigen::Vector3f(1.5f, 1.5f, 1.5f);
  ignore_sample_ = false;
  
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @brief Sets the depth camera intrinsics.
/// @param fx,fy focal lengths in pixels.
/// @param cx,cy principal point; the sentinel value -1 (the default) selects
///        the image centre computed from the stored image dimensions.
void
pcl::gpu::KinfuTracker::setDepthIntrinsics (float fx, float fy, float cx, float cy)
{
  fx_ = fx;
  fy_ = fy;

  if (cx == -1)
    cx_ = cols_ / 2 - 0.5f;
  else
    cx_ = cx;

  if (cy == -1)
    cy_ = rows_ / 2 - 0.5f;
  else
    cy_ = cy;
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @brief Sets the initial camera pose and resets the tracker so the new
/// pose becomes pose 0. (Name keeps the historical "Inital" spelling because
/// it is part of the public interface.)
/// @param pose initial camera-to-global transform.
void
pcl::gpu::KinfuTracker::setInitalCameraPose (const Eigen::Affine3f& pose)
{
  init_Rcam_ = pose.rotation ();
  init_tcam_ = pose.translation ();
  reset ();
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @brief Sets the far-plane truncation applied to the depth map before ICP.
/// @param max_icp_distance truncation distance; values <= 0 disable
///        truncation (see the guard in operator()).
void
pcl::gpu::KinfuTracker::setDepthTruncationForICP (float max_icp_distance)
{
  max_icp_distance_ = max_icp_distance;
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @brief Sets the minimum camera motion required before a new frame is
/// integrated into the TSDF (compared against (rnorm + tnorm)/2 in operator()).
/// @param threshold motion threshold; 0 integrates every frame.
void
pcl::gpu::KinfuTracker::setCameraMovementThreshold(float threshold)
{
  integration_metric_threshold_ = threshold;  
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @brief Sets the rejection thresholds used when filtering ICP correspondences.
/// @param distThreshold maximum point-to-point distance, in meters (the
///        constructor default is 0.10f m — the old "//mm" label was wrong).
/// @param sineOfAngle sine of the maximum angle between corresponding normals.
void
pcl::gpu::KinfuTracker::setIcpCorespFilteringParams (float distThreshold, float sineOfAngle)
{
  distThres_  = distThreshold; //meters
  angleThres_ = sineOfAngle;
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @return depth image width in pixels, as passed to the constructor.
int
pcl::gpu::KinfuTracker::cols ()
{
  return (cols_);
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @return depth image height in pixels, as passed to the constructor.
int
pcl::gpu::KinfuTracker::rows ()
{
  return (rows_);
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @brief Resets tracking: clears the pose history back to the initial pose,
/// wipes the TSDF (and color) volume and re-initialises the rolling buffer.
/// Note: large-scale flags set in the constructor (hasShifted_, last_scan,
/// etc.) are NOT restored here — see the TODO below.
void
pcl::gpu::KinfuTracker::reset()
{
  if (global_time_)
    cout << "Reset" << endl;

  global_time_ = 0;
  rmats_.clear ();
  tvecs_.clear ();

  // Pose 0 is always the initial camera pose.
  rmats_.push_back (init_Rcam_);
  tvecs_.push_back (init_tcam_);
  

  tsdf_volume_->reset();
    
  if (color_volume_) // color integration mode is enabled
    color_volume_->reset();    
    
  ///Reset rolling buffer as well
  init_buffer(&buffer_, tsdf_volume_);  
  
  ///TODO: WE NEED TO RESTORE THE VALUES OF ALL THE STUFF WE HAVE CREATED: FLAGS, CLOUDS, ETC ETC. 
    
}

///TODO: return where to shift
/// @brief Checks whether the camera position lies inside the rolling buffer's
/// shifting thresholds. Only X and Y are tested; Z is intentionally ignored.
/// @param camera_pose current camera pose (only its translation is read).
/// @return true if the camera is within bounds (no shift needed).
bool 
pcl::gpu::KinfuTracker::isCameraWithinBounds(Eigen::Affine3f &camera_pose)
{
  const Vector3f cam_t = camera_pose.translation();

  const bool within_x = cam_t(0) >= buffer_.shiftingThresholds_min_m.x &&
                        cam_t(0) <= buffer_.shiftingThresholds_max_m.x;

  const bool within_y = cam_t(1) >= buffer_.shiftingThresholds_min_m.y &&
                        cam_t(1) <= buffer_.shiftingThresholds_max_m.y;

  return (within_x && within_y);
}


/// @brief Debug helper: recomputes the min/max grid bounds that a shift of
/// (shiftX, shiftY, shiftZ) would clear, mirroring the device-side logic.
/// NOTE(review): the computed minBounds/maxBounds are never printed, returned
/// or otherwise used — this function is effectively dead code kept as a
/// reference for the bound calculations; consider removing or having it log.
void showMinMaxCalculations(tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ)
{
  
  int newX = buffer->origin_GRID.x + shiftX;
  int newY = buffer->origin_GRID.y + shiftY;
  int newZ = buffer->origin_GRID.z + shiftZ;
    
    int3 minBounds, maxBounds;
    
    //X
    if(newX >= 0)
    {
     minBounds.x = buffer->origin_GRID.x;
     maxBounds.x = newX;
    }
    else
    {
     // Negative shift wraps around the circular buffer.
     minBounds.x = newX + VOLUME_X;
     maxBounds.x = buffer->origin_GRID.x + VOLUME_X;
    }
   
    //Y
    if(newY >= 0)
    {
     minBounds.y = buffer->origin_GRID.y;
     maxBounds.y = newY;
    }
    else
    {
     minBounds.y = newY + VOLUME_Y;
     maxBounds.y = buffer->origin_GRID.y + VOLUME_Y;
    }
    
    //Z — NOTE(review): unlike X/Y, Z does not handle the negative-shift wrap
    // and uses the raw shift as the max bound; verify against the CUDA kernel.
     minBounds.z = buffer->origin_GRID.z;
     maxBounds.z = shiftZ;
}
///////////////////////////////////////////////////////////////////////////////////////////////LOAD TSDF TO GPU


/// @brief Converts a TSDF point cloud (x/y/z = voxel coordinates, intensity =
/// tsdf value in [-1,1]) into the packed short2 host representation expected
/// by the GPU volume (tsdf scaled by SHRT_MAX in .x, weight 1 in .y).
/// @param cloud input samples. NOTE(review): passed by value — a full copy of
///        the cloud per call; a const reference would avoid it, but the
///        signature must match the header declaration.
/// @param output flat voxel buffer, reinterpreted as short2 per element;
///        assumed to be sized VOLUME_X*VOLUME_Y*VOLUME_Z (not checked here).
void 
pcl::gpu::KinfuTracker::convertTsdfVectors(pcl::PointCloud<pcl::PointXYZI> cloud, std::vector<int> &output)
{
	//std::cout << "\t\tConverting TSDF..." << std::flush;

	  const int DIVISOR = 32767;     // SHRT_MAX;

    ///For every point in the cloud
#pragma omp parallel for
 	
	for(int i = 0; i < (int) cloud.points.size(); ++i)
	{
	  // Point coordinates are truncated to integer voxel indices.
	  int x = cloud.points[i].x;
	  int y = cloud.points[i].y;
	  int z = cloud.points[i].z;
	  
	  // NOTE(review): the lower bound uses > 0, so voxels on the 0 planes are
	  // skipped — confirm whether >= 0 was intended.
	  if(x > 0  && x < VOLUME_X && y > 0 && y < VOLUME_Y && z > 0 && z < VOLUME_Z)
	  {
	  ///Calculate the index to write (x fastest, then y, then z)
	  int dst_index = x + VOLUME_X * y + VOLUME_Y * VOLUME_X * z;
	        
	    short2& elem = *reinterpret_cast<short2*>(&output[dst_index]);
	    elem.x = static_cast<short>(cloud.points[i].intensity * DIVISOR);
	    elem.y = static_cast<short>(1);   
	  } 
  }
	//std::cout << ".Done!\n";
}


////////////////////////////////////////////////////////////////////////////////////////////////////////////////// LOAD EXISTING DATA TO TSDF : PUSH SLICE
/// @brief Uploads a previously extracted world-model slice back into the GPU
/// TSDF volume.
/// @param offsetX,offsetY,offsetZ currently unused; kept so the signature
///        matches the header declaration.
/// @param existingCloud TSDF samples to push (xyz = voxel coordinates,
///        intensity = tsdf value).
void pcl::gpu::KinfuTracker::pushSlice(int offsetX, int offsetY, int offsetZ, PointCloud<PointXYZI>::Ptr existingCloud)
{
  std::cout << " I will push the existing data! " << std::endl;
  
  const size_t gpu_array_size = existingCloud->points.size ();

  // Robustness fix: taking &points[0] of an empty cloud is undefined
  // behaviour, and there is nothing to upload anyway — bail out early.
  if (gpu_array_size == 0)
    return;

  const pcl::PointXYZI *firstPointPtr = &(existingCloud->points[0]);
  
  pcl::gpu::DeviceArray<pcl::PointXYZI> cloud_gpu;
  cloud_gpu.upload(firstPointPtr, gpu_array_size);
  
  std::cout << "Existing slice CPU cloud size: " << existingCloud->points.size() << std::endl;
  std::cout << "Existing slice GPU array size: " << cloud_gpu.size() << std::endl;
  
  // PointXYZI and float4 share the same 16-byte layout, so the device array
  // can be reinterpreted in place for the kernel call.
  DeviceArray<float4>& cloud_cast = (DeviceArray<float4>&) cloud_gpu;
  volume().pushCloudAsSlice(cloud_cast);
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/// @brief Extracts the TSDF slice that is about to be shifted out of the cube,
/// transforms it into world coordinates, merges it into the CPU world model,
/// fetches any previously stored data for the incoming region, and advances
/// the absolute grid origin by the shift offsets.
/// @param offsetX,offsetY,offsetZ shift in voxels along each axis; when
///        last_scan is set, the whole volume is fetched instead.
/// @param existing_Slice [out] data previously stored in the world model for
///        the region the cube is moving into.
/// @param has_previous_data [out] set to true iff existing_Slice was filled.
void pcl::gpu::KinfuTracker::extractSlice(int offsetX, int offsetY, int offsetZ, PointCloud<PointXYZI>::Ptr& existing_Slice, bool& has_previous_data)
{
  
  //ScopeTime time("\t[kinfu.cpp](extractSlice)");
  
  DeviceArray<PointXYZ> extracted;
  DeviceArray<float> intensities;
  
  int size;
        
    // Fetch the departing slice (or the full volume on the final scan) from
    // the GPU into the preallocated device buffers.
    if(!last_scan)
      size = volume().fetchSliceAsCloud (*cloud_buffer_device_, cloud_buffer_device_intensities_, &buffer_, offsetX, offsetY, offsetZ);
    else
      size = volume().fetchSliceAsCloud (*cloud_buffer_device_, cloud_buffer_device_intensities_, &buffer_, VOLUME_X, VOLUME_Y, VOLUME_Z);
   
  
  // Wrap only the valid prefix of the device buffers.
  extracted = DeviceArray<PointType> (cloud_buffer_device_->ptr (), size);
  intensities = DeviceArray<float> (cloud_buffer_device_intensities_.ptr(), size);
  
  PointCloud<PointXYZI>::Ptr current_slice_ (new PointCloud<PointXYZI>);
  PointCloud<PointXYZ>::Ptr current_slice_xyz (new PointCloud<PointXYZ>);
  PointCloud<PointIntensity>::Ptr current_slice_intensities (new PointCloud<PointIntensity>);

  
  {
    //ScopeTime time3("\t\t[kinfu.cpp](extractSlice)\t\t\t download tsdf");
    extracted.download (current_slice_xyz->points);  
  }
    
  current_slice_xyz->width = (int)current_slice_xyz->points.size ();
  current_slice_xyz->height = 1;
  
  std::vector<float , Eigen::aligned_allocator<float> > intensities_vector;
  
  {
    //ScopeTime time4("\t\t[kinfu.cpp](extractSlice)\t\t\t download intensities");
    intensities.download (intensities_vector);
  }
  
  
  {
   //ScopeTime time6("\t\t[kinfu.cpp](extractSlice)\t\t\t pushing back the points to acuumulated cloud"); 
  //concatenate XYZ and Intensities
  // NOTE(review): loop index is signed int against an unsigned size();
  // fine in practice for slice sizes, but generates a sign-compare warning.
  for(int i = 0 ; i < current_slice_xyz->points.size() ; ++i)
  {
    
    PointXYZI p;
    p.x = current_slice_xyz->points[i].x;
    p.y = current_slice_xyz->points[i].y;
    p.z = current_slice_xyz->points[i].z;
    p.intensity = intensities_vector[i];    
    current_slice_->points.push_back(p); /// here is where everything increases in time
  }
  //TODO replace this by pcl::concatenateFields (cloud_a, n_cloud_b, p_n_cloud_c);
  
  }
  current_slice_->width = (int)current_slice_->points.size ();
  current_slice_->height = 1;
    
  ///TRANSFORMING THE OBTAINED CLOUD IN CUBE COORDINATES TO ITS POSITION IN THE WORLD MODEL     
    // NOTE(review): origin_GRID_absolute is a grid (voxel) origin, and the
    // slice points are in voxel units here, so the translation is consistent
    // in voxel space — confirm the world model also works in voxel units.
    Eigen::Affine3f global_cloud_transformation; 
    global_cloud_transformation.translation()[0] = buffer_.origin_GRID_absolute.x;
    global_cloud_transformation.translation()[1] = buffer_.origin_GRID_absolute.y;
    global_cloud_transformation.translation()[2] = buffer_.origin_GRID_absolute.z;
        
    // rmats_[0] is the (identity) initial rotation — translation only.
    global_cloud_transformation.linear() = rmats_[0];
    {
    //ScopeTime time5("\t\t[kinfu.cpp](extractSlice)\t\t\t transformation");
    //transform current slice into global coordinates
    transformPointCloud(*current_slice_, *current_slice_, global_cloud_transformation);
    }
  
  //get the bounds of the current slice
  pcl::PointXYZI ptmin, ptmax;
  pcl::getMinMax3D	(	*current_slice_, ptmin, ptmax);
  
  ///THE OBTAINED CLOUD IS ACCUMULATED IN OUR EXISTING WORLD MODEL
  //~ world_model_.addCloud(originX,
                        //~ originY,
                        //~ originZ,
                        //~ std::abs(offsetX),
                        //~ std::abs(offsetY),
                        //~ std::abs(offsetZ),
                        //~ current_slice_);
  
  //~ PointCloud<PointXYZI>::Ptr existing_Slice (new  PointCloud<PointXYZI>);
  
  // Retrieve any data previously stored for the region the cube moves into,
  // so it can be pushed back into the TSDF by the caller.
  if( world_model_.getExistingData(buffer_.origin_GRID_absolute.x, buffer_.origin_GRID_absolute.y, buffer_.origin_GRID_absolute.z,
                               offsetX, offsetY, offsetZ,
                               VOLUME_X, VOLUME_Y, VOLUME_Z, *existing_Slice) )
  {
    has_previous_data = true; 
  }                      
                      
  if(current_slice_->points.size() !=0)
    world_model_.addCloud(ptmin.x,
                        ptmin.y,
                        ptmin.z,
                        ptmax.x,
                        ptmax.y,
                        ptmax.z,
                        current_slice_);
  
  //~ *visualization_cloud_ptr_ += *current_slice_;
  visualization_cloud_ptr_ = world_model_.getWorld();
  
  std::cout << "After adding the slice, the accumulated pointcloud has " << visualization_cloud_ptr_->points.size() << "points" << std::endl;
  
  
  ///FRANCISCO: After we have saved the slice in the correct position, we update the global GRID origin. 
  buffer_.origin_GRID_absolute.x += offsetX;
  buffer_.origin_GRID_absolute.y += offsetY;
  buffer_.origin_GRID_absolute.z += offsetZ;
  
  ///THE ACCUMULATED CLOUD NEEDS TO BE RESIZED
  visualization_cloud_ptr_->width = (int)visualization_cloud_ptr_->points.size ();
  visualization_cloud_ptr_->height = 1;
}


/// @brief Shifts the TSDF cube based on the camera pose and a projected point:
/// extracts the departing slice into the world model, clears it on the GPU,
/// restores any previously stored data for the incoming region, and updates
/// the volume origins.
/// @param camera_pose current camera pose.
/// @param projected_point point used by compute_new_cube_position to decide
///        the shift offsets.
void
pcl::gpu::KinfuTracker::performVolumeShifting(Eigen::Affine3f &camera_pose, pcl::PointXYZ projected_point)
{
  //ScopeTime time("[kinfu.cpp](performVolumeShifting)");
 
  ///get new metrics boundaries
  int offsetX, offsetY, offsetZ;
  
  compute_new_cube_position(&buffer_, camera_pose, projected_point, offsetX, offsetY, offsetZ);  
   
   
  PointCloud<PointXYZI>::Ptr previously_existing_Slice (new  PointCloud<PointXYZI>);
  bool has_previous_data = false;
     
  extractSlice(offsetX, offsetY, offsetZ, previously_existing_Slice, has_previous_data);
  
  pcl::device::clearTSDFSlice (tsdf_volume_->data(), &buffer_, offsetX, offsetY, offsetZ);
  
  // Consistency fix: only push data back if the world model actually had data
  // for the incoming region (matches performVolumeShiftingTargetPointBased).
  // Previously an empty slice could be pushed unconditionally.
  if (has_previous_data)
    pushSlice(buffer_.origin_GRID_absolute.x, buffer_.origin_GRID_absolute.y, buffer_.origin_GRID_absolute.z, previously_existing_Slice);
    
  ///UPDATE VOLUME ORIGINS
  shift_volume(&buffer_, tsdf_volume_, offsetX, offsetY, offsetZ);
  
  ///MARK THE SHIFT (set a bool to true)
  hasShifted_ = true;
  updateCumulativeCloud_ = true;

}


/// @brief Shifts the TSDF cube so it re-centres on a target point: extracts
/// the departing slice into the world model, clears it on the GPU, restores
/// previously stored data for the incoming region (if any), and updates the
/// volume origins.
/// @param target_point point (in global coordinates) the cube should follow;
///        compute_new_cube_position derives the voxel shift offsets from it.
void
pcl::gpu::KinfuTracker::performVolumeShiftingTargetPointBased(pcl::PointXYZ target_point)
{
  //ScopeTime time("[kinfu.cpp](performVolumeShifting)");
 
  ///get new metrics boundaries
  int offsetX, offsetY, offsetZ;
  
  compute_new_cube_position(&buffer_, target_point, offsetX, offsetY, offsetZ);  
   
  PointCloud<PointXYZI>::Ptr previously_existing_Slice (new  PointCloud<PointXYZI>);
  bool has_previous_data = false; 
   
  extractSlice(offsetX, offsetY, offsetZ, previously_existing_Slice, has_previous_data);
  
  pcl::device::clearTSDFSlice (tsdf_volume_->data(), &buffer_, offsetX, offsetY, offsetZ);
  
  // Only push data back when the world model had data for the incoming region.
  if(has_previous_data)
    pushSlice(buffer_.origin_GRID_absolute.x, buffer_.origin_GRID_absolute.y, buffer_.origin_GRID_absolute.z, previously_existing_Slice);

  ///UPDATE VOLUME ORIGINS
  shift_volume(&buffer_, tsdf_volume_, offsetX, offsetY, offsetZ);
  
  ///MARK THE SHIFT (set a bool to true)
  hasShifted_ = true;
  updateCumulativeCloud_ = true;

}


///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @brief Allocates all per-level GPU pyramid buffers plus the ICP reduction
/// buffers. (Name keeps the historical triple-f spelling from the header.)
/// @param rows,cols full-resolution depth image dimensions.
void
pcl::gpu::KinfuTracker::allocateBufffers (int rows, int cols)
{    
  // One entry per pyramid level for every map type.
  depths_curr_.resize (LEVELS);
  vmaps_g_curr_.resize (LEVELS);
  nmaps_g_curr_.resize (LEVELS);
  vmaps_g_prev_.resize (LEVELS);
  nmaps_g_prev_.resize (LEVELS);
  vmaps_curr_.resize (LEVELS);
  nmaps_curr_.resize (LEVELS);
  coresps_.resize (LEVELS);

  for (int level = 0; level < LEVELS; ++level)
  {
    // Each level halves the resolution of the previous one.
    const int level_rows = rows >> level;
    const int level_cols = cols >> level;

    depths_curr_[level].create (level_rows, level_cols);

    // Vertex/normal maps store three channels (x, y, z) stacked row-wise,
    // hence the factor of 3 on the row count.
    vmaps_g_curr_[level].create (level_rows * 3, level_cols);
    nmaps_g_curr_[level].create (level_rows * 3, level_cols);

    vmaps_g_prev_[level].create (level_rows * 3, level_cols);
    nmaps_g_prev_[level].create (level_rows * 3, level_cols);

    vmaps_curr_[level].create (level_rows * 3, level_cols);
    nmaps_curr_[level].create (level_rows * 3, level_cols);

    coresps_[level].create (level_rows, level_cols);
  }  

  depthRawScaled_.create (rows, cols);

  // Reduction buffers for the 6x6 ICP linear system; see estimateCombined
  // for the magic numbers (27 = upper triangle of A plus b).
  gbuf_.create (27, 20*60);
  sumbuf_.create (27);
}

/// @brief Processes one raw depth frame: bilateral-filters it, runs coarse-to-
/// fine ICP against the previous frame's raycast, optionally shifts the
/// volume, integrates the depth into the TSDF, and raycasts the new surface.
/// @param depth_raw raw depth map from the sensor.
/// @return false only on the very first frame (bootstrap integration, no ICP);
///         true otherwise — including when tracking was lost and the sample
///         was ignored (ignore_sample_ path).
bool
pcl::gpu::KinfuTracker::operator() (const DepthMap& depth_raw)
{  

  //if(stop)
   //return (true);
   //std::cout << "global_time" << global_time_ << std::endl;
  
  /// BILATERAL FILTERING: start
  /// Input : depth_raw Raw depth map obtained from Kinect
  /// Output : vmaps_curr_[i] and nmaps_curr_[i]    Bilateral depth obtained from the Kinect. Should not know anything about local and global coo because it is AS SEEN by camera.
  device::Intr intr (fx_, fy_, cx_, cy_);
  {
    device::bilateralFilter (depth_raw, depths_curr_[0]);

	if (max_icp_distance_ > 0)
		device::truncateDepth(depths_curr_[0], max_icp_distance_);

    for (int i = 1; i < LEVELS; ++i)
      device::pyrDown (depths_curr_[i-1], depths_curr_[i]);

    for (int i = 0; i < LEVELS; ++i)
    {
      device::createVMap (intr(i), depths_curr_[i], vmaps_curr_[i]);
      computeNormalsEigen (vmaps_curr_[i], nmaps_curr_[i]);
    }
    pcl::device::sync ();
  }
  /// BILATERAL FILTERING: end
  
  ///FIRST INTEGRATION OF DATA : start
  /// Input : rmats_[0] and tvecs_[0] which comprise the initial camera pose
  /// Input : vmaps_curr_[i] and nmaps_curr_[i]    Bilateral depth obtained from the Kinect. Should not know anything about local and global coordinates because it is AS SEEN by camera.
  /// Output : vmaps_g_prev_[i], nmaps_g_prev_[i]  Vertex and Normal maps, after performing transformation with initial pose
  //can't perform more on first frame
  if (!hasShifted_ && global_time_ == 0)
  {
    //~ std::cout << std::endl << " ====================================== " << std::endl << "Iteration " << global_time_ << std::endl;
    
    Matrix3frm initial_cam_rot = rmats_[0]; //  [Ri|ti] - pos of camera, i.e.
    Matrix3frm initial_cam_rot_inv = initial_cam_rot.inverse ();
    Vector3f   initial_cam_trans = tvecs_[0]; //  transform from camera to global coo space for (i-1)th camera pose
    	
    Mat33&  device_initial_cam_rot = device_cast<Mat33> (initial_cam_rot);
    Mat33&  device_initial_cam_rot_inv = device_cast<Mat33> (initial_cam_rot_inv);
    float3& device_initial_cam_trans = device_cast<float3>(initial_cam_trans);
	 
    float3 device_volume_size = device_cast<const float3>(tsdf_volume_->getSize());

    device::integrateTsdfVolume(depth_raw, intr, device_volume_size, device_initial_cam_rot_inv, device_initial_cam_trans, tsdf_volume_->getTsdfTruncDist(), tsdf_volume_->data(), depthRawScaled_, &buffer_);

    ///transform maps from camera space to global space.
    for (int i = 0; i < LEVELS; ++i)
      device::tranformMaps (vmaps_curr_[i], nmaps_curr_[i], device_initial_cam_rot, device_initial_cam_trans, vmaps_g_prev_[i], nmaps_g_prev_[i]);

    ++global_time_;
    return (false);
  }
  ///FIRST INTEGRATION OF DATA : end

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Iterative Closest Point
  
  ///GET PREVIOUS GLOBAL TRANSFORM
    //Previous global rotation
    Matrix3frm cam_rot_global_prev = rmats_[global_time_ - 1];            //[Ri|ti] - pos of camera, i.e.
    //Previous global translation
    Vector3f   cam_trans_global_prev = tvecs_[global_time_ - 1];          //tranfrom from camera to global coo space for (i-1)th camera pose
    //Previous global inverse rotation
    Matrix3frm cam_rot_global_prev_inv = cam_rot_global_prev.inverse ();  //Rprev.t();
  
  ///GET CURRENT GLOBAL TRANSFORM
    // Initialised from the previous pose; refined by the ICP iterations below.
    Matrix3frm cam_rot_global_curr = cam_rot_global_prev;                 //tranform to global coo for ith camera pose
    Vector3f   cam_trans_global_curr = cam_trans_global_prev;
 
  ///CONVERT TO DEVICE TYPES 
  
    ///LOCAL PREVIOUS TRANSFORM
    Mat33&  device_cam_rot_local_prev_inv = device_cast<Mat33> (cam_rot_global_prev_inv);
    //TODO Handle local rotations when cube will be rotated
    // Local (cube) coordinates = global coordinates minus the cube's metric origin.
    float3& device_cam_trans_local_prev_tmp = device_cast<float3> (cam_trans_global_prev);
    float3 device_cam_trans_local_prev;
        device_cam_trans_local_prev.x = device_cam_trans_local_prev_tmp.x - buffer_.origin_metric.x;
        device_cam_trans_local_prev.y = device_cam_trans_local_prev_tmp.y - buffer_.origin_metric.y;
        device_cam_trans_local_prev.z = device_cam_trans_local_prev_tmp.z - buffer_.origin_metric.z;
 
  ///ICP: start 
  {
    //ScopeTime time("icp-all");
    
    // NOTE(review): substractOrigin is set below but never read — dead flag.
    bool substractOrigin = true;
    // Coarse-to-fine: walk pyramid levels from smallest to full resolution.
    for (int level_index = LEVELS-1; level_index>=0; --level_index)
    {
      int iter_num = icp_iterations_[level_index];

      //current maps
      MapArr& vmap_curr = vmaps_curr_[level_index];
      MapArr& nmap_curr = nmaps_curr_[level_index];   
      
      //previous maps
      MapArr& vmap_g_prev = vmaps_g_prev_[level_index];
      MapArr& nmap_g_prev = nmaps_g_prev_[level_index];
      
     ///FRANCISCO 
    /// PRE-PROCESSING: We are still determining if this step is needed
    ///                  We need to transform the maps from global into the local space.
    
    Mat33&  rotation_id = device_cast<Mat33> (rmats_[0]); // Identity Rotation Matrix. Because we only need translation
    // Negated cube origin: translates the previous maps from global into
    // cube-local coordinates (inverse of the post-raycast translation).
    float3 cube_origin = buffer_.origin_metric;
    cube_origin.x = -cube_origin.x;
    cube_origin.y = -cube_origin.y;
    cube_origin.z = -cube_origin.z;
    
    // NOTE(review): source and destination alias here (in-place transform);
    // assumed safe for a pure per-pixel translation — confirm in tranformMaps.
    MapArr& vmap_temp = vmap_g_prev;
    MapArr& nmap_temp = nmap_g_prev;
    device::tranformMaps (vmap_temp, nmap_temp, rotation_id, cube_origin, vmap_g_prev, nmap_g_prev);

    ///PRE-PROCESSING
      for (int iter = 0; iter < iter_num; ++iter)
      {
        ///CONVERT TO DEVICE TYPES
          // CURRENT LOCAL TRANSFORM
            Mat33&  device_cam_rot_local_curr = device_cast<Mat33> (cam_rot_global_curr);/// We have not dealt with changes in rotations
            //TODO Handle local rotations when new cube will be rotated (to come) 
                      
            float3& device_cam_trans_local_curr_tmp = device_cast<float3> (cam_trans_global_curr);
            float3 device_cam_trans_local_curr; 
            
            {
              device_cam_trans_local_curr.x = device_cam_trans_local_curr_tmp.x - buffer_.origin_metric.x;
              device_cam_trans_local_curr.y = device_cam_trans_local_curr_tmp.y - buffer_.origin_metric.y;
              device_cam_trans_local_curr.z = device_cam_trans_local_curr_tmp.z - buffer_.origin_metric.z;
              substractOrigin = false;
            }        

        // A and b form the 6x6 linearised point-to-plane ICP system A*x = b.
        Eigen::Matrix<double, 6, 6, Eigen::RowMajor> A;
        Eigen::Matrix<double, 6, 1> b;
                
        ///LOCAL   
        estimateCombined (device_cam_rot_local_curr, device_cam_trans_local_curr, vmap_curr, nmap_curr, device_cam_rot_local_prev_inv, device_cam_trans_local_prev, intr (level_index), vmap_g_prev, nmap_g_prev, distThres_, angleThres_, gbuf_, sumbuf_, A.data (), b.data (), &buffer_, hasShifted_);
        
        //checking nullspace
        double det = A.determinant ();
        //std::cout <<  "determinant " << det << std::endl;

    

		//if (fabs (det) < 1e-15 || pcl_isnan (det)) ///ORIGINAL ONE
    // NOTE(review): the threshold was changed from 1e-15 to 1e11 — this makes
    // the "tracking lost" test far more aggressive (it now rejects any system
    // whose determinant magnitude is below 1e11). Presumably an empirical
    // tuning for this large-scale variant; verify the intended magnitude.
    if (fabs (det) < 1e11 || pcl_isnan (det))
        {
          if (pcl_isnan (det)) cout << "qnan" << endl;
          //std::cout << "/!\\ /!\\ /!\\ /!\\ /!\\ TRACKING LOST /!\\ /!\\ /!\\ /!\\ /!\\"  << std::endl;
          
          ///THIS IS WHERE WE NEED TO MAKE THE MAGIC of not losing
          // Instead of resetting, skip this sample: the pose is not updated
          // and the frame is not integrated (see ignore_sample_ checks below).
          ignore_sample_ = true;
          //stop = true;
          //return (true);
            
          //reset ();
          //return (false); 
        }

      if(!ignore_sample_)
      {
        // Solve for the 6-DoF incremental twist (3 rotation, 3 translation).
        Eigen::Matrix<float, 6, 1> result = A.llt ().solve (b).cast<float>();

        float alpha = result (0);
        float beta  = result (1);
        float gamma = result (2);

        Eigen::Matrix3f cam_rot_incremental = (Eigen::Matrix3f)AngleAxisf (gamma, Vector3f::UnitZ ()) * AngleAxisf (beta, Vector3f::UnitY ()) * AngleAxisf (alpha, Vector3f::UnitX ());
        Vector3f cam_trans_incremental = result.tail<3> ();

        //compose
        cam_trans_global_curr = cam_rot_incremental * cam_trans_global_curr + cam_trans_incremental;
        cam_rot_global_curr = cam_rot_incremental * cam_rot_global_curr;
      }
      //~ else
      //~ {
        //~ std::cout << std::endl << "/!\\ /!\\ /!\\ /!\\ /!\\ YOUR ARE LOST, PLEASE GO BACK TO THE POSITION SHOWN IN THE 3D VIEW /!\\ /!\\ /!\\ /!\\ /!\\" << std::endl;
      //~ }
        
        
      }
    }
  }
  /// ICP : end
    
  // Clear the shift flag from the previous frame; it may be set again by the
  // shift test below, in which case integration is forced this frame.
  hasShifted_ = false;

  //save tranform
  if(!ignore_sample_)
  {
    rmats_.push_back (cam_rot_global_curr); 
    tvecs_.push_back (cam_trans_global_curr);
  }

  
  /// TEST FOR SHIFT : start
  /// Here we check if the camera is within the boundaries of the cube. If the camera is leaving the cube, then the appropriate shifting procedure will happen inside the call to isCameraWithinBounds().
  ///If we shifted, hasShifted_ is set to true and cube origins (metric and indices) are updated! So we get them below:
   
  if (volume_shifting_enabled_)
  {
    
    Eigen::Affine3f cam_pose = getCameraPose(global_time_ - 1);
    //ScopeTime time("[kinfu.cpp](operator) Checking for shift");
    ///ROTATION AND TRANSLATION
    //isTargetPointWithinBounds(cam_pose);
    
    ///TRANSLATION ONLY
    //if( !isCameraWithinBounds(cam_pose) )
    // Side effect: isTargetPointWithinBounds also updates the member
    // targetPoint, which is what the shift below re-centres on.
    if( !isTargetPointWithinBounds(cam_pose) )
    {
      //std::cout << "\n ========================================================  WE NEED TO SHIFT " << std::endl;
      //pcl::PointXYZ point;
      //performVolumeShifting(cam_pose, point);
      performVolumeShiftingTargetPointBased(targetPoint);
    }
    
  
  }  
  /// TEST FOR SHIFT : end

    /// get NEW local rotation 
    Matrix3frm cam_rot_local_curr_inv = cam_rot_global_curr.inverse ();
    Mat33&  device_cam_rot_local_curr_inv = device_cast<Mat33> (cam_rot_local_curr_inv);
    Mat33&  device_cam_rot_local_curr = device_cast<Mat33> (cam_rot_global_curr); 
    //TODO Handle local rotations when new cube will be rotated (to come)
  
    ///get NEW local translation
    // Recomputed here because origin_metric may have changed if we shifted.
    float3& device_cam_trans_local_curr_tmp = device_cast<float3> (cam_trans_global_curr);
    float3 device_cam_trans_local_curr;
    device_cam_trans_local_curr.x = device_cam_trans_local_curr_tmp.x - buffer_.origin_metric.x;
    device_cam_trans_local_curr.y = device_cam_trans_local_curr_tmp.y - buffer_.origin_metric.y;
    device_cam_trans_local_curr.z = device_cam_trans_local_curr_tmp.z - buffer_.origin_metric.z;

  ///////////////////////////////////////////////////////////////////////////////////////////
  // Integration check - We do not integrate volume if camera does not move.  
  float rnorm = rodrigues2(cam_rot_global_curr.inverse() * cam_rot_global_prev).norm();
  float tnorm = (cam_trans_global_curr - cam_trans_global_prev).norm();  
  const float alpha = 1.f;
  bool integrate = (rnorm + alpha * tnorm)/2 >= integration_metric_threshold_;  

  ///////////////////////////////////////////////////////////////////////////////////////////
  /// INTEGRATION : start
  float3 device_volume_size = device_cast<const float3> (tsdf_volume_->getSize());
  
  if (!ignore_sample_)
  {
    // hasShifted_ can only be true here if the shift test above fired.
    if (integrate || hasShifted_)
    {
      integrateTsdfVolume (depth_raw, intr, device_volume_size, device_cam_rot_local_curr_inv, device_cam_trans_local_curr, tsdf_volume_->getTsdfTruncDist(), tsdf_volume_->data(), depthRawScaled_, &buffer_);
    }
  }
  /// INTEGRATION : end


  /// RAYCASTING : start
     {
    //ScopeTime time("ray-cast-all");      
    raycast (intr, device_cam_rot_local_curr, device_cam_trans_local_curr, tsdf_volume_->getTsdfTruncDist(), device_volume_size, tsdf_volume_->data(), vmaps_g_prev_[0], nmaps_g_prev_[0], &buffer_);
      
    /// POST-PROCESSING: We are still determining if this step is needed
    ///                  We need to transform the maps into the global space.
    
    Mat33&  rotation_id = device_cast<Mat33> (rmats_[0]); /// Identity Rotation Matrix. Because we only need translation
    float3 cube_origin = buffer_.origin_metric;
    
    MapArr& vmap_temp = vmaps_g_prev_[0];
    MapArr& nmap_temp = nmaps_g_prev_[0];
    
    // Translate the raycast maps from cube-local back into global coordinates
    // (inverse of the pre-ICP translation above).
    device::tranformMaps (vmap_temp, nmap_temp, rotation_id, cube_origin, vmaps_g_prev_[0], nmaps_g_prev_[0]);
    
    ///Update maps pyramids: Definitely needed. Do not comment out. Part of original raycasting.
    for (int i = 1; i < LEVELS; ++i)
    {
      resizeVMap (vmaps_g_prev_[i-1], vmaps_g_prev_[i]);
      resizeNMap (nmaps_g_prev_[i-1], nmaps_g_prev_[i]);
    }
    pcl::device::sync ();
  }
  /// RAYCASTING : end 
  
  
  // Only advance time if the frame was actually tracked/used.
  if (!ignore_sample_)
  {
    ++global_time_;
  }
  
  ignore_sample_ = false;
  return (true);
}


/// @brief Decides whether the cube needs to shift by tracking a "target point"
/// placed 0.6 * VOLUME_SIZE in front of the camera (along its optical axis).
/// Side effect: updates the member `targetPoint` (in global coordinates),
/// which performVolumeShiftingTargetPointBased later re-centres on.
/// @param cam_pose current camera pose (passed by value; only read).
/// @return true while the target point stays within VOLUME_SIZE/2.2 of the
///         cube centre; false when a shift is required.
bool
pcl::gpu::KinfuTracker::isTargetPointWithinBounds(Eigen::Affine3f cam_pose)
{
  
  bool result = true;
  
  // Start from the camera-frame origin (the camera translation terms are
  // deliberately zeroed — the pose transform below supplies the position).
  targetPoint.x = 0.0f;//cam_pose.translation()[0];
  targetPoint.y = 0.0f;//cam_pose.translation()[1];
  targetPoint.z = 0.0f;//cam_pose.translation()[2];
  
  //~ std::cout << std::endl << "Cube origin: " << buffer_.origin_metric.x << " " << buffer_.origin_metric.y << " " << buffer_.origin_metric.z << std::endl;   
  //~ std::cout << "Camera pose: " << cam_pose.translation()[0] << " " << cam_pose.translation()[1] << " " << cam_pose.translation()[2] << std::endl;
  //~ std::cout << "Camera rotation" << cam_pose.linear() << std::endl;
  
  targetPoint.z += 0.6f * VOLUME_SIZE; // place the point at camera position + (0.6 * CubeSize) on Z
  
  //~ std::cout << "Target point pose in camera space: " << targetPoint.x << " " << targetPoint.y << " " << targetPoint.z << std::endl;

  // Transform the camera-frame point into global coordinates.
  targetPoint = pcl::transformPoint(targetPoint, cam_pose/*.inverse()*/);
  
  //~ std::cout << "Target point pose in global space: " << targetPoint.x << " " << targetPoint.y << " " << targetPoint.z << std::endl;
  
  // Metric centre of the current cube.
  pcl::PointXYZ center_cube;
  center_cube.x = buffer_.origin_metric.x + VOLUME_SIZE/2.0;
  center_cube.y = buffer_.origin_metric.y + VOLUME_SIZE/2.0;
  center_cube.z = buffer_.origin_metric.z + VOLUME_SIZE/2.0;
  
  float dist = pcl::euclideanDistance(targetPoint, center_cube);
  
  
  //~ std::cout << "distance target-cube_center: " << dist << std::endl;
  
  // VOLUME_SIZE/2.2 keeps a margin inside the half-size sphere of the cube.
  if( dist > VOLUME_SIZE/2.2)
    result = false;
  
  return (result);
 
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @brief Returns the camera pose recorded at a given time step.
/// @param time index into the pose history; any out-of-range value (negative
///        or past the end) selects the most recent pose.
/// @return affine camera-to-global transform at that time step.
Eigen::Affine3f
pcl::gpu::KinfuTracker::getCameraPose (int time) const
{
  // Off-by-one fix: the previous test used '>' so time == rmats_.size()
  // slipped through and indexed one element past the end of the vectors.
  if (time >= (int)rmats_.size () || time < 0)
    time = (int)rmats_.size () - 1;

  Eigen::Affine3f aff;
  aff.linear () = rmats_[time];
  aff.translation () = tvecs_[time];
  return (aff);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/** \brief Number of camera poses recorded so far (one per tracked frame). */
size_t
pcl::gpu::KinfuTracker::getNumberOfPoses () const
{
  return (rmats_.size ());
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Contributed by Raphael: accessor for the pose of the shifted TSDF cube.
/** \brief Returns the pose of the rolling TSDF cube in world coordinates:
  * the initial camera rotation combined with the metric origin of the
  * shifted volume buffer. */
Eigen::Affine3f
pcl::gpu::KinfuTracker::getCubeTransform () const
{
  Eigen::Affine3f cube_pose;
  cube_pose.linear () = rmats_[0];
  cube_pose.translation () = Eigen::Vector3f (buffer_.origin_metric.x,
                                              buffer_.origin_metric.y,
                                              buffer_.origin_metric.z);
  return (cube_pose);
}

/** \brief Gives access to the internal circular-buffer structure used for
  * volume shifting. */
tsdf_buffer* 
pcl::gpu::KinfuTracker::buffer ()
{
  return (&buffer_);
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/** \brief Read-only access to the TSDF volume. */
const TsdfVolume& 
pcl::gpu::KinfuTracker::volume () const
{
  return (*tsdf_volume_);
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/** \brief Mutable access to the TSDF volume. */
TsdfVolume& 
pcl::gpu::KinfuTracker::volume ()
{
  return (*tsdf_volume_);
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/** \brief Read-only access to the color volume (valid only after
  * initColorIntegration()). */
const ColorVolume& 
pcl::gpu::KinfuTracker::colorVolume () const
{
  return (*color_volume_);
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/** \brief Mutable access to the color volume (valid only after
  * initColorIntegration()). */
ColorVolume& 
pcl::gpu::KinfuTracker::colorVolume ()
{
  return (*color_volume_);
}
     
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Renders a shaded view of the scene from the latest ray-casted maps.
  * \param[out] view device image that receives the rendered result
  */
void
pcl::gpu::KinfuTracker::getImage (View& view) const
{
  // Synthetic light placed far outside the volume, opposite its diagonal.
  // Kept non-const: device_cast binds a non-const reference.
  Eigen::Vector3f light_pose = -3.f * tsdf_volume_->getSize ();

  device::LightSource light;
  light.number = 1;
  light.pos[0] = device_cast<const float3> (light_pose);

  view.create (rows_, cols_);
  generateImage (vmaps_g_prev_[0], nmaps_g_prev_[0], light, view);
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Copies the last global-space vertex map into \a cloud.
  * \param[out] cloud device buffer, reinterpreted as float4 for the
  *             GPU conversion kernel
  */
void
pcl::gpu::KinfuTracker::getLastFrameCloud (DeviceArray2D<PointType>& cloud) const
{
  cloud.create (rows_, cols_);
  device::convert (vmaps_g_prev_[0], reinterpret_cast<DeviceArray2D<float4>&> (cloud));
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Copies the last global-space normal map into \a normals.
  * \param[out] normals device buffer, reinterpreted as float8 for the
  *             GPU conversion kernel
  */
void
pcl::gpu::KinfuTracker::getLastFrameNormals (DeviceArray2D<NormalType>& normals) const
{
  normals.create (rows_, cols_);
  device::convert (nmaps_g_prev_[0], reinterpret_cast<DeviceArray2D<float8>&> (normals));
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/** \brief (Re)allocates the color volume that accompanies the TSDF volume,
  * enabling color integration in operator()(depth, colors).
  * \param[in] max_weight maximum accumulation weight per voxel
  */
void
pcl::gpu::KinfuTracker::initColorIntegration (int max_weight)
{
  color_volume_.reset (new ColorVolume (*tsdf_volume_, max_weight));
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief Tracks the camera with \a depth and, on success, integrates
  * \a colors into the color volume with the newly estimated pose.
  * \param[in] depth raw depth map for this frame
  * \param[in] colors RGB view registered to the depth map
  * \return the result of the depth-only tracking pass
  */
bool 
pcl::gpu::KinfuTracker::operator() (const DepthMap& depth, const View& colors)
{ 
  // Run the regular depth-only pipeline first (pose estimation + TSDF fusion).
  bool res = (*this)(depth);

  // Only update colors when tracking succeeded and color integration was
  // enabled via initColorIntegration().
  if (res && color_volume_)
  {
    const float3 device_volume_size = device_cast<const float3> (tsdf_volume_->getSize());
    device::Intr intr(fx_, fy_, cx_, cy_);

    // Latest estimated pose; the inverted rotation maps global coordinates
    // back into the current camera frame.
    Matrix3frm R_inv = rmats_.back().inverse();
    Vector3f   t     = tvecs_.back();
    
    // device_cast binds non-const references, so R_inv / t must stay
    // mutable lvalues here.
    Mat33&  device_Rcurr_inv = device_cast<Mat33> (R_inv);
    float3& device_tcurr = device_cast<float3> (t);
    
    device::updateColorVolume(intr, tsdf_volume_->getTsdfTruncDist(), device_Rcurr_inv, device_tcurr, vmaps_g_prev_[0], 
        colors, device_volume_size, color_volume_->data(), color_volume_->getMaxWeight());
  }

  return res;
}

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////

namespace pcl
{
  namespace gpu
  {
    /** \brief Blends an RGB24 image over a rendered ray-cast view on the GPU.
      * \param[in] rgb24 color image to blend in
      * \param[in,out] view rendered view that receives the blend
      * \param[in] colors_weight blend factor for the color image (default 0.5)
      */
    PCL_EXPORTS void 
    paint3DView(const KinfuTracker::View& rgb24, KinfuTracker::View& view, float colors_weight = 0.5f)
    {
      device::paint3DView(rgb24, view, colors_weight);
    }

    /** \brief Merges a point cloud and its normals into a single
      * DeviceArray<PointNormal> on the GPU.
      * \param[in] cloud input points
      * \param[in] normals input normals (paired by index with \a cloud)
      * \param[out] output merged point+normal array, sized to the smaller input
      */
    PCL_EXPORTS void
    mergePointNormal(const DeviceArray<PointXYZ>& cloud, const DeviceArray<Normal>& normals, DeviceArray<PointNormal>& output)
    {
      // Only as many output elements as BOTH inputs can supply. Use std::min
      // explicitly instead of relying on a global-namespace min() leaking in
      // from CUDA headers.
      const size_t size = std::min (cloud.size(), normals.size());
      output.create(size);

      const DeviceArray<float4>& c = (const DeviceArray<float4>&)cloud;
      const DeviceArray<float8>& n = (const DeviceArray<float8>&)normals;
      const DeviceArray<float12>& o = (const DeviceArray<float12>&)output;
      device::mergePointNormal(c, n, o);           
    }

    /** \brief Converts a (possibly noisy) rotation matrix to a Rodrigues
      * rotation vector (axis * angle), after projecting the input onto the
      * closest orthonormal matrix via SVD. Port of OpenCV's cv::Rodrigues.
      * \param[in] matrix input 3x3 rotation matrix
      * \return rotation vector whose direction is the rotation axis and whose
      *         norm is the rotation angle in radians
      */
    Eigen::Vector3f rodrigues2(const Eigen::Matrix3f& matrix)
    {
      // Re-orthonormalize: R = U * V^T is the nearest rotation to `matrix`.
      Eigen::JacobiSVD<Eigen::Matrix3f> svd(matrix, Eigen::ComputeFullV | Eigen::ComputeFullU);    
      Eigen::Matrix3f R = svd.matrixU() * svd.matrixV().transpose();

      // Off-diagonal antisymmetric part encodes (2 sin(theta)) * axis.
      double rx = R(2, 1) - R(1, 2);
      double ry = R(0, 2) - R(2, 0);
      double rz = R(1, 0) - R(0, 1);

      double s = sqrt((rx*rx + ry*ry + rz*rz)*0.25);
      double c = (R.trace() - 1) * 0.5;
      c = c > 1. ? 1. : c < -1. ? -1. : c;  // clamp against rounding noise

      double theta = acos(c);

      if( s < 1e-5 )
      {
        // Degenerate case: angle near 0 or near pi.
        double t;

        if( c > 0 )
          rx = ry = rz = 0;  // near-identity rotation
        else
        {
          // Angle near pi: recover the axis from the diagonal of R, with
          // signs taken from the first row.
          t = (R(0, 0) + 1)*0.5;
          rx = sqrt( std::max(t, 0.0) );
          t = (R(1, 1) + 1)*0.5;
          ry = sqrt( std::max(t, 0.0) ) * (R(0, 1) < 0 ? -1.0 : 1.0);
          t = (R(2, 2) + 1)*0.5;
          rz = sqrt( std::max(t, 0.0) ) * (R(0, 2) < 0 ? -1.0 : 1.0);

          // Resolve the remaining sign ambiguity using R(1, 2).
          if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R(1, 2) > 0) != (ry*rz > 0) )
            rz = -rz;
          theta /= sqrt(rx*rx + ry*ry + rz*rz);
          rx *= theta;
          ry *= theta;
          rz *= theta;
        }
      }
      else
      {
        // Regular case: scale the antisymmetric part to length theta.
        double vth = 1/(2*s);
        vth *= theta;
        rx *= vth; ry *= vth; rz *= vth;
      }
      return Eigen::Vector3d(rx, ry, rz).cast<float>();
    }
  }
}
