#include "aux_functions_def.hpp"
#include "aux_functions_impl.hpp"

#include "kmeans_def.hpp"
#include "kmeans_impl.hpp"

#include "kkmeans_def.hpp"
#include "kkmeans_impl.hpp"

#include "opt_kkmeans_def.hpp" // Hoping this is faster than the previous version
#include "opt_kkmeans_impl.hpp"



#include "Riemann_Kernel_def.hpp"
#include "Riemann_Kernel_impl.hpp"

#include "dct_def2.hpp" 
#include "dct_impl2.hpp"

inline	
// Feature-extraction constructor.
// Stores the video location, the list of frame file names, the frame
// dimensions (ro rows x co columns), the output path and the DCT flag, and
// immediately computes and saves the per-segment features
// (see precalculate_features()).
clustering_RM::clustering_RM(  std::string in_video_path,  field<std::string> in_frames_list, uword in_ro, uword in_co, std::string in_tosavein,  bool in_isDCT )
:frames_list(in_frames_list), video_path( in_video_path ) , ro(in_ro) , co(in_co), tosavein (in_tosavein), isDCT(in_isDCT)
{
  
  precalculate_features();
  
  
}


inline	
// Summarization constructor.
// Loads a precomputed (Stein) kernel matrix from `load_ker`, stores the
// ground-truth index path in the GT member, and runs kernel k-means with a
// number of centroids equal to `summ_percentage` percent of the segments.
// `save_part` is forwarded to run_kkmeans as the partition name.
clustering_RM::clustering_RM(std::string load_ker,std::string save_part, std::string in_GT, int summ_percentage, std::string in_video_path, field<std::string> in_frames_list )
:frames_list(in_frames_list), video_path( in_video_path ), GT (in_GT)
{
  
  
  mat stein_ker;
  stein_ker.load(load_ker);
  int max_iter = 100;
  
  // NOTE(review): integer arithmetic — n_cols*summ_percentage/100 truncates,
  // and Ncent becomes 0 for small kernels/percentages; confirm that is intended.
  int Ncent = stein_ker.n_cols*summ_percentage/100;
  //int Ncent =10;
  run_kkmeans(stein_ker, max_iter,  Ncent, save_part) ;
  //run_kkmeans(mat K, uword max_iter, uword Ncent, std::string part_name, std::string GT)
}






inline	
void
clustering_RM::precalculate_features()
{
  nf = frames_list.n_rows;
  double ni = 5; // # of consecutive images to be used to calculate the point in the manifold
  cout << "# images to calculate a covariance matrix (Riemmannian Manifold): " << ni << endl;
  cout << "# of frames " << nf << endl;
  double nDx = floor(nf/ni); // # of Descriptors ( Covariance matrices or vectors of means)
  //cout << "# of covariance matrices is " << n_covMx << endl;
  
  field<mat> Cov_Features;
  field<mat> Mean_segment;
  // 4 regions
  int num_reg = 4;
  isBlock = true;
  double wx = co/num_reg;
  double hy = ro/num_reg;
  double n_regions = num_reg*num_reg;
  //Cov_Features.set_size(nDx-1,num_reg*num_reg); // Covariance matrices for all the segments. 1 segment = 1 second = 10 frames in UCSD dataset
  Mean_segment.set_size(nDx-1); // As the first and last frame are not used
  mat mean_subframe;
  uword pos = 0;
  uword prevIdx,currIdx,nextIdx;
  //idx_frames.zeros(ni+1);
  
  
  //Features:
  double rnf = nDx*ni; //
  double t = 1;
  int segmAcc = 0;
  int segm = 0;
  mean_subframe.zeros( 15 , n_regions);

  //cv::Rect rec;
  
  for (uword f = 1; f <  rnf - 4; ++f)//  Should be nf, but I want this to be multiple of ni.
  {
    int reg = 0;
    
    prevIdx = f - 1;
    currIdx = f;
    nextIdx = f + 1;
    
    load_frames(prevIdx,currIdx,nextIdx);
    //cout << "Frame= " << f << endl;
    
    for (uword y = 0; y<num_reg; y++){
      int ini_y = hy*y;
      for (uword x = 0; x<num_reg; x++){
	int ini_x = wx*x;
	//cout << ini_y << " " << ini_x << endl;
	//cout << ini_x << " " << ini_y << " " << wx << " "<< hy <<  endl;
	mean_subframe(span::all,reg) += creating_RiemmannPoint_temporal(cv::Rect(ini_x, ini_y, wx, hy)); //Using spatio-temporal features
	reg++;
	}
      }
    
    segmAcc++;
    if (segmAcc == ni){
      cout << "segm: " << segm<< endl;
      mean_subframe = mean_subframe/ni;
      Mean_segment(segm) = mean_subframe;
      
      mean_subframe.zeros( 15 , n_regions);
      segm++;
      segmAcc = 0;
    }
    t++;
  }
  Mean_segment.save(tosavein);
}


inline
void
clustering_RM::load_frames(uword prevIdx, uword  currIdx, uword  nextIdx)
{
  // Reads the three consecutive frames (previous / current / next) named by
  // frames_list into the corresponding member images, always as grayscale.
  const uword indices[3] = { prevIdx, currIdx, nextIdx };
  cv::Mat* targets[3]    = { &prevImg, &currentImg, &nextImg };
  
  for (int i = 0; i < 3; ++i)
  {
    std::stringstream full_path;
    full_path << video_path << frames_list(indices[i]);
    *(targets[i]) = cv::imread(full_path.str(), CV_LOAD_IMAGE_GRAYSCALE);
  }
}

//feat_segment.zeros(15,l*(n_frames)); //see Andres's paper (WACV)
inline	
vec
clustering_RM::creating_RiemmannPoint_temporal(cv::Rect rec)
{
  // Computes the mean of 15 per-pixel spatio-temporal features over the
  // region `rec` (or the whole frame when isBlock is false), using the
  // member images prevImg / currentImg / nextImg loaded by load_frames().
  // Features per pixel: x, y, t, |ix|, |iy|, |ixx|, |iyy|, gradient
  // magnitude, gradient direction, u, v, |du/dt|, |dv/dt|,
  // divergence (ux - vy), vorticity (vx - uy).
  cv::Mat bl_prevImg, bl_currentImg, bl_nextImg;
  
  if (isBlock)
  {
    bl_prevImg = prevImg(rec);
    bl_currentImg = currentImg(rec);
    bl_nextImg = nextImg(rec);
  }
  else
  {
    bl_prevImg = prevImg;
    bl_currentImg = currentImg;
    bl_nextImg = nextImg;
  }
  
  vec mean_current_frame;
  mean_current_frame.zeros(15);
  
  cv::Mat flow, prevFlow;
  double num_vec = 0;
  
  // Dense optical flow for the current->next and previous->current pairs.
  cv::calcOpticalFlowFarneback(bl_currentImg, bl_nextImg, flow, 0.5,  3, 15, 3, 5, 1.2, 0); 
  cv::calcOpticalFlowFarneback(bl_prevImg, bl_currentImg, prevFlow, 0.5,  3, 15, 3, 5, 1.2, 0); 
  
  // BUG FIX: the Sobel derivative maps are now computed on the block image
  // (bl_currentImg) instead of the full frame (currentImg).  The pixel loop
  // below indexes these maps with block-local coordinates, so using the full
  // frame always sampled gradients from the top-left corner of the image,
  // regardless of which region was requested.
  cv::Mat ixMat, iyMat, ixxMat, iyyMat;
  cv::Sobel(bl_currentImg, ixMat, CV_32F, 1, 0, 1);
  cv::Sobel(bl_currentImg, iyMat, CV_32F, 0, 1, 1);
  cv::Sobel(bl_currentImg, ixxMat, CV_32F, 2, 0, 1);
  cv::Sobel(bl_currentImg, iyyMat, CV_32F, 0, 2, 1);
  
  uword col = bl_currentImg.cols;
  uword row = bl_currentImg.rows;
  
  for (uword x = 0 ; x < col ; ++x ){
    for (uword y = 0 ; y < row ; ++y ) {
      
      // BUG FIX: clamp the backward-difference neighbours at the borders.
      // The original evaluated flow.at(y, x - 1) / flow.at(y - 1, x) even
      // when x == 0 or y == 0, underflowing the unsigned index and reading
      // out of bounds (undefined behaviour).  At the border the spatial
      // flow difference is defined to be 0.
      uword xl = (x == 0) ? x : x - 1;
      uword yu = (y == 0) ? y : y - 1;
      
      float u = flow.at<cv::Vec2f>(y, x)[0];
      float v = flow.at<cv::Vec2f>(y, x)[1];
      
      // First-order spatial gradients.
      float ix = ixMat.at<float>(y, x);
      float iy = iyMat.at<float>(y, x);
      
      // Gradient direction and magnitude.
      float gd = std::atan2(std::abs(iy), std::abs(ix));
      float gm = std::sqrt(ix * ix + iy * iy);
      
      // Second-order spatial gradients.
      float ixx = ixxMat.at<float>(y, x);
      float iyy = iyyMat.at<float>(y, x);
      
      // Temporal flow derivatives du/dt and dv/dt.
      float ut = u - prevFlow.at<cv::Vec2f>(y, x)[0];
      float vt = v - prevFlow.at<cv::Vec2f>(y, x)[1];
      
      // Spatial flow differences used for divergence and vorticity.
      float ux = u - flow.at<cv::Vec2f>(y, xl)[0];
      float uy = u - flow.at<cv::Vec2f>(yu, x)[0];
      float vx = v - flow.at<cv::Vec2f>(y, xl)[1];
      float vy = v - flow.at<cv::Vec2f>(yu, x)[1];
      
      // BUG FIX: the 13th feature used to be abs(ut) a second time, while
      // vt was computed but never used.  It is now abs(vt), completing the
      // du/dt / dv/dt pair.
      vec features_one_pixel(15);
      features_one_pixel  << x << y << t << abs(ix) << abs(iy) << abs(ixx) 
      << abs(iyy) << gm << gd <<  u << v << abs(ut) 
      << abs(vt) << (ux - vy)  << (vx - uy);
      
      mean_current_frame += features_one_pixel;
      num_vec++;
    }
  }
  
  return mean_current_frame/num_vec;
}



inline	
mat
// Builds one Riemannian point (a covariance matrix) for a segment of frames:
// for every frame it extracts 15 DCT coefficients per overlapping 8x8 block
// (stride `jump`), concatenates all per-block coefficient vectors of all
// frames column-wise, and returns the covariance of that collection.
clustering_RM::creating_RiemmannPoint_dct(field<std::string>  name_frames)
{
  uword n_frames = name_frames.n_rows;
  cv::Mat cvImage;
  double N = 8 ; // DCT block side length
  dct2d dct(N);
  mat coef_one_frame;
  //mat F = zeros(vec_size,n_frames); //vec_size is an input
  
  // Number of overlapping NxN blocks per frame when sliding with this stride.
  double jump = 2;
  double lw = floor((co - N)/jump + 1);
  double lh = floor((ro - N)/jump + 1);
  double l  = lw*lh;
  
  mat coef_segment ; // coef for 10 frames
  coef_segment.zeros(15,l*n_frames); // 15 DCT coefficients per block
  
  
  for (uword n = 0; n < n_frames;  ++n)
  {
    //cout << name_frames(n,0) << endl;
    std::stringstream tmp_name;
    
    tmp_name << video_path << name_frames(n,0); 
    //cout << tmp_name.str() << endl;
    cvImage = cv::imread(tmp_name.str(), CV_LOAD_IMAGE_GRAYSCALE);
    
    /*cv::namedWindow("Image", CV_WINDOW_AUTOSIZE );
     *    cv::imshow("Image", cvImage);
     *    cv::waitKey(2);*/
    
    aux_functions aux_fx;
    mat frame;
    frame = aux_fx.convert2Arma_nor(cvImage); // do this inside the other one
    coef_one_frame = dct.get_coeff_overlap_6(frame); // Jumping 2 pixels & 15 coefficients
    //cout << "Rows: " << coef_one_frame.n_rows << "- Cols: " <<coef_one_frame.n_cols << endl;
    
    // Columns [l*n, l*n + l - 1] of coef_segment hold frame n's blocks.
    uword ci = l*n;
    uword  cf = (l*n+l) - 1;
    coef_segment.submat(span::all , span(ci, cf)) = coef_one_frame;
    
  }
  //getchar();
  // Covariance across all block-coefficient vectors: a 15x15 SPD matrix.
  mat Riemmannian_point = cov(coef_segment.t());
  //cout << "Coef rows: " << coef_segment.n_rows << " - Coef cols: " << coef_segment.n_cols << endl;;
  return Riemmannian_point;
}

//save_ker: to load the Kernel
//save_part: to save the partitions--Maybe I don't need it any more
//GT: load the indices of the Ground Truth
inline	
void
clustering_RM::run_kkmeans(mat K, uword max_iter, uword Ncent, std::string part_name)
{ 
  
  k_kmeans kkm(K, Ncent);
  kkm.calc(max_iter);
  
  vec gt_ind;
  gt_ind.load(GT);
  
  /// Selecting one segment per cluster
  double  matching_cluster = 0;
  uvec nearpoints = kkm.near_points();
  //nearpoints.t().print("summary 1");
  for (uword i = 0; i < nearpoints.n_elem; ++i )
  {
    uword Rn = nearpoints(i);    
    //cout << "Gn= " << Gn << endl;
    //uvec q1 = find( gt_ind == Gn );
    double check  = gt_ind (Rn);
    
    if (check==1)
    {
      matching_cluster++;
    }
  }
  
  cout << "My summary selecting one point per cluster has: " << nearpoints.n_elem << " Grassmann Points. " << matching_cluster << " match with GT" << endl;
  
  cout << "Showing summary for 'Selecting one segment per cluster' " << endl;
  //showing_summary(nearpoints);
  //getchar();
  
  
  
  /// Selecting all points in smaller clusters
  
  //cout << "Nov 24" << endl;
  uvec rare_event = kkm.small_clusters();
  //rare_event.t().print("rare_points");
  double  matching_clusterII = 0;
  
  
  for (uword i = 0; i < rare_event .n_elem; ++i )
  {
    uword Rn = rare_event (i);    
    //cout << "Gn= " << Gn << endl;
    //uvec q1 = find( gt_ind == Gn );
    double check  = gt_ind (Rn);
    if (check==1)
    {
      matching_clusterII++;
      
    }
  }
  
  cout << "My summary selecting all points in smaller clusters has: " << rare_event.n_elem << " Points. " << matching_clusterII << " match with GT" << endl;
  
  cout << "Showing summary for 'Selecting all points in smaller clusters' " << endl;
  //showing_summary(rare_event);
  getchar();
  
  
  cout << "Showing summary for 'Random Selection'" << endl;
  
  
  double nf = frames_list.n_rows;
  double n_covMx = floor(nf/10);
  
  
  aux_functions aux_fx;
  uvec fake_summ = aux_fx.rand_vector(Ncent, 1, n_covMx); // Initial centroids
  
  
  
  uvec sort_fake_summ =  sort(fake_summ);
  sort_fake_summ.print("sort_fake_summ:");
  showing_summary(sort(fake_summ));
  
  
  
  
  
  
}


inline	
void
clustering_RM::showing_summary(uvec vec_summ)
{
  // Plays back the summary on screen: every entry of vec_summ is a segment
  // index, and each segment spans 10 consecutive frames that are shown one
  // after another in a single OpenCV window.
  cv::namedWindow("Image", CV_WINDOW_AUTOSIZE );
  cv::Mat show_frame;
  
  for (uword s = 0; s < vec_summ.n_elem; ++s )
  {
    // First frame number of this segment (1-based, since vec_summ starts at zero).
    double first_frame = vec_summ(s)*10 + 1;
    
    for (int offset = 0; offset < 10; ++offset )
    {
      // Build the on-disk path of the frame to display.
      std::stringstream frame_path;
      frame_path << video_path << "/" << frames_list(first_frame + offset - 1, 0);
      
      show_frame = cv::imread(frame_path.str(), CV_LOAD_IMAGE_GRAYSCALE);
      resize(show_frame, show_frame, cv::Size(240,160 ) );
      
      cv::imshow("Image", show_frame);
      cv::moveWindow("Image", 10, 50);
      cv::waitKey(50); // ~20 fps playback
    }
  }
}





