#include "log_exp_def.hpp"
#include "log_exp_impl.hpp"


//Features as per Andres' WACV paper

inline
opt_feat::opt_feat(const std::string in_path,
		   const std::string in_actionNames,
		   const std::string in_feat_path,
		   const int in_col,
		   const int in_row)
  : path(in_path),
    actionNames(in_actionNames),
    feat_path(in_feat_path),
    col(in_col),
    row(in_row),
    n_samples_tr(17),   // 17 training videos per action
    n_samples_te(8)     // 8 testing videos per action
{
  // Eigenvalue floor used when regularizing covariance descriptors
  // (see the SPD clamping in feature_video()).
  THRESH = 1e-6;
}


inline
void
opt_feat::training()
{
  // Extract covariance descriptors from all training videos, then persist
  // them to disk (order matters: saving() writes what calc_features() built).
  const bool is_training = true;
  calc_features(is_training);
  saving(is_training);
}

inline
void
opt_feat::testing(){
  
  log_exp log_exp1;
  int acc = 0; //accuracy
  
  
  //******************************************************
  //Loading Training Set and precalculating the logarithm of all training covariance descriptors
  field<mat> features_training;
  std::stringstream tmp_train;
  tmp_train << feat_path << "train" << "/"<< "all_actions";
  cout << "Loading in" << tmp_train.str()<< endl;
  features_training.load(tmp_train.str());
  int n_train = features_training.n_rows;
  
  mat cov_desc;
  
  field<mat> feat_log_training; // Precalculating the logarithm of all training covariance descriptors
  feat_log_training.set_size(n_train,2);
  
  for (uword tr = 0; tr < n_train; ++tr)
  { 
    cov_desc = features_training(tr,0);
    feat_log_training(tr,0) = log_exp1.log_matrix(cov_desc);
    feat_log_training(tr,1) = features_training(tr,1); //Copying the labels.
  }
  //******************************************************
  
  
  rowvec dist;
  dist.set_size(n_train);
  mat log_covMte;
  mat log_covMtr;
  
  
  std::string type;
  int num_samples;
  
  type = "test";    
  num_samples = n_samples_te;
  
  ///Testing con training
  //type = "train";    
  //num_samples = n_samples_tr;
  
  
  
  std::stringstream tmp_ss;
  tmp_ss << path << actionNames;
  actions.load(tmp_ss.str());
  //actions.print("All actions");
  
  int n_test = actions.n_rows*num_samples;
  
  
  for (uword act = 0 ; act < actions.n_rows; ++act) {
    
    std::stringstream tmp_ss2;
    tmp_ss2 << path << actions(act)<<"/" <<type << "/"<< type <<"_list.txt";
    //cout << tmp_ss2.str()<< endl;
    //getchar();
    videos.load(tmp_ss2.str());
    //videos.print("All videos");
    
    
    for (uword vi = 0; vi <videos.n_rows; ++vi ){ 
      
      // Para cada video hay que inicializar
      covs.clear();
      lab_covs.clear();
      num_covs = 0;
      
      //cout << "Size: " << covs.size() << endl;
      std::stringstream tmp_ss3;
      tmp_ss3 << path << actions(act)<<"/" << type << "/"<<  videos(vi);
      
      
      feature_video(tmp_ss3.str(), act);
      vec count = zeros<vec>(actions.n_rows);
      
      for (uword i=0; i<num_covs; ++i) // Cada de una estas covarianzas la voy a clasificar
      {
	//cout << "num_covs= " << i << endl;
	cov_desc = covs.at(i);
	log_covMte = log_exp1.log_matrix(cov_desc);  
	dist.zeros();
	
	for (uword tr=0; tr<n_train; ++tr)
	{
	  log_covMtr = feat_log_training(tr,0);
	  dist (tr) = norm( log_covMte - log_covMtr ,"fro");
	  
	}
	
	uword  index;
	double min_val = dist.min(index);
	//cout << "index = " << index << ". Label is: " << feat_log_training(index,1) << endl;
	
	uword  est_class = conv_to<uword>::from(feat_log_training(index,1));
	//cout << "index = " << index << ". Label is: " << est_class << endl;
	count(est_class)++;
	//count.t().print("count:");
      }
      uword  index_video;
      double max_val = count.max(index_video);
      cout << "This video is " << actions(act) << " and was classified as class: " << actions(index_video ) << endl;
      
      if (index_video == act)
      {
      acc++;
      }
    }
  }
    cout << "Performance: " << acc*100/n_test << " %" << endl;
}



inline
void // Modified June 30th: uses several descriptors per video.
opt_feat::calc_features(bool isTraining) // Intended for train/test extraction, not for cross-validation.
{
  // Extracts covariance descriptors for every video of every action and
  // accumulates them in covs / lab_covs / num_covs via feature_video().
  //
  // @param isTraining  true  -> read from the "train" sub-directories
  //                    false -> read from the "test"  sub-directories
  std::string type;

  covs.clear();
  lab_covs.clear();

  num_covs = 0;

  // NOTE: the original also computed num_samples (n_samples_tr/n_samples_te)
  // here, but it was never used — only the directory name differs.
  if (isTraining)
  {
    type = "train";
  }
  else
  {
    type = "test";
  }

  std::stringstream tmp_ss;
  tmp_ss << path << actionNames;
  actions.load(tmp_ss.str());
  //actions.print("All actions");

  for (uword act = 0 ; act < actions.n_rows; ++act) {

    std::stringstream tmp_ss2;
    tmp_ss2 << path << actions(act)<<"/" <<type << "/"<< type <<"_list.txt";
    videos.load(tmp_ss2.str());
    //videos.print("All videos");

    for (uword vi = 0; vi <videos.n_rows; ++vi ){
      std::stringstream tmp_ss3;
      tmp_ss3 << path << actions(act)<<"/" << type << "/"<<  videos(vi);

      // Appends this video's descriptors (labelled with `act`) to covs.
      feature_video(tmp_ss3.str(), act);
    }
  }
}


inline 
void
opt_feat::feature_video(std::string one_video, uword act)
{
  
  cv::VideoCapture capVideo(one_video);
  cout << one_video << endl;
  //double fps = capVideo.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
  //cout << "Frame per seconds : " << fps << endl;
  
  //cv::namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
  
  //double frmcount = capVideo.get(CV_CAP_PROP_FRAME_COUNT);
  //cout << "# of frames is: " << frmcount << endl;
  
  if( !capVideo.isOpened() )
  {
    cout << "Video couldn't be opened" << endl;
    return;
  }
  
  cv::Mat prevgray, gray, flow, cflow, frame, prevflow;
  cv::Mat ixMat, iyMat, ixxMat, iyyMat;
  //cv::namedWindow("My Video", 1);
  running_stat_vec<vec> stats_video(true);
  //cout << "Frame: ";
  int t = 0;
  int N_vectors = 0;
  int n_segm = 0;
  for(;;){
    
    
    //capVideo >> frame;
    
    bool bSuccess = capVideo.read(frame); // read a new frame from video
    
    if (!bSuccess) //if not success, break loop
      	{
	  //cout << "Cannot read the frame from video file" << endl;
	  break;
	}
	t++;
    cv::cvtColor(frame, gray, CV_BGR2GRAY);
    
    if( prevgray.data )
    {
      //cout << "Cuando entra aca?? en t= " << t << endl;
      cv::calcOpticalFlowFarneback(prevgray, 
				   gray, 
				   flow, 
				   0.5, //pyr_scale
				   3,   //levels
				   9,   //winsize
				   1,   //iterations
				   5,   //poly_n
				   1.1, //poly_sigma
				   0);  //flags
      //cv::calcOpticalFlowFarneback(bl_currentImg, bl_nextImg, flow, 0.5,  3, 5, 3, 5, 1.2, 0); 
      //cv::cvtColor(prevgray, cflow, CV_GRAY2BGR);
      //drawOptFlowMap(flow, cflow, 8, 1.5, CV_RGB(0, 255, 0));
      //cv::imshow("flow", cflow);
      
      
      cv::Sobel(gray, ixMat, CV_32F, 1, 0, 1);
      cv::Sobel(gray, iyMat, CV_32F, 0, 1, 1);
      cv::Sobel(gray, ixxMat, CV_32F, 2, 0, 1);
      cv::Sobel(gray, iyyMat, CV_32F, 0, 2, 1);
      
      float  ux = 0, uy = 0, vx = 0,  vy = 0;
      float u, v;
      //cout << "Llega a ciclo de Pixels???" << endl;
      //cout << "col: " << col << "- row " << row << endl;
      
      //printing frame number
      //cout << " " << t;
      if( prevflow.data )
      {
	
	if (n_segm == 19)
	{
	  //cout << "n_segm= " << n_segm;
	  //cout << ". N_vectors = " << N_vectors;
	  mat cov = stats_video.cov();
	  if (N_vectors > col*row/20)
	  {
	    //cout << "Aqui 0 ";
	    //Following Mehrtash suggestions as per email dated June26th 2014
	    //cov.print("cov");
	    
	    cov = 0.5*(cov + cov.t());
	    //cout << "Aqui 0.5 ";
	    vec D;
	    mat V;
	    eig_sym(D, V, cov);
	    uvec q1 = find(D < THRESH);
	    //cout << "Aqui 1 ";
	    if (q1.n_elem>0)
	    {
	      for (uword pos = 0; pos < q1.n_elem; ++pos)
	      {
		D( q1(pos) ) = THRESH;
	      }
	      //cout << "***cov_hat***" << endl;
	      cov = V*diagmat(D)*V.t();  //
	    }  
	    //cout << "Aqui 2 ";
	    
	    covs.push_back(cov);
	    lab_covs.push_back(act);
	    num_covs++;
	    
	  }
	  else{
	    //cout << ". Covariance discarded.";
	  }
	  
	  //cout << "Aqui 3 ";
	  stats_video.reset();
	  N_vectors = 0;
	  n_segm = 0;
	  
	  //cout << " num_covs= " << num_covs << ". Label is: " << act <<endl;
	  
	}
	
	
	for (uword x = 0 ; x < col ; ++x ){
	  for (uword y = 0 ; y < row ; ++y ) {
	    
	    vec features_one_pixel(15);
	    u = flow.at<cv::Vec2f>(y, x)[0];
	    v = flow.at<cv::Vec2f>(y, x)[1];
	    
	    //cout << "x= " << x << " - y= " << y << endl;
	    // x grad
	    //cout << " x y grad" << endl;
	    float ix = ixMat.at<float>(y, x);
	    //cout << " y grad" << endl;
	    float iy = iyMat.at<float>(y, x);
	    
	    // grad direction &  grad magnitude
	    //cout << "grad direction &  grad magnitude" << endl;
	    float gd = std::atan2(std::abs(iy), std::abs(ix));
	    float gm = std::sqrt(ix * ix + iy * iy);
	    
	    // x second grad
	    //cout << "x y  second grad " << endl;
	    float ixx = ixxMat.at<float>(y, x);
	    // y second grad
	    float iyy = iyyMat.at<float>(y, x);
	    
	    //du/dt
	    float ut = u - prevflow.at<cv::Vec2f>(y, x)[0];
	    // dv/dt
	    float vt = v - prevflow.at<cv::Vec2f>(y, x)[1];
	    
	    //// divergence &  vorticity
	    //cout << "divergence &  vorticity" << endl;
	    if (x>0 && y>0 )
	    {
	      ux = u - flow.at<cv::Vec2f>(y, x - 1)[0];
	      uy = u - flow.at<cv::Vec2f>(y - 1, x)[0];
	      vx = v - flow.at<cv::Vec2f>(y, x - 1)[1];
	      vy = v - flow.at<cv::Vec2f>(y - 1, x)[1];
	    }
	    //int x_submat = x + rec.x;
	    //int y_submat = y + rec.y;
	    //cout << x_submat << "&" << y_submat << endl;
	    

	    
	    features_one_pixel  << x << y << t << abs(ix) << abs(iy) << abs(ixx) 
	    << abs(iyy) << gm << gd <<  u << v << abs(ut) 
	    << abs(ut) << (ux - vy)  << (vx - uy);
	    //features_one_pixel.t().print("Features Current Pixel: ");
	    //getchar();
	    
	    
	    if (!is_finite( features_one_pixel ) )
	    {
	      cout << "It's not FINITE... continue???" << endl;
	      getchar(); 
	    }
	    
	    // Plotting Moving pixels
	    //cout << " " << gm;
	    if (gm>40) // Empirically set to 40
			    {
			      //frame.at<cv::Vec3b>(y,x)[0] = 0;
			      //frame.at<cv::Vec3b>(y,x)[1] = 0;
			      //frame.at<cv::Vec3b>(y,x)[2] = 255;
			      stats_video(features_one_pixel);
			      N_vectors++;
			      
			    }
			    //cout << stats_video.cov() << endl;
	    //cout << stats_video.mean() << endl;
	  }
	}
	n_segm++;
      }
    }
    if(cv::waitKey(30)>=0)
      break;
    //cout << " t: " <<t;
      std::swap(prevgray, gray);
      std::swap(prevflow, flow);//aca esta el problema.... cuando hay flow????
      
      
      //cv::imshow("color", frame);
      //cv::waitKey();
  } 
  
  
  //************ Without Overlapping*******************************
  //   mat cov = stats_video.cov();
  //   
  //   //Following Mehrtash suggestions as per email dated June26th 2014
  //   cov = 0.5*(cov + cov.t());
  //   vec D;
  //   mat V;
  //   eig_sym(D, V, cov);
  //   uvec q1 = find(D < THRESH);
  //  
  //   if (q1.n_elem>0)
  //   {
    //     for (uword pos = 0; pos < q1.n_elem; ++pos)
    //     {
      //       D( q1(pos) ) = THRESH;
      //     }
      //     //cout << "***cov_hat***" << endl;
      //     cov = V*diagmat(D)*V.t();  //
      //   }  
      //return cov;
      //*******************************************
      
      //cout << "covariance matrix size [ " << cov.n_rows << " x " << cov.n_cols << " ] " << endl;
      //cout << "Number of vectors per video " << N_vectors << endl;
      //cout << stats_video.cov() << endl;
      //getchar();
      
}

inline
void
opt_feat::saving(bool isTraining)
{
  // Packs the accumulated covariance descriptors and their labels into the
  // `features` field and stores it under <feat_path>/<type>/all_actions.
  //
  // @param isTraining  selects the "train" or "test" output sub-directory.
  const std::string type = isTraining ? "train" : "test";

  features.set_size(num_covs,2); // Column1: Cov Matrices, Column2: labels
  cout << "num_covs: " << num_covs << endl;

  for (uword i = 0; i < num_covs; ++i)
  {
    features(i,0) = covs.at(i);
    features(i,1) = lab_covs.at(i); // label stored as a 1x1 matrix
  }

  std::stringstream out_name;
  out_name << feat_path << type << "/"<< "all_actions";
  cout << "Saving in" << out_name.str()<< endl;
  features.save(out_name.str());
}


inline
void
opt_feat::nn_riemannian() // Only one covariance descriptor per video
{
  
  log_exp log_exp1;
  field<mat> features_testing;
  field<mat> features_training;
  
  std::stringstream tmp_test;
  tmp_test << feat_path << "test" << "/"<< "all_actions";
  cout << "Loading in" << tmp_test.str()<< endl;
  features_testing.load(tmp_test.str());
  int n_test = features_testing.n_rows;
  
  std::stringstream tmp_train;
  tmp_train << feat_path << "train" << "/"<< "all_actions";
  cout << "Loading in" << tmp_train.str()<< endl;
  features_training.load(tmp_train.str());
  int n_train = features_training.n_rows;
  
  mat cov_desc;
  
  field<mat> feat_log_training; // Precalculating the logarithm of all training covariance descriptors
  feat_log_training.set_size(n_train,2);
  
  int acc = 0; //accuracy
  for (uword tr = 0; tr < n_train; ++tr)
  { 
    //cout << "D and V #" << tr << endl;
    cov_desc = features_training(tr,0);
    feat_log_training(tr,0) = log_exp1.log_matrix(cov_desc);
    feat_log_training(tr,1) = features_training(tr,1); //Copying the labels.
    
    //cout << "Cov Descriptor_training "<< tr << endl;
    //cov_desc.print();
    //    
    //cout << "LOG_Cov Descriptor_training "<< tr << endl;
    //feat_log_training(tr,0).print();
    //getchar();
  }
  
  rowvec dist;
  dist.set_size(n_train);
  mat log_covMte;
  mat log_covMtr;
  log_exp log_exp2;
  
  
  for (uword te=0; te<n_test; ++te)
  {
    cov_desc   = features_testing(te,0);
    //cout << "Cov Descriptor_testing "<< te << endl;
    //cov_desc.print();
    //getchar();
    
    //cout << "LOG_Cov Descriptor_testing "<< te << endl;
    //cout << log_exp2.log_matrix(cov_desc) << endl;
    //getchar();
    
    log_covMte = log_exp1.log_matrix(cov_desc);  
    
    
    
    //cout << "Log_Testing "<< te << endl;
    //log_covMte.print();
    //getchar();
    for (uword tr=0; tr<n_train; ++tr)
    {
      log_covMtr = feat_log_training(tr,0);
      //cout << "Log_Training "<< tr << endl;
      //log_covMtr.print();
      //getchar();
      dist (tr) = norm( log_covMte - log_covMtr ,"fro");
      
    }
    
    //cout << "Distances for testing # " << te << endl;
    //dist.print();
    
    uword  index;
    double min_val = dist.min(index);
    cout << "index = " << index << endl;
    
    cout << "Testing sample label is " <<  features_testing(te,1) << ". It was classified as " << features_training(index,1) << endl;
    
    uword in_tr = conv_to<uword>::from(features_training(index,1));
    uword in_te = conv_to<uword>::from(features_testing(te,1));
    
    if ( in_te == in_tr )
    {  
      acc++;
    }
    
    //getchar();
    
    
    
  }
  cout << "Performance: " << acc*100/n_test << " %" << endl;
  
  
}
//     
//   
// }
// inline
// void 
// opt_feat::drawOptFlowMap(const cv::Mat& flow, cv::Mat& cflowmap, int step, double, const cv::Scalar& color)
// {
  //   for(int y = 0; y < cflowmap.rows; y += step)
  //     for(int x = 0; x < cflowmap.cols; x += step)
  //     {
    //       const cv::Point2f& fxy = flow.at<cv::Point2f>(y, x);
    //       cv::line(cflowmap, cv::Point(x,y), cv::Point(cvRound(x+fxy.x), cvRound(y+fxy.y)), color);
    //       cv::circle(cflowmap, cv::Point(x,y), 2, color, -1);
    //     }
    // }
    
    