#include "log_exp_def.hpp"
#include "log_exp_impl.hpp"

#include "kmeans_def.hpp"
#include "kmeans_impl.hpp"




//Features as per Andres' WACV paper

// Builds the feature extractor: stores the dataset paths, the frame
// geometry, and the fixed train/test split sizes (17 training and 8
// testing videos per action class).
inline
opt_feat::opt_feat(const std::string in_path, 
		   const std::string in_actionNames,  
		   const std::string in_feat_path,
		   const int in_col, 
		   const int in_row)
:path(in_path), actionNames(in_actionNames), feat_path(in_feat_path), col(in_col), row(in_row), n_samples_tr(17), n_samples_te(8)
{
  // Eigenvalue floor used when regularising covariance matrices.
  THRESH = 1e-6;
  // A frame contributes features only if it has more than 1% of the
  // frame area in moving pixels (integer arithmetic, truncates).
  THRESH_2 = (col * row) / 100;
  // Number of k-means clusters (covariance matrices) per video.
  N_cent = 32;
}


inline
void
opt_feat::training()// Este solo sirve para entreamiento, no para validacion. 
{
  cout << "# clusters: " << N_cent << endl;
  
  
  std::stringstream tmp_ss;
  tmp_ss << path << actionNames;
  actions.load(tmp_ss.str());
  //actions.print("All actions");
  
  int n_train = n_samples_tr*actions.n_rows;
  std::vector < vec > video_i;
  
  for (uword act = 0 ; act < actions.n_rows; ++act) {
    
    std::stringstream tmp_ss2;
    tmp_ss2 << path << actions(act)<<"/train/train_list.txt";
    //cout << tmp_ss2.str()<< endl;
    //getchar();
    videos.load(tmp_ss2.str());
    //videos.print("All videos");
    
    
    for (uword vi = 0; vi <videos.n_rows; ++vi ){ 
      std::stringstream tmp_ss3;
      tmp_ss3 << path << actions(act)<<"/train/"<<  videos(vi);
      cout << tmp_ss3.str()<< endl;
      feature_video(tmp_ss3.str()); //feat_video_i is calculated inside this method
      
      //cout << "Calculating features vector per video ..." << ;
      mat mat_features(15,feat_video_i.size());
      
      for (uword i = 0; i < feat_video_i.size(); ++i)
      {
	mat_features.col(i) = feat_video_i.at(i);
	
      }
      cout << "Kmeans ... ";
      kmeans km(mat_features, N_cent);
      km.run(10);
      
      field <mat> full_covs_vi;
      full_covs_vi = km.get_covs();

      std::stringstream tmp_ss4;
      tmp_ss4 << feat_path << "train" << "/fullCov_" << actions(act) << "_"<< vi+1; // vi Starting at 1
      cout << "Saving in" << tmp_ss4.str()<< endl;
      full_covs_vi.save(tmp_ss4.str());
      
    }
  }
  
  
  
}




inline
void
opt_feat::testing(){
  
    cout << "# clusters: " << N_cent << endl;

  
  log_exp log_exp1;
  double acc = 0; //accuracy
  int num_samples;
  num_samples = n_samples_te; 
  int n_test = actions.n_rows*num_samples;
  
  
  
  std::stringstream tmp_ss;
  tmp_ss << path << actionNames;
  actions.load(tmp_ss.str());
  

  int n_train = n_samples_tr*actions.n_rows;
  
  
  rowvec dist;
  rowvec lab_train;
  dist.set_size(n_train*N_cent); // each video has N_Cent cov matrices
  lab_train.set_size(n_train*N_cent); 
  mat log_covMte;
  mat log_covMtr;
  
  
  for (uword act = 0 ; act < actions.n_rows; ++act) {
    
    cout << actions(act) << endl;
    std::stringstream tmp_ss2;
    tmp_ss2 << path << actions(act)<<"/test/test_list.txt";
    //cout << tmp_ss2.str()<< endl;
    //getchar();
    videos.load(tmp_ss2.str()); 
    
    for (uword vi = 0; vi <videos.n_rows; ++vi ){ 
      
      //cout << "Size: " << covs.size() << endl;
      std::stringstream tmp_ss3;
      tmp_ss3 << path << actions(act)<<"/test/"<<  videos(vi);
      
      //cout << tmp_ss3.str()<< endl;
      feature_video(tmp_ss3.str()); //feat_video_i is calculated inside this method
      
      //cout << "Calculating features vector per video " << endl;
      //cout << "Features...";
      mat mat_features(15,feat_video_i.size());
      
      for (uword i = 0; i < feat_video_i.size(); ++i)
      {
	mat_features.col(i) = feat_video_i.at(i);
	
      }
      //cout << "Kmeans ... ";
      kmeans km(mat_features, N_cent);
      km.run(10);//max_iter
      
      
      field<mat> full_covs_test = km.get_covs();
      vec count = zeros<vec>(actions.n_rows);
      
      for (uword i = 0; i < N_cent; ++i) // Each covariance matrix per video testing_vi is classified
      {
	
	int con = 0;
	mat cov_i_te = full_covs_test(i);
	
	//*****************************

	cov_i_te = 0.5*(cov_i_te + cov_i_te.t());
	vec D;
	mat V;
	eig_sym(D, V, cov_i_te);
	uvec q1 = find(D < THRESH);
	if (q1.n_elem>0)
	{
	  for (uword pos = 0; pos < q1.n_elem; ++pos)
	  {
	    D( q1(pos) ) = THRESH;
	  }
	  //cout << "***cov_hat***" << endl;
	  cov_i_te = V*diagmat(D)*V.t();  //
	}  
	log_covMte = log_exp1.log_matrix(cov_i_te);

	//*****************************

	log_covMte = log_exp1.log_matrix(cov_i_te);
	dist.zeros();
	lab_train.zeros();
		
	for (uword act_tr = 0 ; act_tr < actions.n_rows; ++act_tr) {
	  for (uword tr = 0; tr <n_samples_tr; ++tr ){ 
	    
	    std::stringstream tmp_full;
	    tmp_full << feat_path << "train" << "/fullCov_" << actions(act_tr) << "_" << tr+1; // starting at 1
	    //cout << "Comparing with: " << tmp_full.str() << endl;
	    field<mat> full_covs_tr;
	    full_covs_tr.load(tmp_full.str());
	    
	    for (uword k = 0; k < N_cent; ++k){
	      //cout << "i: " << i << ". j: " << j << ". k: " << k << endl;
	      mat cov_i_tr = full_covs_tr(k);
	      
	      //*****************************
	      cov_i_tr = 0.5*(cov_i_tr + cov_i_tr.t());
	      vec D;
	      mat V;
	      eig_sym(D, V, cov_i_tr);
	      uvec q1 = find(D < THRESH);
	      if (q1.n_elem>0)
	      {
		for (uword pos = 0; pos < q1.n_elem; ++pos)
		{
		  D( q1(pos) ) = THRESH;
		}
		//cout << "***cov_hat***" << endl;
		cov_i_tr = V*diagmat(D)*V.t();  //
	      }  
	      //*****************************

	      
	      
	      //cout << "aqui 1" << endl;
	      log_covMtr = log_exp1.log_matrix(cov_i_tr);
	      //cout << "aqui 2" << endl;
	      dist ( con ) = norm( log_covMte - log_covMtr ,"fro");
	      
	      lab_train( con ) = act_tr;
	      //cout << "This is action: " << lab_train( con ) << endl;
	      //cout << "aqui 5" << endl;
	      con++;
	      
	    }
	    
	  }
	  
	}
	//lab_train.print("labels training:");
	//getchar();
	
	//lab_train.print("lab_train");
	uword  index;
	double min_val = dist.min(index);
	//dist.print("dist: ");
	//cout << "index = " << index << endl;
	uword  est_class = lab_train(index);
	count(est_class)++;
	
	
      }
      
      uword  index_video;
      double max_val = count.max(index_video);
      //count.t().print("count:");
      cout << "This video is " << actions(act) << " and was classified as class: " << actions(index_video ) << endl;
      //getchar();
      if (index_video == act)
      {
	acc++;
      }
    }
  }
  cout << "Performance: " << acc*100/n_test << " %" << endl;
}


// Extracts the per-pixel feature vectors of one video into the member
// feat_video_i (cleared first). For every frame that has both a previous
// grey image and a previous optical-flow field, each pixel with gradient
// magnitude > 40 contributes a 15-D vector:
//   [x, y, t, |Ix|, |Iy|, |Ixx|, |Iyy|, gm, gd, u, v, |ut|, |vt|,
//    (ux - vy), (vx - uy)]
// A frame's vectors are kept only if there are more than THRESH_2 of them.
inline 
void
opt_feat::feature_video(std::string one_video)
{
  feat_video_i.clear();
  
  std::vector < vec > feat_frame; // per-frame accumulator
  
  cv::VideoCapture capVideo(one_video);
  if( !capVideo.isOpened() )
  {
    cout << "Video couldn't be opened" << endl;
    return;
  }
  
  cv::Mat prevgray, gray, flow, cflow, frame, prevflow;
  cv::Mat ixMat, iyMat, ixxMat, iyyMat;
  int t = 0; // frame index (temporal coordinate of the features)
  
  for(;;){
    
    bool bSuccess = capVideo.read(frame); // read a new frame from video
    if (!bSuccess) // end of video
    {
      break;
    }
    t++;
    cv::cvtColor(frame, gray, CV_BGR2GRAY);
    
    // Need a previous grey frame before optical flow can be computed.
    if( prevgray.data )
    {
      cv::calcOpticalFlowFarneback(prevgray, 
				   gray, 
				   flow, 
				   0.5, //pyr_scale
				   3,   //levels
				   9,   //winsize
				   1,   //iterations
				   5,   //poly_n
				   1.1, //poly_sigma
				   0);  //flags
      
      // First and second spatial derivatives of the current grey frame.
      cv::Sobel(gray, ixMat, CV_32F, 1, 0, 1);
      cv::Sobel(gray, iyMat, CV_32F, 0, 1, 1);
      cv::Sobel(gray, ixxMat, CV_32F, 2, 0, 1);
      cv::Sobel(gray, iyyMat, CV_32F, 0, 2, 1);
      
      float ux = 0, uy = 0, vx = 0, vy = 0;
      float u, v;
      
      // Temporal flow derivatives need two consecutive flow fields.
      if( prevflow.data )
      {
	for (uword x = 0 ; x < col ; ++x ){
	  for (uword y = 0 ; y < row ; ++y ) {
	    
	    vec features_one_pixel(15);
	    u = flow.at<cv::Vec2f>(y, x)[0];
	    v = flow.at<cv::Vec2f>(y, x)[1];
	    
	    float ix = ixMat.at<float>(y, x);
	    float iy = iyMat.at<float>(y, x);
	    
	    // Gradient direction and magnitude.
	    float gd = std::atan2(std::abs(iy), std::abs(ix));
	    float gm = std::sqrt(ix * ix + iy * iy);
	    
	    float ixx = ixxMat.at<float>(y, x);
	    float iyy = iyyMat.at<float>(y, x);
	    
	    float ut = u - prevflow.at<cv::Vec2f>(y, x)[0]; // du/dt
	    float vt = v - prevflow.at<cv::Vec2f>(y, x)[1]; // dv/dt
	    
	    // Spatial flow differences (backward differences; left/top
	    // border pixels keep the previous values, 0 on first use).
	    if (x>0 && y>0 )
	    {
	      ux = u - flow.at<cv::Vec2f>(y, x - 1)[0];
	      uy = u - flow.at<cv::Vec2f>(y - 1, x)[0];
	      vx = v - flow.at<cv::Vec2f>(y, x - 1)[1];
	      vy = v - flow.at<cv::Vec2f>(y - 1, x)[1];
	    }
	    
	    // BUG FIX: |ut| used to be inserted twice and vt was computed
	    // but never used — the 13th feature is |vt|. std::abs is used
	    // throughout so the float arguments cannot be truncated by the
	    // int overload of ::abs.
	    // NOTE(review): divergence is conventionally ux + vy, not
	    // ux - vy — confirm the sign against the WACV paper.
	    features_one_pixel << x << y << t
			       << std::abs(ix) << std::abs(iy)
			       << std::abs(ixx) << std::abs(iyy)
			       << gm << gd << u << v
			       << std::abs(ut) << std::abs(vt)
			       << (ux - vy) << (vx - uy);
	    
	    if (!is_finite( features_one_pixel ) )
	    {
	      cout << "It's not FINITE... continue???" << endl;
	      getchar(); 
	    }
	    
	    // Keep only "moving" pixels (threshold empirically set to 40)
	    // and mark them in red on the frame for optional display.
	    if (gm>40)
	    {
	      frame.at<cv::Vec3b>(y,x)[0] = 0;
	      frame.at<cv::Vec3b>(y,x)[1] = 0;
	      frame.at<cv::Vec3b>(y,x)[2] = 255;
	      feat_frame.push_back(features_one_pixel);
	    }
	  }
	}
      }
    }
    // NOTE(review): this waits 30 ms per frame even though no window is
    // shown; it is kept because it allows aborting with a key press.
    if(cv::waitKey(30)>=0)
      break;
    std::swap(prevgray, gray);
    std::swap(prevflow, flow);
    
    // Discard frames with too few moving pixels (<= 1% of the frame area).
    if (feat_frame.size() > THRESH_2) 
    {
      feat_video_i.insert(feat_video_i.end(), feat_frame.begin(), feat_frame.end()  );
    }
    feat_frame.clear();
  }
}




