#include "log_exp_def.hpp"
#include "log_exp_impl.hpp"




//Features as per Andres' WACV paper

// Constructor: wires up the dataset layout and frame geometry supplied by
// the caller, then fixes the tuning constants used by the feature pipeline.
//   in_path        - root directory of the dataset
//   in_actionNames - file listing the action names
//   in_feat_path   - directory holding the pre-computed training descriptors
//   in_col/in_row  - frame width/height in pixels
inline
opt_feat::opt_feat(const std::string in_path,
                   const std::string in_actionNames,
                   const std::string in_feat_path,
                   const int in_col,
                   const int in_row)
  : path(in_path),
    actionNames(in_actionNames),
    feat_path(in_feat_path),
    col(in_col),
    row(in_row),
    n_samples_tr(17),   // training samples per action
    n_samples_te(8)     // test samples per action
{
  // Number of cluster centres (covariance descriptors) per training video.
  N_cent = 16;
  // Eigenvalue floor used to regularise near-singular covariance matrices.
  THRESH = 0.000001;
  // Pixel-count threshold: 1% of the frame area (integer arithmetic).
  THRESH_2 = col*row*1/100;
}



inline
void
opt_feat::training()
{
  // Intentionally empty. Original note (translated from Spanish): this one
  // is only useful for training, not for validation. It was taken from G2;
  // it is not needed here.
  
  
}



inline
void
opt_feat::testing(){
  // Classifies every covariance descriptor ("segment") of each multi-action
  // test video against the per-action training descriptors stored under
  // feat_path, and prints per-video accuracy to stdout.
  //
  // Two decision rules are evaluated for each segment:
  //   (1) est_label  - label of the single closest training covariance
  //                    (nearest neighbour over all actions/samples/centres);
  //   (2) est_label2 - label of the action with the smallest AVERAGE distance
  //                    over its n_samples_tr * N_cent training covariances.
  //
  // The distance (L214-216 style) uses det() of diagmat() terms, i.e. it
  // appears to be a diagonal approximation of the Stein / Jensen-Bregman
  // LogDet divergence — TODO confirm against the paper.
  
    cout << "# clusters: " << N_cent << endl;

  
  //log_exp log_exp1;
 
  int num_samples;
  num_samples = n_samples_te; 
  
  
  
  
  // Load the list of action names: <path><actionNames>.
  std::stringstream tmp_ss;
  tmp_ss << path << actionNames;
  actions.load(tmp_ss.str());
  int n_test = actions.n_rows*num_samples;   // NOTE(review): unused below

  int n_train = n_samples_tr*actions.n_rows;
  
  
  // Flat buffers over ALL training covariances (reused for every segment).
  rowvec dist_stein;
  rowvec lab_train;
  dist_stein.set_size(n_train*N_cent); // each video has N_Cent cov matrices
  lab_train.set_size(n_train*N_cent); 
  mat log_covMte;   // NOTE(review): unused
  mat log_covMtr;   // NOTE(review): unused
  
  
 uword act = 0 ; /// TODO: change (translated from Spanish) — unused
 
    
/// Load the list with the names of the multi-action videos
/// (translated from Spanish).
    std::stringstream tmp_ss2;
    tmp_ss2 << path << "/multi_list.txt";
    
     std::stringstream tmp_ss3;
     tmp_ss3 << path << "/list_labels.txt";
     
     std::stringstream tmp_ss4;
     tmp_ss4 << path << "/person_list.txt";
    //cout << tmp_ss2.str()<< endl;
    //getchar();
    videos.load(tmp_ss2.str()); 
    field<std::string> labels;     // per-video frame-label file names
    labels.load(tmp_ss3.str()); 
    
    field<std::string> person;     // per-video person id (used in file names)
    person.load(tmp_ss4.str());
    
    // One pass per test video.
    for (uword vi = 0; vi <videos.n_rows; ++vi ){ 
       double acc = 0; //accuracy (nearest-neighbour rule)
       double acc2 = 0; //accuracy (average-distance rule)
       covs.clear();
       lab_covs.clear();
      
      //cout << "Size: " << covs.size() << endl;
      std::stringstream tmp_ss3;
      tmp_ss3 << path << videos(vi);
      cout << tmp_ss3.str()<< endl;

      std::stringstream tmp_lbs;
      tmp_lbs << path << labels(vi);
      cout << tmp_lbs.str()<< endl;

      cout << "Calculating Overlapping Covariance Descriptors" << endl;
      ///Saving covs and lab_covs for current video
      //If already saved. Just load and comment feature_video(tmp_ss3.str(),tmp_lbs.str() );
      
      
      
      ///When I need to calculate covariance descriptors for each video
//	Ini
//       feature_video(tmp_ss3.str(),tmp_lbs.str() ); //covs and lab_covs are calculated inside
//          
//       field<mat> arma_covs( covs.size() );
//       field<vec> arma_lab_covs( covs.size() );
//       
//       for (uword ci = 0; ci < covs.size(); ++ci) 
//       {
// 	arma_covs(ci) = covs.at(ci);
// 	arma_lab_covs(ci) = lab_covs.at(ci);
// 	
//       }
//       
//       
//       std::stringstream save_multicovs;
//       save_multicovs<< "./multi/per_" << person(vi) << "_covs";
//       cout << save_multicovs.str()<< endl;
//       arma_covs.save(save_multicovs.str());
//      
//       
//       std::stringstream save_multilabels;
//       save_multilabels<< "./multi/per_" << person(vi) << "_lab_covs";
//       cout << save_multilabels.str()<< endl;
//       arma_lab_covs.save(save_multilabels.str());
//       cout << "Done" << endl;
//	end
      
    
      
//       ///When covs and lab_covs already calulated:
      //ini
      // Load pre-computed descriptors and labels for this video from ./multi/.
       field<mat> arma_covs;
       field<vec> arma_lab_covs;
       
       covs.clear();
       lab_covs.clear();//labels
       
       std::stringstream save_multicovs;
       save_multicovs<< "./multi/per_" << person(vi) << "_covs";
       cout << save_multicovs.str()<< endl;
       arma_covs.load(save_multicovs.str());
      cout << "Loading " << save_multicovs.str() << endl;
       
       std::stringstream save_multilabels;
       save_multilabels<< "./multi/per_" << person(vi) << "_lab_covs";
       cout << save_multilabels.str()<< endl;
       arma_lab_covs.load(save_multilabels.str());
       
       // Copy the loaded fields into the std containers used below.
       for (uword ci = 0; ci < arma_covs.n_rows; ++ci) 
       {
 	covs.push_back( arma_covs( ci ) );
 	uword ll = conv_to<uword>:: from( arma_lab_covs(ci) );
 	lab_covs.push_back( ll );
 	
       }
       cout << "Done" << endl;
       cout <<covs.size() << endl;
//        // end
      
      
      
      // Classify each segment of this video.
      for (uword i = 0; i < covs.size(); ++i) // (translated) covs.size() was changed to 0 when only saving the covs per video
      {
	
	int con = 0;                 // flat index into dist_stein / lab_train
	mat cov_i_te = covs.at(i);   // test covariance for this segment

	dist_stein.zeros();// (translated) kept/reused across segments
	lab_train.zeros();// (translated) kept/reused across segments
	
	rowvec dist_action_i;        // per-action average distance
	dist_action_i.zeros(actions.n_rows);
		
	for (uword act_tr = 0 ; act_tr < actions.n_rows; ++act_tr) {
	  double sum_dist = 0;
	  
	  for (uword tr = 0; tr <n_samples_tr; ++tr ){ 
	    
	    std::stringstream tmp_full;
	    tmp_full << feat_path << "train" << "/fullCov_" << actions(act_tr) << "_" << tr+1; // starting at 1
	    //cout << "Comparing with: " << tmp_full.str() << endl;
	    field<mat> full_covs_tr;
	    full_covs_tr.load(tmp_full.str());
	    
	    for (uword k = 0; k < N_cent; ++k){
	      
	      //cout << "act_tr: " << act_tr << ". tr: " << tr << ". k: " << k << endl;
	      mat cov_i_tr = full_covs_tr(k);
	      
	      //*****************************
	      // Regularise: symmetrise, then floor small eigenvalues at THRESH
	      // so the log-determinants below stay finite.
	      cov_i_tr = 0.5*(cov_i_tr + cov_i_tr.t());
	      vec D;
	      mat V;
	      eig_sym(D, V, cov_i_tr);
	      uvec q1 = find(D < THRESH);
	      if (q1.n_elem>0)
	      {
		for (uword pos = 0; pos < q1.n_elem; ++pos)
		{
		  D( q1(pos) ) = THRESH;
		}
		//cout << "***cov_hat***" << endl;
		cov_i_tr = V*diagmat(D)*V.t();  //
	      }  
	      //*****************************
	      // Stein-like distance on the DIAGONALS only:
	      // sqrt( log det(diag((A+B)/2)) - 0.5*log det(diag(A .* B)) ).
	      double det_op1 = det( diagmat( (cov_i_tr + cov_i_te)/2 ) );
	      double det_op2 = det( diagmat( ( cov_i_tr%cov_i_te ) ) );
	      dist_stein (con) = sqrt ( log( det_op1 ) -0.5*log( det_op2 ) );
	      sum_dist = sum_dist + dist_stein (con);
	    	      
	      lab_train( con ) = act_tr;
	      con++;
	      
	    }
	    
	  }
	  // Average distance from this segment to action act_tr.
	  sum_dist = sum_dist/(N_cent*n_samples_tr);
	  dist_action_i ( act_tr ) = sum_dist;
	  //dist_action_i.print("dist_action");
	
	}
	
	// Rule (1): nearest single training covariance.
	uword  index;
	double min_val = dist_stein.min(index);   // NOTE(review): min_val unused
	uword real_label = lab_covs.at(i);
	uword est_label = lab_train(index);
	
	// Rule (2): action with the smallest average distance.
	uword  index_act;
	double min_val_act = dist_action_i.min(index_act);   // NOTE(review): min_val_act unused

	uword est_label2 = index_act;
	
	
	
	
	//dist_stein.print("dist_stein");
	//cout << "min= " << min_val << ". index= " << index << endl;
	//getchar();
	
	//cout << "Que pasa" << endl;
	//lab_train.print("labels training:");
	//getchar();
	
	//lab_train.print("lab_train");
	
	

	
	//cout << "real_label : " << real_label   << endl;
	//cout << "est_label : " << est_label   << endl;
	 //cout << "This segment starting at " << i << " is '" << actions(real_label) << "' and was classified as class: '" << actions(est_label) << "' " << endl;
	 //cout << "             Using average distance, it was classified as class: '" << actions(est_label2) << "' " << endl << endl;

	 
	 if (est_label==real_label)
	 {
	   acc++;
	 }
	 
	 if (est_label2==real_label)
	 {
	   acc2++;
	 }
	 
	
	
	
	
      }
    
    // Per-video accuracy over all segments (frame basis).
    cout << "Doing for: " << videos(vi) << endl;
    cout << "Performance in a frame basis " << acc/covs.size()<< endl;
    cout << "Performance in a frame basis. Average Distance " << acc2/covs.size()<< endl;
    cout << "Press a key " << endl;
    //getchar();
    }
}




// Computes overlapping covariance descriptors for one video.
//
// For every starting frame i, a segment of L frames is processed: dense
// Farneback optical flow plus image gradients yield a 15-dimensional feature
// vector per "moving" pixel (gradient magnitude > 40). The running statistics
// of those vectors give one covariance matrix per segment, which is
// regularised (eigenvalue floor THRESH) and appended to this->covs; its label
// (majority frame label within the segment) is appended to this->lab_covs.
// Segments with fewer than col*row/20 moving pixels are discarded.
//
// BUGFIX: the original feature vector pushed abs(ut) twice and never used
// vt (dv/dt); the second entry is now abs(vt) as the 15-feature set implies.
//
//   one_video       - path to the video file (OpenCV-readable)
//   labels_onevideo - path to a rowvec file with one action label per frame
inline 
void
opt_feat::feature_video(std::string one_video, std::string labels_onevideo) 
{
  
  cv::VideoCapture capVideo(one_video);
  //cout << one_video << endl;
  
  rowvec arma_multi_labels;
  arma_multi_labels.load(labels_onevideo); //labels are in a frame basis.
  
  int num_frames = capVideo.get(CV_CAP_PROP_FRAME_COUNT);
  int L = 25; // L: segment length
  
  //double fps = capVideo.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
  //cout << "Frame per seconds : " << fps << endl;
  
  //cv::namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
  
  //double frmcount = capVideo.get(CV_CAP_PROP_FRAME_COUNT);
  //cout << "# of frames is: " << frmcount << endl;
  
  if( !capVideo.isOpened() )
  {
    cout << "Video couldn't be opened" << endl;
    return;
  }
  
  
  //cv::namedWindow("My Video", 1);
  // Running mean/covariance accumulator over per-pixel feature vectors.
  running_stat_vec<vec> stats_video(true);
  //cout << "Frame: ";
  int t = 0;          // absolute frame index (drives the frame-label lookup)
  int N_vectors = 0;  // number of moving-pixel vectors accumulated so far
  int n_segm = 0;     // frames processed within the current segment
  
  
  cout << endl;
  // Overlapping segments: one starting at every frame i.
  for (int i = 0; i< num_frames - L-1 ; i++)
  {
    
    capVideo.set(CV_CAP_PROP_POS_FRAMES,i);
    cv::Mat prevgray, gray, flow, cflow, frame, prevflow;
    cv::Mat ixMat, iyMat, ixxMat, iyyMat;
    
    stats_video.reset();
    N_vectors = 0;
    n_segm = 0;
    
    //cout << "L= " << L << endl;
    
    // Per-frame vote counts: the segment is labelled with the most frequent
    // frame label inside it. (Translated from Spanish.)
    vec counts(actions.n_rows); 
    counts.zeros();
    // L+2 reads: the first frame seeds prevgray, the second seeds prevflow,
    // leaving L flow frames that contribute features.
    for( int j = 0; j<L+2; j++) //< L
    {
      
      //capVideo >> frame;
      //cout << " " << capVideo.get(CV_CAP_PROP_POS_FRAMES) ;
      //cout << "count: " << stats_video.count() << endl;
      t = capVideo.get(CV_CAP_PROP_POS_FRAMES);
      //cout << "t= " << t << endl;
    
      bool bSuccess = capVideo.read(frame); // read a new frame from video
      
      if (!bSuccess) //if not success, break loop
      	{
	  cout << "Cannot read the frame from video file" << endl;
	  break;
	}
	
      cv::cvtColor(frame, gray, CV_BGR2GRAY);
      
      if( prevgray.data )
      {
	// Dense optical flow between the previous and current frame.
	cv::calcOpticalFlowFarneback(prevgray, 
				     gray, 
			      flow, 
			      0.5, //pyr_scale
			      3,   //levels
			      9,   //winsize
			      1,   //iterations
			      5,   //poly_n
			      1.1, //poly_sigma
			      0);  //flags
	//cv::calcOpticalFlowFarneback(bl_currentImg, bl_nextImg, flow, 0.5,  3, 5, 3, 5, 1.2, 0); 
	//cv::cvtColor(prevgray, cflow, CV_GRAY2BGR);
	//drawOptFlowMap(flow, cflow, 8, 1.5, CV_RGB(0, 255, 0));
	//cv::imshow("flow", cflow);
	
	// First- and second-order spatial gradients of the current frame.
	cv::Sobel(gray, ixMat, CV_32F, 1, 0, 1);
	cv::Sobel(gray, iyMat, CV_32F, 0, 1, 1);
	cv::Sobel(gray, ixxMat, CV_32F, 2, 0, 1);
	cv::Sobel(gray, iyyMat, CV_32F, 0, 2, 1);
	
	float  ux = 0, uy = 0, vx = 0,  vy = 0;
	float u, v;
	//cout << "col: " << col << "- row " << row << endl;
	
	// Temporal derivatives need the previous flow field, so features are
	// only accumulated from the second flow frame onward.
	if( prevflow.data )
	{
	  
	  int lb = arma_multi_labels(t);
	  counts(lb)++;
	  
	  // Close out the segment: build and store its covariance descriptor.
	  // NOTE(review): this fires when n_segm == L-1, i.e. before this
	  // frame's pixels are accumulated — confirm the intended off-by-one.
	  if (n_segm == L - 1)
	  {
	    //cout << "n_segm= " << n_segm;
	    //cout << ". N_vectors = " << N_vectors;
	    //cout << "count: " << stats_video.count() << endl;
	    mat cov = stats_video.cov();
	    // Keep the segment only if enough moving pixels (5% of the frame).
	    if (N_vectors > col*row/20)
	    {
	      //Following Mehrtash suggestions as per email dated June26th 2014:
	      // symmetrise and floor small eigenvalues at THRESH.
	      cov = 0.5*(cov + cov.t());
	      vec D;
	      mat V;
	      eig_sym(D, V, cov);
	      uvec q1 = find(D < THRESH);
	      if (q1.n_elem>0)
	      {
		for (uword pos = 0; pos < q1.n_elem; ++pos)
		{
		  D( q1(pos) ) = THRESH;
		}
		cov= V*diagmat(D)*V.t();
	      }  
	      // Majority vote over the frame labels seen in this segment.
	      uword index_video;
	      counts.max(index_video);
	      //counts.t().print("this segment labels");
	      //getchar();
    
      
	      covs.push_back(cov);
	      //cout << "this segment is " << actions(index_video)  << endl;
	      lab_covs.push_back(index_video);
	            
	    }
	    else{
	      //cout << ". Covariance discarded.";
	    }
	    

	    
	  }
	  
	  // Accumulate one 15-D feature vector per moving pixel.
	  for (uword x = 0 ; x < col ; ++x ){
	    for (uword y = 0 ; y < row ; ++y ) {
	      
	      vec features_one_pixel(15);
	      u = flow.at<cv::Vec2f>(y, x)[0];
	      v = flow.at<cv::Vec2f>(y, x)[1];
	      
	      
	      float ix = ixMat.at<float>(y, x);
	      float iy = iyMat.at<float>(y, x);
	      
	      // grad direction &  grad magnitude
	      float gd = std::atan2(std::abs(iy), std::abs(ix));
	      float gm = std::sqrt(ix * ix + iy * iy);
	      
	      // x second grad
	      float ixx = ixxMat.at<float>(y, x);
	      // y second grad
	      float iyy = iyyMat.at<float>(y, x);
	      //du/dt
	      float ut = u - prevflow.at<cv::Vec2f>(y, x)[0];
	      // dv/dt
	      float vt = v - prevflow.at<cv::Vec2f>(y, x)[1];
	      // Spatial flow derivatives (backward differences) for
	      // divergence (ux - vy) and vorticity (vx - uy); zero on the
	      // first row/column where no neighbour exists.
	      if (x>0 && y>0 )
	      {
		ux = u - flow.at<cv::Vec2f>(y, x - 1)[0];
		uy = u - flow.at<cv::Vec2f>(y - 1, x)[0];
		vx = v - flow.at<cv::Vec2f>(y, x - 1)[1];
		vy = v - flow.at<cv::Vec2f>(y - 1, x)[1];
	      }
	      //int x_submat = x + rec.x;
	      //int y_submat = y + rec.y;
	      
	      // 15 features per pixel. BUGFIX: abs(vt) replaces the original
	      // duplicated abs(ut); vt was computed but never used.
	      features_one_pixel  << x << y << t << abs(ix) << abs(iy) << abs(ixx) 
	      << abs(iyy) << gm << gd <<  u << v << abs(ut) 
	      << abs(vt) << (ux - vy)  << (vx - uy);
	      //features_one_pixel.t().print("Features Current Pixel: ");
	      //getchar();
	      
	      if (!is_finite( features_one_pixel ) ){
		cout << "It's not FINITE... continue???" << endl;
		getchar(); 
	      }
	      
	      // Only "moving" pixels contribute; threshold empirically set to 40.
	      if (gm>40) {
		// Mark the pixel red in the frame (visualisation only).
		frame.at<cv::Vec3b>(y,x)[0] = 0;
		frame.at<cv::Vec3b>(y,x)[1] = 0;
		frame.at<cv::Vec3b>(y,x)[2] = 255;
		stats_video(features_one_pixel);
		N_vectors++;}
	    }
	  }
	  n_segm++;
	}
      }
      
      if(cv::waitKey(30)>=0)
	break;
      //cout << " t: " <<t;
	std::swap(prevgray, gray);
	// (Translated) here is the problem... when is there flow?
	std::swap(prevflow, flow);
	
	
	//cv::imshow("color", frame);
	//cv::waitKey(5);
	
    }
  }
}  




