
//Features as per Andres' WACV paper

/// Constructor.
///
/// @param in_path         base path of the input data
/// @param in_actionNames  text file listing the action names (loaded into `actions`)
/// @param in_feat_path    directory where feature files are read/written
/// @param in_col          frame width  (columns), in pixels
/// @param in_row          frame height (rows), in pixels
/// @param in_n_cent       number of reference covariances (centroids) per action
///
/// NOTE(review): n_samples_tr = 17 and n_samples_te = 8 are hard-coded —
/// presumably dataset-specific train/test split sizes; confirm against callers.
inline
opt_feat::opt_feat(const std::string in_path, 
		   const std::string in_actionNames,  
		   const std::string in_feat_path,
		   const int in_col, 
		   const int in_row,
		   const int in_n_cent
)
:path(in_path), actionNames(in_actionNames), feat_path(in_feat_path), col(in_col), row(in_row), n_samples_tr(17), n_samples_te(8), N_cent(in_n_cent)
{
  THRESH = 0.000001;         // eigenvalue floor used to keep covariance matrices SPD
  THRESH_2 = col*row*1/100;  // ~1% of the frame's pixels (integer arithmetic is intentional)
   L = 25;                   // temporal window length: frames per segment
  
  actions.load( actionNames );  //actions.print("All actions");
}







//*****************************************************************************************
///***************************************TESTING***************************************** 
//*****************************************************************************************


/// Test-phase entry point: classifies the multi-action sequences under
/// `path_multi` using the pre-trained SVM model `model_name`.
/// Feature extraction (features_testing_overlapping) is assumed to have
/// been run beforehand; its call remains disabled below.
inline
void
opt_feat::testing_svm(std::string path_multi, std::string model_name)
{
  //features_testing_overlapping( path_multi );
  svm_predict( model_name, path_multi );
}

///inline void features_testing

/// Runs the overlapping-window feature extraction over every test video
/// listed in <path_multi>/multi_list.txt. For each video it loads the
/// per-frame ground-truth labels (list_labels.txt) into `arma_multi_labels`
/// and calls dist_overlapping_frame(), which computes and saves the
/// per-frame distance vectors.
/// @param path_multi  directory containing multi_list.txt and list_labels.txt
inline
void
opt_feat::features_testing_overlapping(std::string path_multi)
{

  
  std::stringstream tmp_ss2;
  tmp_ss2 << path_multi << "/multi_list.txt";
  
  std::stringstream tmp_ss3;
  tmp_ss3 << path_multi << "/list_labels.txt";
  
  videos.load(tmp_ss2.str()); 
  field<std::string> labels;
  labels.load(tmp_ss3.str()); 
  
  
  field<std::string> person;
  person.load("person_list.txt");
  
  
  
  /// NOTE (original: "OJO: yo ya calcule para v=0" — "careful: I already
  /// computed for v=0"): the loop starts at vi = 3, presumably because the
  /// first videos were processed in an earlier run — TODO confirm before reuse.
  for (uword vi = 3; vi <videos.n_rows; ++vi ){ 
    
    
    std::stringstream tmp_lbs;
    tmp_lbs << path_multi << labels(vi);
    arma_multi_labels.load(tmp_lbs.str()); //labels are on a per-frame basis
       
    std::stringstream tmp_ss;
    tmp_ss << path_multi << videos(vi);
    
    cout << "****************************************************" << endl;
    cout << "****************************************************" << endl;
    cout << "Calculating for video " << videos(vi) << endl;
    dist_overlapping_frame( tmp_ss.str(), person(vi) ); //covs and label_multivideo are computed here
    
  }
}



///inline svm_predict
/// TODO: this needs to change (original note: "este debe cambiar")
inline
void
opt_feat::svm_predict(std::string model_name, std::string path_multi)
{
  
  cout << "loading SVM model for multi-action: ";
  
  std::stringstream tmp;
  tmp << "./svm_model/"<< model_name;
  cout << tmp.str() << endl;
  
  CvSVM SVM;
  SVM.load( tmp.str().c_str() );
  
  field<std::string> person;
  person.load("person_list.txt");
  
 
  vec dist_stein;
  int dim = N_cent*actions.n_rows;
  float average = 0;
  
  std::stringstream tmp_ss3;
  tmp_ss3 << path_multi << "/list_labels.txt";
  field<std::string> labels;
  labels.load(tmp_ss3.str()); 
  rowvec est_label;
  rowvec real_label;
  
  
  for (uword pi = 0; pi <person.n_rows; ++pi ){ //person.n_rows
    
    
    
    double acc = 0; //accuracy
       
    std::stringstream tmp_lbs;
    tmp_lbs << path_multi << labels(pi);
    arma_multi_labels.load(tmp_lbs.str()); //labels are in a frame basis.
    //arma_multi_labels.print("Labels");
    
    int total_frames = arma_multi_labels.n_elem;
    real_label = arma_multi_labels;
    cout << "Total frames: " << total_frames  << endl;
    est_label.zeros(total_frames);
   
 
    
    
    
    ///OJO!!!!!!!!!!!!!!!!!!!!!!
    uword last = total_frames - (L + 2);
    cout << "last: " << last << endl;
    //getchar();
     
    for (uword fr=L+1; fr<last; fr++ )
    {    
    
    
    std::stringstream test_fr;
    test_fr << feat_path << "test/per" << person(pi) << "_fr"<<fr << ".dat";//haciendo para 18
    //cout << "Loading from " << test_fr.str() << endl;
    field<fvec> dist_frame_fr;
    dist_frame_fr.load( test_fr.str() );
    
    //clasificar cada vector en dist_frame_fr. Usar majority vote rule 
    
    vec fr_labels;
    fr_labels.zeros(actions.n_rows);
    //cout << "                   " << dist_frame_fr.n_elem << endl;
   
    for (uword di = 0; di<dist_frame_fr.n_elem; ++di)
    {
      
      fvec vec_dist_frame= dist_frame_fr(di);
      float fl_dist[dim] ;
      ///pq  vec_dist_frame para person20 in d = 26 is size [1x0]????
      
      uword res;
      if (vec_dist_frame.n_elem == dim)
      {
      
      //cout << "d= " ;
      for (uword d = 0; d < dim; ++d)
      {
	//cout << " " << d ;
	fl_dist[d] = vec_dist_frame(d);
	
      }
      cv::Mat sampleMat(dim, 1, CV_32FC1,fl_dist );
      float response = SVM.predict(sampleMat);
      res = response; 
      fr_labels(res)++;
      }
      else
      {
	//cout << "Loading from " << test_fr.str() << endl;
	//cout << "di " << di << endl;
	vec_dist_frame.t().print("??????????");
	//getchar();
      }
      
      
      //cout << ": response " << response << " res " << res << endl;
     
      
    }
    
    
    //fr_labels.t().print("");
    uword  index;
    double max_val = fr_labels.max(index);
    
    est_label(fr) = index;
    
    
    /// The first L+1 frames are assigned to the same label as per frame L+2
    ///The last L+1 frames are assigend to the same last estimated label  
    if ( fr == L+1 )
    {
      for (uword x = 0; x <= L+1; ++x) 
      {
	est_label(x) = index;
	
      }
      
    }
    
       if ( fr == last-1 )
       {
	 for (uword x = last-1; x < total_frames; ++x) 
	 {
	   est_label(x) = index;
	}
      }
    }
    
  urowvec comparison = (real_label == est_label);
  //comparison.print("comparison");
  
  uvec good = find( comparison == 1 ) ;
  //good.t().print("good");
  double N_good = good.n_elem;
  //    cout << "Gamma " << setprecision(4) << fixed << params.gamma << endl;

  cout << "Performance for person " << person(pi) << " is: " << setprecision(2) << fixed << N_good/total_frames*100 << " %" << endl;
  
  getchar();
  
  }
  
 
  

}



//*****************************************************************************************
///*************************CALCULATING dist_overlapping_frame for Testing***********************
//*****************************************************************************************
inline 
void
opt_feat::dist_overlapping_frame(std::string one_video, std::string per)//covariance features per video
{
  
  cv::VideoCapture capVideo(one_video);
  //cout << one_video << endl;
  
  
  
  int num_frames = capVideo.get(CV_CAP_PROP_FRAME_COUNT);

  
  //double fps = capVideo.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
  //cout << "Frame per seconds : " << fps << endl;
  
  //cv::namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
  
  //double frmcount = capVideo.get(CV_CAP_PROP_FRAME_COUNT);
  //cout << "# of frames is: " << frmcount << endl;
  
  if( !capVideo.isOpened() )
  {
    cout << "Video couldn't be opened" << endl;
    return;
  }
  
  
  //cv::namedWindow("My Video", 1);
  running_stat_vec<vec> stats_video(true);
  //std::vector <vec> feature_vec_segm; // feature vector per segment
  //cout << "Frame: ";
  int t = 0;
  int N_vectors = 0;
  int n_segm = 0;
  
  
  int ini_frame = L+1;
  int fin_frame = (num_frames - L) - 2;
  int total_seg_fr = (L+2);  //mirar que si de este valor
  //cout << "Total de segmentos por frame " <<total_seg_fr << endl;
  
  
  //Frame de referencia
  for (int fr = ini_frame; fr<= fin_frame; fr++) // I need L+ 1 frames, as I need two more to calculate Optical Flow
  {
    cout << "***************Doing for frame " << fr << "*****************" << endl;
    int ini_seg = fr-(L+1);
    int fin_seg = fr;
    field<fvec> dist_frame_fr(total_seg_fr);
    
    int cont_dist = 0;
    
    //Rango donde voy a calcular todas las matrices de covarianza para la frame de referencia fr
    
    for (int seg = ini_seg; seg <= fin_seg; seg++ )
    { 
    
    
    cout << "Calculating starting at  " << seg <<  " ending at " << (seg + L + 1) << endl;

    
    //Calculando overlapped covariance matrices
    N_vectors = 0;
    n_segm = 0;
    cv::Mat prevgray, gray, flow, cflow, frame, prevflow;
    cv::Mat ixMat, iyMat, ixxMat, iyyMat;
    
    stats_video.reset();
    
    
    for( int j = seg; j< (seg + L +2); j++) 
    {
      capVideo.set( CV_CAP_PROP_POS_FRAMES, j );
      
     
      

      
      
      //cout << "j = " << j ;
      //capVideo >> frame;
      //cout << " " << capVideo.get(CV_CAP_PROP_POS_FRAMES) ;
      //cout << "count: " << stats_video.count() << endl;
      t = capVideo.get(CV_CAP_PROP_POS_FRAMES);
      
      //cout << "t= " << t << endl;
      
      bool bSuccess = capVideo.read(frame); // read a new frame from video
      
      if (!bSuccess) //if not success, break loop
      	{
	  cout << "Cannot read the frame from video file" << endl;
	  break;
	}
	
	cv::cvtColor(frame, gray, CV_BGR2GRAY);
      
      if( prevgray.data )
      {
	//cout << "Cuando entra aca?? en t= " << t << endl;
	cv::calcOpticalFlowFarneback(prevgray, 
				     gray, 
			      flow, 
			      0.5, //pyr_scale
			      3,   //levels
			      9,   //winsize
			      1,   //iterations
			      5,   //poly_n
			      1.1, //poly_sigma
			      0);  //flags
	//cv::calcOpticalFlowFarneback(bl_currentImg, bl_nextImg, flow, 0.5,  3, 5, 3, 5, 1.2, 0); 
	//cv::cvtColor(prevgray, cflow, CV_GRAY2BGR);
	//drawOptFlowMap(flow, cflow, 8, 1.5, CV_RGB(0, 255, 0));
	//cv::imshow("flow", cflow);
	
	
	cv::Sobel(gray, ixMat, CV_32F, 1, 0, 1);
	cv::Sobel(gray, iyMat, CV_32F, 0, 1, 1);
	cv::Sobel(gray, ixxMat, CV_32F, 2, 0, 1);
	cv::Sobel(gray, iyyMat, CV_32F, 0, 2, 1);
	
	float  ux = 0, uy = 0, vx = 0,  vy = 0;
	float u, v;
	//cout << "Llega a ciclo de Pixels???" << endl;
	//cout << "col: " << col << "- row " << row << endl;
	
	//printing frame number
	//cout << " " << t;
	if( prevflow.data )
	{
	  
	  if (n_segm == L - 1)
	  {
	    //cout << "n_segm "  << n_segm << endl;
	    mat cov = stats_video.cov();
	    
	    if (N_vectors > col*row/20)
	    {
	      cov = 0.5*(cov + cov.t());
	      vec D;
	      mat V;
	      eig_sym(D, V, cov);
	      uvec q1 = find(D < THRESH);
	      //cout << "Aqui 1 ";
	      if (q1.n_elem>0)
	      {
		for (uword pos = 0; pos < q1.n_elem; ++pos)
		{
		  D( q1(pos) ) = THRESH;
		}
		cov = V*diagmat(D)*V.t();  //
	      }  
	      
	     
	      //en vez de almacenar las covs, almacenar los vectores dist
	      fvec dist_fr = calc_dist_one_cov (cov);
	      dist_frame_fr(cont_dist) = dist_fr;
	      cont_dist++;
	      //cout << cont_dist << endl;
	      
	    }
	    else{
	      cout << ". Covariance discarded.";
	    }
	    
	    
	    
	  }
	  
	  
	  for (uword x = 0 ; x < col ; ++x ){
	    for (uword y = 0 ; y < row ; ++y ) {
	      
	      vec features_one_pixel(15);
	      u = flow.at<cv::Vec2f>(y, x)[0];
	      v = flow.at<cv::Vec2f>(y, x)[1];
	      
	      
	      float ix = ixMat.at<float>(y, x);
	      float iy = iyMat.at<float>(y, x);
	      
	      // grad direction &  grad magnitude
	      float gd = std::atan2(std::abs(iy), std::abs(ix));
	      float gm = std::sqrt(ix * ix + iy * iy);
	      
	      // x second grad
	      float ixx = ixxMat.at<float>(y, x);
	      // y second grad
	      float iyy = iyyMat.at<float>(y, x);
	      //du/dt
	      float ut = u - prevflow.at<cv::Vec2f>(y, x)[0];
	      // dv/dt
	      float vt = v - prevflow.at<cv::Vec2f>(y, x)[1];
	      //// divergence &  vorticity
	      //cout << "divergence &  vorticity" << endl;
	      if (x>0 && y>0 )
	      {
		ux = u - flow.at<cv::Vec2f>(y, x - 1)[0];
		uy = u - flow.at<cv::Vec2f>(y - 1, x)[0];
		vx = v - flow.at<cv::Vec2f>(y, x - 1)[1];
		vy = v - flow.at<cv::Vec2f>(y - 1, x)[1];
	      }
	      //int x_submat = x + rec.x;
	      //int y_submat = y + rec.y;
	      //cout << x_submat << "&" << y_submat << endl;
	      
	      ///OJO, esta j en lugar de t. Lo volvi a poner como t para "partL"
	      features_one_pixel  << x << y << t << abs(ix) << abs(iy) << abs(ixx) 
	      << abs(iyy) << gm << gd <<  u << v << abs(ut) 
	      << abs(ut) << (ux - vy)  << (vx - uy);
	      //features_one_pixel.t().print("Features Current Pixel: ");
	      //getchar();
	      
	      if (!is_finite( features_one_pixel ) ){
		cout << "It's not FINITE... continue???" << endl;
		getchar(); 
	      }
	      
	      // Plotting Moving pixels
	      //cout << " " << gm;
	      // Empirically set to 40
	      if (gm>40) {
		frame.at<cv::Vec3b>(y,x)[0] = 0;
		frame.at<cv::Vec3b>(y,x)[1] = 0;
		frame.at<cv::Vec3b>(y,x)[2] = 255;
		//feature_vec_segm.push_back(features_one_pixel);
		stats_video(features_one_pixel);
		N_vectors++;}
	    }
	  }
	  n_segm++;
	}
      }
      
      if(cv::waitKey(30)>=0)
	break;
      //cout << " t: " <<t;
	std::swap(prevgray, gray);
	std::swap(prevflow, flow);//aca esta el problema.... cuando hay flow????
	
	
	//cv::imshow("color", frame);
	//cv::waitKey(5);
	//getchar();
      
    }
      
    }
    std:stringstream name_fr;
    name_fr << feat_path << "test/per" << per << "_fr" << fr <<".dat";
    cout << "saving: "<< name_fr.str() << endl;
    dist_frame_fr.save(name_fr.str());
     cout << "******************************************" <<endl;
    //getchar();
  }
  

}  




/// Returns the vector of Stein divergences between the test covariance
/// `cov` and every reference covariance stored on disk: one file per
/// action under <feat_path>/ref_matrices/fullCov_<action>, each holding
/// N_cent matrices. Entries are concatenated in action order.
inline
fvec
opt_feat::calc_dist_one_cov(const mat & cov)
{
  const mat & cov_te = cov;  // test covariance (read-only)
  
  fvec dist_stein = zeros<fvec>(N_cent*actions.n_rows);
  uword out_pos = 0;
  
  for (uword act = 0; act < actions.n_rows; ++act)
  {
    // Load the reference covariance matrices for this action group.
    std::stringstream ref_name;
    ref_name << feat_path << "ref_matrices" << "/fullCov_" << actions(act);
    field<mat> ref_covs;
    ref_covs.load(ref_name.str());
    
    for (uword c = 0; c < ref_covs.n_rows; ++c)
    {
      // Symmetrise, then clamp small eigenvalues so the matrix stays SPD.
      mat cov_tr = ref_covs(c);
      cov_tr = 0.5*(cov_tr + cov_tr.t());
      
      vec D;
      mat V;
      eig_sym(D, V, cov_tr);
      uvec low = find(D < THRESH);
      if (low.n_elem > 0)
      {
        for (uword k = 0; k < low.n_elem; ++k)
        {
          D( low(k) ) = THRESH;
        }
        cov_tr = V*diagmat(D)*V.t();
      }
      
      // Stein divergence: sqrt( log det((A+B)/2) - 0.5 * log det(A∘B) ),
      // computed on the diagonals as in the training pipeline.
      const double det_mean = det( diagmat( (cov_tr + cov_te)/2 ) );
      const double det_prod = det( diagmat( cov_tr%cov_te ) );
      dist_stein(out_pos) = sqrt( log(det_mean) - 0.5*log(det_prod) );
      ++out_pos;
    }
  }
  
  return dist_stein;
}