//Features as per Andres' WACV paper

inline
opt_feat::opt_feat(const std::string in_Spath, 
		   const std::string in_Mpath, 
		   const std::string in_actionNames,  
		   const int in_col, 
		   const int in_row,
		   const uvec in_peo_train,
		   const uvec in_peo_test 
)
:single_path(in_Spath), multi_path(in_Mpath), actionNames(in_actionNames), col(in_col), row(in_row), peo_train(in_peo_train), peo_test(in_peo_test)
{
  // Small numerical tolerance. NOTE(review): not used anywhere in this file --
  // confirm where THRESH is consumed.
  THRESH = 0.000001;
  // Minimum number of "moving" pixels for a frame to be kept:
  // 1% of the image area (integer arithmetic, i.e. col*row/100 truncated).
  THRESH_2 = col*row*1/100;
  DIM = 39;// 14 optical flow features + 25 texture features (5x5 grey-level neighbourhood)
  
  // One action name per row of the loaded file.
  actions.load( actionNames );  //actions.print("All actions");
  
  ismultiAction = false; // per default is false; feature_multi_action() sets it to true
  //Only Run once (writes the train/test list files to disk)
  //create_data_list();
}



inline
void
opt_feat::features_per_action_training()
{
  // Computes per-pixel feature vectors for every training video of each
  // action and saves one DIM x N feature matrix per action under
  // ./run1/features/train/.  Reads the train list/labels previously written
  // by create_data_list() (run-1 split).
  field<string> train_list;
  uvec train_labels;
  
  // Fail fast if the split files are missing instead of silently running
  // on empty lists (the original ignored the load() results).
  if ( !train_list.load("./run1/train_list_Run1") )
  {
    cout << "Couldn't load ./run1/train_list_Run1" << endl;
    return;
  }
  if ( !train_labels.load("./run1/train_label_Run1") )
  {
    cout << "Couldn't load ./run1/train_label_Run1" << endl;
    return;
  }
  
  // NOTE(review): the loop starts at act = 2, so the first two actions are
  // skipped (the original marked this with "OJO!" = "watch out").  Confirm
  // this is intentional and not a leftover from a partial re-run.
  for (uword act = 2; act < actions.n_rows; ++act)
  {
    feat_all_videos_action_i.clear(); 
    uvec q1 = find(train_labels==act);  // indices of this action's training videos
    cout << "Doing for action " << actions(act) << endl;
    for (uword u = 0; u< q1.n_elem; ++u)
    {
      std::stringstream tmp_ss;
      tmp_ss << single_path << train_list(q1(u));
      
      cout << "Calculating features vector per video ..." << tmp_ss.str() << endl;
      feature_video(tmp_ss.str()); // appends to feat_all_videos_action_i
    }
    
    cout << "Converting to Arma:" << endl;
    cout << "# of Vectors: " << feat_all_videos_action_i.size() << endl;
    
    // Pack the collected per-pixel vectors into one DIM x N matrix
    // (only every second frame was used -- see feature_video()).
    mat mat_features(DIM,feat_all_videos_action_i.size());
    for (uword i = 0; i < feat_all_videos_action_i.size(); ++i)
    {
      mat_features.col(i) = feat_all_videos_action_i.at(i);
    }
    
    std::stringstream tmp_ss4;
    tmp_ss4 << "./run1/features/train/feature_vectors_" << actions(act);
    cout << "Saving at " << tmp_ss4.str() << endl;
    mat_features.save( tmp_ss4.str() );
  }
}



inline
void
opt_feat::create_gmm_action(int in_Ncent)
{ 
  // Learns one diagonal-covariance GMM per action from the saved training
  // feature matrices and stores each model under ./gmm_models/.
  // @param in_Ncent  number of Gaussian centroids per model (stored in N_cent).
  N_cent = in_Ncent;
  cout << "# clusters: " << N_cent << endl;
  
  for (uword act = 0 ; act < actions.n_rows;  ++act) {
    
    std::stringstream tmp_ss4;
    tmp_ss4 << "./run1/features/train/feature_vectors_" << actions(act); 
    cout << "Calculating GMM for action " << actions(act) << endl;
    mat mat_features;
    // The original ignored the load() result and would train on an empty
    // matrix; skip the action instead.
    if ( !mat_features.load( tmp_ss4.str() ) )
    {
      cout << "Couldn't load features from " << tmp_ss4.str() << "; skipping" << endl;
      continue;
    }
    
    gmm_diag gmm_model;
    // k-means iters = 20, EM iters = 5, variance floor = 1e-10, verbose = true
    if ( !gmm_model.learn(mat_features, N_cent, eucl_dist, static_subset, 20, 5, 1e-10, true) )
    {
      cout << "GMM learning failed for action " << actions(act) << "; model not saved" << endl;
      continue;
    }
    
    std::stringstream tmp_ss5;
    tmp_ss5 << "./gmm_models/Ng" << N_cent << "_" << actions(act); 
    cout << "Saving GMM in " << tmp_ss5.str() << endl;
    gmm_model.save( tmp_ss5.str() );
  }
  
}


//*******************************************************************************************
//***************************Feature per video: Training and Testing ************************

inline 
void
opt_feat::feature_video(std::string one_video)
{
  // Extracts per-pixel feature vectors from one video file.
  //
  // For every frame -- once two previous frames exist, so flow "acceleration"
  // (ut, vt) can be computed -- it calculates dense Farneback optical flow and
  // Sobel gradients, and for every sufficiently "moving" pixel (gradient
  // magnitude > mean grey level / 3) builds a DIM(=39)-dim vector:
  // 14 position/gradient/flow values + the 25 grey levels of the pixel's
  // 5x5 neighbourhood.  Only every second frame is stored.
  //
  // Results go into member state, depending on the mode:
  //  - single-action (ismultiAction == false): a frame's vectors are appended
  //    to feat_all_videos_action_i, but only if the frame has more than
  //    THRESH_2 moving pixels;
  //  - multi-action: one matrix per stored frame goes into
  //    featuresframe_video_i, and the frame's ground-truth label (taken from
  //    arma_multi_labels, which the caller must have loaded) is pushed into
  //    lab_feature_vectors.
  
  std::vector < vec > feat_frame;  // feature vectors of the current frame
  
  
  //one_video = "/home/johanna/codes/multi-actions/kth_single_action/person04_running_d4_uncomp.avi";
  cv::VideoCapture capVideo(one_video);
  //cout << one_video << endl;
  //double fps = capVideo.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
  //cout << "Frame per seconds : " << fps << endl;
  
  //cv::namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
  
  double frmcount = capVideo.get(CV_CAP_PROP_FRAME_COUNT);
  cout << "# of frames is: " << frmcount << endl;
  
  if( !capVideo.isOpened() )
  {
    cout << "Video couldn't be opened" << endl;
    return;
  }
  
  cv::Mat prevgray, gray, flow, cflow, frame, prevflow;
  cv::Mat ixMat, iyMat, ixxMat, iyyMat;
  //cv::namedWindow("My Video", 1);
  //running_stat_vec<vec> stats_video(true);
  //cout << "Frame: ";
  int t = -1;  // index of the last successfully read frame
 
  //Saving each two frames
  int par_fr = 0;  // counts the stored (even-indexed) frames
  // NOTE(review): sized for the even frames that have flow history; assumes
  // CV_CAP_PROP_FRAME_COUNT matches the number of readable frames -- confirm.
  int len = ceil(frmcount/2)-1; //0 and 1 not used (no flow history yet)
  featuresframe_video_i.set_size( len );
  string text;  // status text, only used by the (commented-out) overlay below
  //cout << "Len " << len << endl;
  //lab_feature_vectors.set_size( frmcount );
  
  
  for(uword fr=0; fr<frmcount; fr++){
    
    //cout << t << " " ;
    
    bool bSuccess = capVideo.read(frame); // read a new frame from video
    
    if (!bSuccess) //if not success, break loop
	{
	  //cout << "Cannot read the frame from video file" << endl;
	  break;
	}
	t++;
    cv::cvtColor(frame, gray, CV_BGR2GRAY);
    //cv::cvtColor(frame,gray, cv::COLOR_BGR2GRAY);//For Opencv 3.0 (installed at home)
    // Mean grey level of the frame; used below as an adaptive threshold
    // (gm > m_gray/3) to decide which pixels count as "moving".
    cv::Scalar mean_gray = mean(gray);
    double m_gray = mean_gray.val[0];
    
    if( prevgray.data )  // need at least one previous frame for optical flow
    {
      //cout << t << " " ;
      cv::calcOpticalFlowFarneback(prevgray, 
				   gray, 
				   flow, 
				   0.5, //pyr_scale
				   3,   //levels
				   9,   //winsize
				   1,   //iterations
				   5,   //poly_n
				   1.1, //poly_sigma
				   0);  //flags
      
      // First and second spatial derivatives of the grey image.
      cv::Sobel(gray, ixMat, CV_32F, 1, 0, 1);
      cv::Sobel(gray, iyMat, CV_32F, 0, 1, 1);
      cv::Sobel(gray, ixxMat, CV_32F, 2, 0, 1);
      cv::Sobel(gray, iyyMat, CV_32F, 0, 2, 1);
      
      float  ux = 0, uy = 0, vx = 0,  vy = 0;  // spatial flow differences
      float u, v;                              // flow at the current pixel
     
      // Need TWO previous frames (prevflow) to compute the temporal flow
      // derivatives ut, vt.
      if( prevflow.data )
      {
	  //cout << t << " " ;
	for (uword x = 0 ; x < col ; ++x ){
	  for (uword y = 0 ; y < row ; ++y ) {
	    
	    vec features_one_pixel(DIM);
	    vec features_one_pixel_opfl(14);
	    vec neigh_pixels;//neighbour pixels
	    u = flow.at<cv::Vec2f>(y, x)[0];
	    v = flow.at<cv::Vec2f>(y, x)[1];
	    
	    //cout << "x= " << x << " - y= " << y << endl;
	    // x grad
	    float ix = ixMat.at<float>(y, x);
	    // y grad
	    float iy = iyMat.at<float>(y, x);
	    
	    // grad direction &  grad magnitude
	    float gd = std::atan2(std::abs(iy), std::abs(ix));
	    float gm = std::sqrt(ix * ix + iy * iy);
	    
	    // x second grad
	    float ixx = ixxMat.at<float>(y, x);
	    // y second grad
	    float iyy = iyyMat.at<float>(y, x);
	    
	    //du/dt
	    float ut = u - prevflow.at<cv::Vec2f>(y, x)[0];
	    // dv/dt
	    float vt = v - prevflow.at<cv::Vec2f>(y, x)[1];
	    
	    // Backward spatial differences of the flow, used below for
	    // divergence (ux - vy) and vorticity (vx - uy).  At the image
	    // border (x==0 or y==0) the previous zero-initialised / stale
	    // values are reused.
	    if (x>0 && y>0 )
	    {
	      ux = u - flow.at<cv::Vec2f>(y, x - 1)[0];
	      uy = u - flow.at<cv::Vec2f>(y - 1, x)[0];
	      vx = v - flow.at<cv::Vec2f>(y, x - 1)[1];
	      vy = v - flow.at<cv::Vec2f>(y - 1, x)[1];
	    }
	    //int x_submat = x + rec.x;
	    //int y_submat = y + rec.y;
	    //cout << x_submat << "&" << y_submat << endl;
	    
	    
	    // 14 optical-flow/gradient features for this pixel.
	    features_one_pixel_opfl  << x << y << abs(ix) << abs(iy) << abs(ixx) 
	    << abs(iyy) << gm << gd <<  u << v << abs(ut) 
	    << abs(vt) << (ux - vy)  << (vx - uy);
	  
	    // Sanity check: pause on NaN/Inf so the bad pixel can be inspected.
	    if (!is_finite( features_one_pixel_opfl ) )
	    {
	      cout << "It's not FINITE... continue???" << endl;
	      getchar(); 
	    }
	    
	    // Keep only "moving" pixels whose 5x5 neighbourhood fits in the image.
	    bool isinRange = ((y+5)<row && (x+5)<col);
	    if (gm>m_gray/3 && isinRange) // 
			    {

			      int n = 0;
			      neigh_pixels.zeros(25);

			      // 25 grey levels of the 5x5 neighbourhood (texture part).
			      for (uword nx = 0; nx<5; ++nx){
				for (uword ny = 0; ny<5; ++ny){
				  
				
				   neigh_pixels(n) = gray.at<uchar>(y+ny,x+nx);;
				   n++;
				   //neigh_pixels.t().print();
				   //cv::waitKey();
				  
				}
			      }
			      
			      
			     // Assemble the final 39-dim vector:
			     // flow/gradient part + texture part.
			     //features_one_pixel_opfl.t().print();
			     //neigh_pixels.t().print();
			     features_one_pixel.subvec(0,13)   = features_one_pixel_opfl;
			     features_one_pixel.subvec(14,DIM -1) = neigh_pixels;
			      
			      
			      //features_one_pixel.print("features");
			      //getchar();
			      feat_frame.push_back(features_one_pixel);
			      
			      
			      // Mark the moving pixel in red (visualisation only).
			      frame.at<cv::Vec3b>(y,x)[0] = 0;
			      frame.at<cv::Vec3b>(y,x)[1] = 0;
			      frame.at<cv::Vec3b>(y,x)[2] = 255;
  
			    }
			    
	  }
	  
	}
	
	    
    
    
    if (!ismultiAction) // For Single Videos
    {
      if (feat_frame.size() > THRESH_2) //frame has enough moving pixels
      {
	
	
	// is the frame index even? (only every second frame is stored)
	if ( (fr % 2 ) == 0 ) 
	{
	  feat_all_videos_action_i.insert(feat_all_videos_action_i.end(), feat_frame.begin(), feat_frame.end()  );
	  text = "Frame used";
	  par_fr++;// this counter is only meaningful for multi-action videos
	  
	}
	
      }
      else
      {
	text = "Frame discarded";
	//cout << "Frame discarded" << endl;
	
      }
      
    }
    
    
    else//For multiaction
    {
      //cout << "Frame " << t << ". Feature vectors "<< feat_frame.size() << endl;
      text = "Frame wasn't discarded. Multivideo";
      
      // Multi-action mode: keep every second frame regardless of how many
      // moving pixels it has, and record its ground-truth label.
       if ( (fr % 2 ) == 0 ) 
       {
	 //cout << "fr= " << fr << " par_fr= " << par_fr << " " ;
	 mat feat_frame_i( DIM,feat_frame.size() );
	 
	 for (uword l = 0; l<feat_frame.size(); ++l )
	 {
	   feat_frame_i.col(l) = feat_frame.at(l);
	   
	}
	featuresframe_video_i(par_fr) = feat_frame_i;
	// NOTE(review): assumes arma_multi_labels has at least frmcount
	// entries (loaded by feature_multi_action()) -- confirm.
	uword lab = arma_multi_labels(fr);
	lab_feature_vectors.push_back( lab ); 
	par_fr++;
	//cout << par_fr << " ";
	 
      }
      
    }
	
      }
      
    }
    
    if(cv::waitKey(30)>=0)
      break;
    
    
    // Recycle buffers: current frame becomes the previous one.
    std::swap(prevgray, gray);
    std::swap(prevflow, flow);
    

    
    
     
    //string text;
    
    /*
    std::stringstream tmp_tx;
    tmp_tx << m_gray;
    text = tmp_tx.str();
    int fontFace = cv::FONT_HERSHEY_PLAIN;
    double fontScale = 0.8;
    int thickness = 1;  
    cv::Point textOrg(10, 100);
    cv::putText(frame, text, textOrg, fontFace, fontScale, cv::Scalar(255,0,0), thickness,8);
    
    
    string text;
    std::stringstream tmp_ss;
    tmp_ss << t;
    text = tmp_ss.str();
    */

    //cv::imshow("color", frame);
    //cv::waitKey();
    //
    
    
    feat_frame.clear();  // reset per-frame buffer
  }
  cout << "# of frames used " << par_fr<< endl;
  //getchar();
  
}

//******************TESTING MULTI ACTIONS***********************************
//***************************************************************************


inline 
void 
opt_feat::feature_multi_action()
{
  // Extracts and saves feature matrices (plus per-frame ground-truth labels)
  // for every multi-action test video listed in ./run1/test_list_Run1.
  // Output goes to ./run1/features/multi_test/ (one feat_* and one lab_*
  // file per video).
  ismultiAction = true; // switch feature_video() into multi-action mode
  
  field<string> test_list; 
  field<string> test_label_list;
  // Fail fast on missing split files (the original ignored the load results).
  if ( !test_list.load("./run1/test_list_Run1") )
  {
    cout << "Couldn't load ./run1/test_list_Run1" << endl;
    return;
  }
  if ( !test_label_list.load("./run1/test_list_lab_Run1") )
  {
    cout << "Couldn't load ./run1/test_list_lab_Run1" << endl;
    return;
  }
  
  test_list.print("Test_list");
  
  for (uword vi = 0; vi <test_list.n_rows; ++vi ){ //videos.n_rows
      
      feat_all_videos_action_i.clear(); 
      lab_feature_vectors.clear();
      
      std::stringstream tmp_ss3;
      tmp_ss3 << multi_path << test_list(vi);
      cout << tmp_ss3.str()<< endl;
      
      std::stringstream tmp_lbs;
      tmp_lbs << multi_path << test_label_list(vi);
      cout << tmp_lbs.str() << endl;
      // Labels are on a per-frame basis; feature_video() indexes them by
      // frame number, so a failed load would cause out-of-range access.
      if ( !arma_multi_labels.load(tmp_lbs.str(), raw_ascii) )
      {
	cout << "Couldn't load labels " << tmp_lbs.str() << "; skipping video" << endl;
	continue;
      }
      
      cout << "Calculating features vector per video ..." << test_label_list(vi) << endl;
      feature_video(tmp_ss3.str()); // fills featuresframe_video_i & lab_feature_vectors
      
      cout << "Total feature vectors: " << featuresframe_video_i.n_elem << endl;
      
      cout << "Converting to Arma:" << endl;
      cout << "# of Vectors: " << lab_feature_vectors.size() << endl;
      // Copy the std::vector of labels into an Armadillo vector for saving.
      uvec lab_feature_vectors_arma(lab_feature_vectors.size());
      for (uword i = 0; i < lab_feature_vectors.size(); ++i)
      {
	lab_feature_vectors_arma(i) = lab_feature_vectors.at(i);
      }
      
      std::stringstream tmp_ss4;
      tmp_ss4 << "./run1/features/multi_test/feat_"<< test_label_list(vi);
      
      std::stringstream tmp_vec_lab;
      tmp_vec_lab << "./run1/features/multi_test/lab_"<< test_label_list(vi);
      
      cout << "Saving " << endl;
      featuresframe_video_i.save( tmp_ss4.str() );
      lab_feature_vectors_arma.save( tmp_vec_lab.str(), raw_ascii );
  }
 
}

inline 
void 
opt_feat::gmm_multi_action( int in_Ncent )
{
  
   double ave_acc=0;
  int L = 13;
  N_cent = in_Ncent;
  field<mat> featuresframe_video_i; // Features for frame i are in  arow of this field
  
  uvec real_labels;
  
  cout << "Testing for GMM with " << N_cent << " centroids" << endl;
  
  field<std::string> person;
  person.load("./run1/features/multi_test/person_list_Run1.txt");
  //person.print();
  
  field<std::string> lab_person;
  lab_person.load("./run1/features/multi_test/Labperson_list_Run1.txt");
  //lab_person.print();
  
  /*
   std::stringstream tmp_ss;
      tmp_ss << single_path << train_list(q1(u));
   */
  
  //cout << "person.n_rows " << person.n_rows << endl;
  //cout << "lab_person.n_rows "  << lab_person.n_rows << endl;
  
 
  
  for (uword vi = 0; vi <person.n_rows; ++vi ){ 
    
    std::stringstream tmp_ss4;
    tmp_ss4 << "./run1/features/multi_test/"<< person(vi);  // 17 for training from 18 onwards for testing	std::stringstream tmp_vec_lab;
    std::stringstream tmp_vec_lab;
    
    tmp_vec_lab << "./run1/features/multi_test/"<< lab_person(vi);  // 17 for training from 18 onwards for testing
    //cout << tmp_vec_lab.str()<< endl;
    
    featuresframe_video_i.load( tmp_ss4.str() );
    
    
    real_labels.load( tmp_vec_lab.str(), raw_ascii );
   
    
    //real_labels.t().print("real_labels");
    
    
    //conv_to< colvec >::from(x)
    int num_frames = featuresframe_video_i.n_rows;
    
    
    mat log_prob;
    log_prob.zeros(num_frames, actions.n_rows);  
    
    cout << "Doing for person " << person(vi) << endl;
    //cout << "num_frames " << num_frames << endl;
    //cout << "num_labels " << real_labels.n_elem << endl;
    
    
    for ( uword fr = 0; fr < num_frames -L +1 ; ++fr)
       {
	 //cout << fr << endl;
	 mat mat_features;
	 
	 //cout << " j = " << fr << " to  j< " << fr+L << endl ;
	 for (uword j = fr; j< fr+L; j++)
	 {
	   //cout << j << " " ;
	   mat_features	 = join_rows( mat_features, featuresframe_video_i(j) );
	   
	   
	   
	 }
	 
	 
	 //cout << endl;
	 vec likelihood_actions = get_loglikelihoods(mat_features);
	 //likelihood_actions.t().print();
	 //cout << endl << "ini " << fr << " fin:" << fr + L-1 << endl;
	 log_prob.rows(fr, fr + L-1).each_row() += likelihood_actions.t();
	 //getchar();
	 //cout << log_prob.rows(0, fr + L + 5) << endl;
	 //getchar();
	 
	 
       }
       //log_prob.print();
       //cout << "Done here" << endl;
       //getchar();
       uvec est_labels(num_frames);
       
       
       
       for ( uword i = 0; i < num_frames; ++i)
       {
	 //cout << " " << i;
	 uword index;
	 double max_frame = log_prob.row(i).max(index);
	 //cout << "Log_prob_frame " << i << "= " << log_prob.row(i) << endl;
	 
	 //cout << "Max for " << log_prob.row(i) << " is " << max_frame << " pos: " << index<< endl;
	 est_labels(i) = index;
	 //getchar();
	 
       }
       
       std::stringstream tmp_save_estlab;
       tmp_save_estlab<< "./run1/results/est_"<<lab_person(vi);  // 17 for training from 18 onwards for testing
       
       est_labels.save(tmp_save_estlab.str(), raw_ascii);
       
       
       std::stringstream tmp_save_real_lab;
       tmp_save_real_lab << "./run1/results/real_"<<lab_person(vi);  // 17 for training from 18 onwards for testing
       
       real_labels.save( tmp_save_real_lab.str(), raw_ascii );
       
       uvec comparing = find( est_labels == real_labels);
       double acc = comparing.n_elem;
       cout <<"performance for person "<<person(vi)<<" is "<<setprecision(2)<<fixed<<100*acc/est_labels.n_elem << " %"<<endl;
       ave_acc = ave_acc + 100*acc/est_labels.n_elem;
       //getchar();
  }
  
  cout << "Average performance is " << setprecision(2) << fixed << ave_acc/person.n_rows << " %"<<endl;

}


inline
vec
opt_feat::get_loglikelihoods(mat mat_features)
{
  // Scores a feature matrix against every action's stored GMM.
  // Loads each model from ./gmm_models/Ng<N_cent>_<action> and returns a
  // vector holding one average log-likelihood per action (same order as
  // the rows of `actions`).
  vec likelihood_actions(actions.n_rows);
  
  for (uword a = 0; a < actions.n_rows; ++a)
  {
    std::stringstream model_path;
    model_path << "./gmm_models/Ng" << N_cent << "_" << actions(a);
    
    gmm_diag model;
    model.load( model_path.str() );
    
    likelihood_actions(a) = model.avg_log_p(mat_features);
  }
  
  return likelihood_actions;
}




///Create list of People in Training Set and Testing Set
inline
void
opt_feat::create_data_list()
{
  
  int n_actions   = actions.n_rows;
  int n_scenarios = 4;
  
  
  //Training
  //peo_train 
  //peo_test
  int np_train = peo_train.n_elem;
  int total_train = np_train*n_scenarios*n_actions;
  field<string> train_list (total_train);
  uvec train_labels(total_train);
  int f = 0;
  bool isMissing = false;
  
  for (uword act = 0; act < n_actions; ++act)
  {
    for (uword p_tr=0; p_tr <np_train; ++p_tr)
    {
    
      for (uword sc = 1; sc<=4; ++sc  )
      {
	
	stringstream tmp_train;
	
	if ( !(peo_train(p_tr)== 13 && act==4 && sc==3)  )
	{
	  if (peo_train(p_tr)<10)
	  {
	    tmp_train << "person0"<< peo_train(p_tr) << "_" << actions(act) << "_d"<< sc << "_uncomp.avi";
	  }
	  else
	  {
	    tmp_train << "person"<< peo_train(p_tr) << "_" << actions(act) << "_d"<< sc << "_uncomp.avi"; 
	    
	  }
	  train_list(f) = tmp_train.str();
	  train_labels(f) = act;
	  ++f;
	}
	else
	{
	  tmp_train << "person"<< peo_train(p_tr) << "_" << actions(act) << "_d"<< sc << "_uncomp.avi"; 
	  cout << tmp_train.str()<< endl;
	  isMissing=true;
	}
	
      }
      
    }
    
  }
  
  //cout << train_list.n_elem << endl;
  if (isMissing)
  {
    //cout << "isMissing " << endl;
    field<string>train_list2(total_train-1);
    uvec train_labels2 (total_train-1);
    for (uword i=0; i<total_train-1; ++i)
    {
      train_list2(i)=train_list(i);
      train_labels2(i) = train_labels(i);
    }
    train_list = train_list2;
    train_labels=train_labels2;
    
  }
  //train_labels.t().print();
  //getchar();
  train_list.print();
 
  int np_test = peo_test.n_elem;
  int total_test = np_test*n_scenarios;
  field<string> test_list (total_test); 
  field<string> test_label_list (total_test);

  f =0;
  
  for (uword p_te = 0; p_te < np_test; ++p_te)
  {
    for (uword sc = 1; sc<=4; ++sc  )
      {
	stringstream tmp_test;
	stringstream tmp_test_lab;
	
	if (peo_test(p_te)<10)
	  {
	    tmp_test << "person0"<< peo_test(p_te) << "_d"<< sc << "_multiactions.avi";
	    tmp_test_lab << "person0"<< peo_test(p_te) << "_d"<< sc << "_MultiLabels.dat";
	  }
	  else
	  {
	    tmp_test << "person"<< peo_test(p_te) << "_d"<< sc << "_multiactions.avi";
	    tmp_test_lab << "person"<< peo_test(p_te) << "_d"<< sc << "_MultiLabels.dat";

	    
	  }
	  test_list(f) = tmp_test.str();
	  test_label_list(f) = tmp_test_lab.str();

	  ++f;

   }
 
}


train_list.save("train_list_Run1");
train_labels.save("train_label_Run1",raw_ascii);
test_list.save("test_list_Run1");
test_label_list.save("test_list_lab_Run1");
}
