
inline
features::features(const std::string in_Spath, 
		   const std::string in_Mpath, 
		   const std::string in_Spath_mask, 
		   const std::string in_Mpath_mask,
		   field<std::string> in_actions,  
		   const uword in_col, 
		   const uword in_row
)
:single_path(in_Spath), multi_path(in_Mpath), single_path_mask(in_Spath_mask), multi_path_mask(in_Mpath_mask), actions(in_actions), col(in_col), row(in_row)
{
  // Number of feature values extracted per pixel in feature_video().
  dim = 18;
  
  // Single-action mode by default; feature_multi_action() switches this on.
  ismultiAction = false;
  
  // Area threshold: 1% of the resized frame, in pixels (integer division).
  THRESH_2 = (col * row) / 100;
}



inline
void
features::features_per_action_training(const  field<string>  peo_train, int run)
{
  
  
  
  //for (uword sc = 1; sc <= 4; ++sc)
  //{
    //actions.print();
    //getchar();
    for (uword act = 0; act < actions.n_rows; ++act)
    {
       feat_all_videos_action_i.clear(); 
      
      //cout << "Doing for action " << actions(act) << endl;
      //num_personas
      int peo_tr =  peo_train.n_rows;
      
      for (uword pe = 0; pe< peo_tr; ++pe)
      {
	
	std::stringstream tmp_ss;
	tmp_ss << single_path << actions(act) << "/" << peo_train (pe) << "_" << actions(act) << ".avi";
	//cout << tmp_ss.str()<< endl;
	
	std::stringstream tmp_ss_mask;
	tmp_ss_mask<< single_path_mask << actions(act) << "/" << peo_train (pe) << "_" << actions(act) << ".avi";
	//cout << tmp_ss_mask.str()<< endl;
	
	
	//cout << "Calculating features vector per video ..." << tmp_ss.str() << endl;
	feature_video(tmp_ss.str(), tmp_ss_mask.str() ) ; //feat_all_videos_action_i is calculated inside this method
	//cout << "# of Vectors: " << feat_all_videos_action_i.size() << endl;
	//getchar();
	
      }
      
      //cout << "Converting to Arma:" << endl;
      //cout << "# of Vectors: " << feat_all_videos_action_i.size() << endl;
      
      //int len   //using only each 2 frames
      mat mat_features(dim,feat_all_videos_action_i.size());
      
      
      for (uword i = 0; i < feat_all_videos_action_i.size(); ++i)
      {
	//cout << i << endl;
	mat_features.col(i) = feat_all_videos_action_i.at(i);
	
      }
      
      
      
      
      
      std::stringstream tmp_ss4;
      tmp_ss4 << "./run" << run << "/features/train/feature_vectors_" << actions(act);
      //cout << "OJO!!!!!!" << endl;
      
      
      //cout << "Saving at " << tmp_ss4.str() << endl;
      mat_features.save( tmp_ss4.str() );
      //getchar();
      
    }
    //}
}






//*******************************************************************************************
//***************************Feature per video: Training and Testing ************************

inline 
void
features::feature_video(std::string one_video, std::string one_video_mask)
{
  
  cv::VideoCapture capVideo(one_video);
  
  cv::VideoCapture capVideo_mask(one_video_mask);
  //cout << one_video << endl;
  //double fps = capVideo.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
  //cout << "Frame per seconds : " << fps << endl;
  
  //cv::namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
  
  double frmcount = capVideo.get(CV_CAP_PROP_FRAME_COUNT);
  //cout << "# of frames is: " << frmcount << endl;
  
  if( !capVideo.isOpened() )
  {
    cout << "Video couldn't be opened" << endl;
    return;
  }
  
  cv::Mat  frame, gray, mask, mask_rgb; //gray mask as I don't know how to open a VIDEO IN GRAYSCALES
  cv::Mat bb_prevgray, bb_prevflow, bb_flow;
  cv::Mat ixMat, iyMat, ixxMat, iyyMat;
  cv::Mat flow_xy[2], mag, ang;
  running_stat_vec<vec> featvec_frame;
  
  
  //cv::namedWindow("My Video", 1);
  //running_stat_vec<vec> stats_video(true);
  //cout << "Frame: ";

  
  //Saving each two frames
  //int par_fr = 0;
  //int len = ceil(frmcount/2)-1; //0 and 1 not used
  //featuresframe_video_i.set_size( len );
  //cout << "Len " << len << endl;
  //lab_feature_vectors.set_size( frmcount );
  
  
  for(uword fr=0; fr<frmcount; fr++){
    
    // cout << t << " " << endl;
    
    bool bSuccess = capVideo.read(frame); // read a new frame from video
    bool bSuccess2= capVideo_mask.read(mask_rgb); // read a new frame from video
    
    //cout << mask_rgb << endl;
    
    //getchar();
    cvtColor(mask_rgb,mask,CV_BGR2GRAY);
    //cv::imshow("Dios", mask);
    cv::Mat tmp_mask;
    mask.copyTo(tmp_mask);
    //cv::split(mask_rgb, mask_3ch);
    //mask = mask>100;
    
    //mask_rgb = mask_rgb>=128;
    //mask_rgb.row(row-1)=cv::Scalar(255,255,255);
    
    
    //cv::cvtColor(mask_rgb, mask, CV_BGR2GRAY);
    //mask = mask>=128;
    //cout << mask << endl;
    //getchar();
    
        
    if (!bSuccess2) //if not success, break loop
	{
	  //cout << "Cannot read the frame from video file" << endl;
	  break;
	}

	
      ///Finding the Bounding Box
	
	vector<vector<cv::Point> > contours;
	vector<cv::Vec4i> hierarchy;
	cv::findContours( tmp_mask, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
	//cout << "Number of contours: " << contours.size() << endl;
	cv::Rect bb;
	int largest_area=0;
	int largest_contour_index=0;
	
	
	for( int i = 0; i< contours.size(); i++ ) // iterate through each contour. 
      {
	double a=contourArea( contours[i],false);  //  Find the area of contour
	
	if(a>largest_area){
	  largest_area=a;
	  largest_contour_index=i;                //Store the index of largest contour
	  bb=boundingRect(contours[i]); // Find the bounding rectangle for biggest contour
	  
	}
   
      }
      
	 
	//end finding BB
	
	
	///Creating bb_mask
	//cv::Mat bb_mask(mask, bb);
	//cv::imshow("bb1", bb_mask);
	
	
	///Cropping image 
	cv::Mat bb_frame(frame, bb);
	
	
	///Resizing Image and grayscale conversion
	cv::resize(bb_frame,bb_frame, cv::Size(col,row) );
	cv::Mat bb_gray;
	cv::cvtColor(bb_frame, bb_gray, CV_BGR2GRAY);
	
	cv::Mat cflow(bb_frame.size(), CV_32FC3, cvScalar(0,0,0));
	
	
	if( bb_prevgray.data ) 
	{
	  //cout << t << " " ;
	  cv::calcOpticalFlowFarneback(bb_prevgray, 
				       bb_gray, 
				bb_flow, 
				0.5, //pyr_scale
				3,   //levels
				9,   //winsize
				1,   //iterations
				5,   //poly_n
				1.1, //poly_sigma
				0);  //flags
	  
	  
	  cv::Sobel(bb_gray, ixMat, CV_32F, 1, 0, 1);
	  cv::Sobel(bb_gray, iyMat, CV_32F, 0, 1, 1);
	  cv::Sobel(bb_gray, ixxMat, CV_32F, 2, 0, 1);
	  cv::Sobel(bb_gray, iyyMat, CV_32F, 0, 2, 1);
	  
	   
	  
	  float  ux = 0, uy = 0, vx = 0,  vy = 0;
	  float u, v;
	  float Gten, Sten;
	  
	  
	  if( bb_prevflow.data )
	  {
	    
	    cv::split(bb_flow, flow_xy);
	    cv::cartToPolar(flow_xy[0], flow_xy[1], mag, ang, true);
	    
	    
	    //To Plot Optical Flow
	    {
	    
// 	     	    cv::Mat hsv_channels[3];
// 	     	    cv::Mat hsv(bb_frame.size(), CV_32FC3, cvScalar(0,0,0));
// 	     	    cv::split(hsv,hsv_channels);
// 	     	    
// 	     	    double mag_max, mag_min;
// 	     	    cv::minMaxLoc(mag, &mag_min, &mag_max);
// 	     	    mag.convertTo(mag, -1, 1.0/mag_max);
// 	     	    hsv_channels[0] = ang;
// 	     	    hsv_channels[1].setTo(cv::Scalar(255)) ;
// 	     	    hsv_channels[2] = mag;
// 	     	    cv::merge(hsv_channels, 3, hsv);
// 	     	    cv::cvtColor(hsv, cflow, CV_HSV2BGR);
// 	     	    cv::imshow("opt_flow", cflow);
	     
	 
	    }
	    ///end
	    
	    
	    for (uword x = 0 ; x < col ; ++x ){
	      for (uword y = 0 ; y < row ; ++y ) {
		
		vec features_one_pixel(18);
		mat G (2,2);
		mat S;
		u = bb_flow.at<cv::Vec2f>(y, x)[0];
		v = bb_flow.at<cv::Vec2f>(y, x)[1];
		
		//cout << "x= " << x << " - y= " << y << endl;
		// x grad
		//cout << " x y grad" << endl;
		float ix = ixMat.at<float>(y, x);
		//cout << " y grad" << endl;
		float iy = iyMat.at<float>(y, x);
		
		// grad direction &  grad magnitude
		//cout << "grad direction &  grad magnitude" << endl;
		float gd = std::atan2(std::abs(iy), std::abs(ix));
		float gm = std::sqrt(ix * ix + iy * iy);
		
		

		
		// x second grad
		//cout << "x y  second grad " << endl;
		float ixx = ixxMat.at<float>(y, x);
		// y second grad
		float iyy = iyyMat.at<float>(y, x);
		
		//du/dt
		float ut = u - bb_prevflow.at<cv::Vec2f>(y, x)[0];
		// dv/dt
		float vt = v - bb_prevflow.at<cv::Vec2f>(y, x)[1];
		
		if (x>0 && y>0 )
		{
		  ux = u - bb_flow.at<cv::Vec2f>(y, x - 1)[0];
		  uy = u - bb_flow.at<cv::Vec2f>(y - 1, x)[0];
		  vx = v - bb_flow.at<cv::Vec2f>(y, x - 1)[1];
		  vy = v - bb_flow.at<cv::Vec2f>(y - 1, x)[1];
		}
		//int x_submat = x + rec.x;
		//int y_submat = y + rec.y;
		//cout << x_submat << "&" << y_submat << endl;
		
		//new features
		
		float gd_opflow = ang.at<float>(y,x);
		float mg_opflow = mag.at<float>(y,x);
		
		
		//Gradient Tensor
		G   << ux << uy << endr
		<< vx << vy << endr;
		
		//Rate of Stein Tensor  
		S = 0.5*(G + G.t());
		
		float tr_G = trace(G);
		float tr_G2 = trace( square(G) );
		float tr_S = trace(S);
		float tr_S2 = trace(square(S));
		
		//Tensor Invariants  of the optical flow
		Gten = 0.5*( tr_G*tr_G - tr_G2 );
		Sten = 0.5*( tr_S*tr_S - tr_S2 );  
		
		//18 Features
		
		features_one_pixel  << x << y << abs(ix) << abs(iy) << abs(ixx) 
		<< abs(iyy) << gm << gd <<  u << v << abs(ut) 
		<< abs(vt) << (ux + vy)  << (vx - uy) << mg_opflow << gd_opflow << Gten << Sten;
		

		
		
		if (!is_finite( features_one_pixel ) )
		{
		  cout << "It's not FINITE... continue???" << endl;
		  getchar(); 
		}

		  
		  featvec_frame(features_one_pixel);

	      }
	    }
	    
	    
	    string text;
	    vec mean_frame = featvec_frame.mean();
	    //cout << fr << endl;
	    //cout << "Reseting .. new frame.." << endl;
	    featvec_frame.reset();
	    //getchar();
	    
	    ///Es par???
	    if ( (fr % 2 ) == 0 ) 
	    {
	      feat_all_videos_action_i.push_back(mean_frame);
	      
	      if (ismultiAction)     
	      {
		uword lab;
		lab = arma_multi_labels(fr);
		lab_feature_vectors.push_back( lab ); 
		
	      }
	      
	    }

	    
	  }
	  
	}
	
	std::swap(bb_prevgray, bb_gray);
	std::swap(bb_prevflow, bb_flow);
	
    



	/*
	 *    int fontFace = cv::FONT_HERSHEY_PLAIN;
	 *    double fontScale = 0.8;
	 *    int thickness = 1;  
	 *    cv::Point textOrg(10, 100);
	 *    cv::putText(frame, text, textOrg, fontFace, fontScale, cv::Scalar::all(255), thickness,8);
	 */
///Show images
	{
	  //cv::imshow("mask", mask); 
	  //cv::imshow("frame", frame); 
	  //cv::imshow("bb_frame", bb_frame); 
	  //cv::imshow("bb_gray", bb_gray); 
	  
	  //cv::waitKey(10);
	}

  }
  
}

//******************TESTING MULTI ACTIONS***********************************
//***************************************************************************

inline 
void 
features::feature_multi_action( field<string> peo_test, int run )
{
  // Extracts features from each person's multi-action test video together
  // with the per-frame labels, and saves both (feature matrix + label vector)
  // under ./run<run>/features/multi_test/.
  ismultiAction = true; // makes feature_video() record per-frame labels too
  
  for (uword vi = 0; vi < peo_test.n_rows; ++vi ){
    
    // feature_video() appends into these members; start fresh per person.
    feat_all_videos_action_i.clear(); 
    lab_feature_vectors.clear();
    
    std::stringstream tmp_ss3;
    tmp_ss3 << multi_path << peo_test(vi) <<"_multiactions.avi" ;
    
    std::stringstream tmp_ss3_mask;
    tmp_ss3_mask << multi_path_mask << peo_test(vi) <<"_multiactions.avi" ;
    
    std::stringstream tmp_lbs;
    tmp_lbs << multi_path << peo_test(vi) << "_labels.dat";
    
    // Labels are on a per-frame basis.  Without them feature_video() would
    // index an empty label vector, so skip this person if loading fails
    // (load() previously had its return value ignored).
    if (!arma_multi_labels.load(tmp_lbs.str(), raw_ascii))
    {
      cout << "Could not load labels from " << tmp_lbs.str() << endl;
      continue;
    }
    
    cout << "Calculating features vector per person ..." << peo_test(vi) << endl;
    feature_video(tmp_ss3.str(), tmp_ss3_mask.str() ); // fills feat_all_videos_action_i / lab_feature_vectors
    
    // Pack features (one column per kept frame) and labels into arma types;
    // both containers are filled in lockstep, so their sizes match.
    uvec lab_feature_vectors_arma(lab_feature_vectors.size());
    mat mat_features(dim,feat_all_videos_action_i.size());
    
    for (uword i = 0; i < feat_all_videos_action_i.size(); ++i)
    {
      mat_features.col(i) = feat_all_videos_action_i.at(i);
      lab_feature_vectors_arma(i) = lab_feature_vectors.at(i);
    }
    
    std::stringstream tmp_ss4;
    tmp_ss4 << "./run" << run << "/features/multi_test/feat_"<< peo_test(vi);  
    
    std::stringstream tmp_vec_lab;
    tmp_vec_lab << "./run" << run <<"/features/multi_test/lab_"<< peo_test(vi);  
    
    // Warn on save failure instead of silently losing the results.
    if (!mat_features.save( tmp_ss4.str() ))
    {
      cout << "Could not save features to " << tmp_ss4.str() << endl;
    }
    if (!lab_feature_vectors_arma.save( tmp_vec_lab.str(), raw_ascii ))
    {
      cout << "Could not save labels to " << tmp_vec_lab.str() << endl;
    }
  }
}
