
inline
features::features(const std::string in_Spath,
		   const std::string in_Mpath,
		   const std::string in_Spath_mask,
		   const std::string in_Mpath_mask,
		   field<std::string> in_actions,
		   const uword in_col,
		   const uword in_row,
		   const int in_n_regions)
  : single_path(in_Spath),
    multi_path(in_Mpath),
    single_path_mask(in_Spath_mask),
    multi_path_mask(in_Mpath_mask),
    actions(in_actions),
    col(in_col),
    row(in_row),
    n_regions(in_n_regions)
{
  // Minimum number of moving pixels (1% of the frame area) a frame must
  // contain before its feature vectors are kept — see feature_video().
  THRESH_2 = (col * row) / 100;

  // Single-action mode by default; feature_multi_action() turns this on.
  ismultiAction = false;
}



inline
void
features::features_per_action_training(const  field<string>  peo_train, int run)
{
  
  feat_all_videos_action_regions.resize(n_regions);
  
  for (uword act = 0; act < actions.n_rows; ++act)
  {
    
    for (uword r=0; r<n_regions; ++r)
    {
      feat_all_videos_action_regions[r].clear();
    }
    
    
    //feat_all_videos_action_i.clear(); 
    
    cout << "Doing for action " << actions(act) << endl;
    //getchar();
    //num_personas
    int peo_tr =  peo_train.n_rows;
    
    for (uword pe = 0; pe< peo_tr; ++pe)
    {
      
      std::stringstream tmp_ss;
      tmp_ss << single_path << actions(act) << "/" << peo_train (pe) << "_" << actions(act) << ".avi";
      //cout << tmp_ss.str()<< endl;
      
      std::stringstream tmp_ss_mask;
      tmp_ss_mask<< single_path_mask << actions(act) << "/" << peo_train (pe) << "_" << actions(act) << ".avi";
      //cout << tmp_ss_mask.str()<< endl;
      
      
      //cout << "Calculating features vector per video ..." << tmp_ss.str() << endl;
      feature_video(tmp_ss.str(), tmp_ss_mask.str() ) ; //feat_all_videos_action_regions is calculated inside this method
      
      
    }
    
    
    for (uword ri=0; ri<n_regions; ++ri)
    {
      
      int tam = feat_all_videos_action_regions[ri].size();
      mat mat_features(18,tam);
      
      for (uword i = 0; i < tam; ++i)
      {
	//cout << i << endl;
	mat_features.col(i) = feat_all_videos_action_regions[ri].at(i);
	
      }
      
      std::stringstream tmp_ss4;
      tmp_ss4 << "./run" << run << "/features/train/feature_vectors_" << actions(act) << "_region"<< ri;
      mat_features.save( tmp_ss4.str() );
      
      
      
    }
    
    
    //w/out regions
    /*
     *      mat mat_features(18,feat_all_videos_action_i.size());
     *     
     *      for (uword i = 0; i < feat_all_videos_action_i.size(); ++i)
     *      {
     *	//cout << i << endl;
     *	mat_features.col(i) = feat_all_videos_action_i.at(i);
     *	
     }
     std::stringstream tmp_ss4;
     tmp_ss4 << "./run" << run << "/features/train/feature_vectors_" << actions(act);
      mat_features.save( tmp_ss4.str() );
      */
    //end w/out regions
    
     }
     
     
     
  }
  
  
  
  
  
  
  //*******************************************************************************************
  //***************************Feature per video: Training and Testing ************************
  
  inline
  void
  features::feature_video(std::string one_video, std::string one_video_mask)
  {
    // Computes an 18-dimensional feature vector for every moving pixel of
    // every second frame of one video, using dense optical flow (Farneback)
    // plus first/second image gradients, and assigns each vector to a body
    // region via which_region().
    //
    // one_video      : path to the RGB video.
    // one_video_mask : path to the matching silhouette-mask video.
    //
    // Side effects (members written):
    //  - single-action mode (!ismultiAction): appends the surviving vectors
    //    of frames with more than THRESH_2 moving pixels to
    //    feat_all_videos_action_regions[region].
    //  - multi-action mode: fills featuresframe_video_region_i(frame, region)
    //    and pushes the per-frame ground-truth label (arma_multi_labels)
    //    into lab_feature_vectors.

    // Per-region accumulator for the CURRENT frame only (reset each frame).
    std::vector< std::vector<vec> > feat_region(n_regions);

    cv::VideoCapture capVideo(one_video);
    cv::VideoCapture capVideo_mask(one_video_mask);

    double frmcount = capVideo.get(CV_CAP_PROP_FRAME_COUNT);

    if( !capVideo.isOpened() )
    {
      cout << "Video couldn't be opened" << endl;
      return;
    }

    // Fix: the mask capture was never validated; an unopened mask video would
    // only surface later as a crash inside cv::split.
    if( !capVideo_mask.isOpened() )
    {
      cout << "Mask video couldn't be opened" << endl;
      return;
    }

    cv::Mat prevgray, gray, flow, frame, prevflow, mask, mask_rgb;
    cv::Mat ixMat, iyMat, ixMat_mask, iyMat_mask, ixxMat, iyyMat;
    cv::Mat flow_xy[2], mag, ang;
    cv::Mat mask_3ch[3];

    int t = -1;                     // index of the last frame successfully read

    // Only every second frame is kept, and the first two frames cannot yield
    // features (two previous flows are needed), hence the -1.
    int par_fr = 0;                 // counter of kept (even) frames
    int len = ceil(frmcount/2)-1;

    featuresframe_video_region_i.set_size( len, n_regions );

    for(uword fr=0; fr<frmcount; fr++){

      bool bSuccess  = capVideo.read(frame);         // next RGB frame
      bool bSuccess2 = capVideo_mask.read(mask_rgb); // matching mask frame

      // Fix: validate BOTH reads before touching the data. Previously the
      // mask frame was split/copied before the check and the mask read result
      // was ignored entirely, so a truncated mask video handed an empty Mat
      // to cv::split and aborted.
      if (!bSuccess || !bSuccess2)
	break;

      cv::split(mask_rgb, mask_3ch);
      mask_rgb.copyTo(mask);

      t++;

      cv::cvtColor(frame, gray, CV_BGR2GRAY);

      // Canvas kept for the (currently disabled) optical-flow visualisation.
      cv::Mat cflow(frame.size(), CV_32FC3, cv::Scalar::all(0));

      if( prevgray.data )
      {
	cv::calcOpticalFlowFarneback(prevgray,
				     gray,
				     flow,
				     0.5, //pyr_scale
				     3,   //levels
				     9,   //winsize
				     1,   //iterations
				     5,   //poly_n
				     1.1, //poly_sigma
				     0);  //flags

	// First and second spatial derivatives of the intensity image.
	cv::Sobel(gray, ixMat,  CV_32F, 1, 0, 1);
	cv::Sobel(gray, iyMat,  CV_32F, 0, 1, 1);
	cv::Sobel(gray, ixxMat, CV_32F, 2, 0, 1);
	cv::Sobel(gray, iyyMat, CV_32F, 0, 2, 1);

	// Gradients of the silhouette mask (first channel).
	cv::split(flow, flow_xy);
	cv::Sobel(mask_3ch[0], ixMat_mask, CV_32F, 1, 0, 1);
	cv::Sobel(mask_3ch[0], iyMat_mask, CV_32F, 0, 1, 1);

	float ux = 0, uy = 0, vx = 0, vy = 0;
	float u, v;
	float Gten, Sten;

	if( prevflow.data )
	{
	  cv::split(flow, flow_xy);
	  cv::cartToPolar(flow_xy[0], flow_xy[1], mag, ang, true);

	  // Bounding box of the silhouette: contours of the mask channel.
	  vector< vector<cv::Point> > contours;
	  vector<cv::Vec4i> hierarchy;
	  cv::findContours( mask_3ch[0], contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );

	  cv::Rect bb;
	  if (contours.size())
	  {
	    bb = cv::boundingRect(cv::Mat(contours[0]));

	    if ( n_regions == 2 )
	    {
	      // Draw the two half-height regions (debug visualisation only).
	      cv::Rect r1(bb.x, bb.y,                 bb.width, bb.height/2);
	      cv::Rect r2(bb.x, bb.y + bb.height/2,   bb.width, bb.height/2);

	      cv::rectangle(mask, r1, cv::Scalar(255,255,0), 2);
	      cv::rectangle(mask, r2, cv::Scalar(255,0,0),   2);
	    }
	  }

	  for (uword x = 0 ; x < col ; ++x ){
	    for (uword y = 0 ; y < row ; ++y ) {

	      vec features_one_pixel(18);
	      mat G (2,2);
	      mat S;

	      // Flow components at this pixel.
	      u = flow.at<cv::Vec2f>(y, x)[0];
	      v = flow.at<cv::Vec2f>(y, x)[1];

	      // Intensity gradient, its direction and magnitude.
	      float ix = ixMat.at<float>(y, x);
	      float iy = iyMat.at<float>(y, x);
	      float gd = std::atan2(std::abs(iy), std::abs(ix));
	      float gm = std::sqrt(ix * ix + iy * iy);

	      // Mask gradient magnitude (computed but not part of the final
	      // 18-feature vector; kept for the commented-out gm_mask gate).
	      float ix_mask = ixMat_mask.at<float>(y, x);
	      float iy_mask = iyMat_mask.at<float>(y, x);
	      float gm_mask = std::sqrt(ix_mask * ix_mask + iy_mask * iy_mask);

	      // Second derivatives.
	      float ixx = ixxMat.at<float>(y, x);
	      float iyy = iyyMat.at<float>(y, x);

	      // Temporal derivatives of the flow: du/dt, dv/dt.
	      float ut = u - prevflow.at<cv::Vec2f>(y, x)[0];
	      float vt = v - prevflow.at<cv::Vec2f>(y, x)[1];

	      // Spatial derivatives of the flow via backward differences
	      // (border pixels keep the previous values, as in the original).
	      if (x>0 && y>0 )
	      {
		ux = u - flow.at<cv::Vec2f>(y, x - 1)[0];
		uy = u - flow.at<cv::Vec2f>(y - 1, x)[0];
		vx = v - flow.at<cv::Vec2f>(y, x - 1)[1];
		vy = v - flow.at<cv::Vec2f>(y - 1, x)[1];
	      }

	      // Optical-flow direction and magnitude at this pixel.
	      float gd_opflow = ang.at<float>(y,x);
	      float mg_opflow = mag.at<float>(y,x);

	      // Velocity-gradient tensor G and rate-of-strain tensor S.
	      G   << ux << uy << endr
	      << vx << vy << endr;
	      S = 0.5*(G + G.t());

	      float tr_G  = trace(G);
	      float tr_G2 = trace( square(G) );
	      float tr_S  = trace(S);
	      float tr_S2 = trace(square(S));

	      // Second invariants of G and S.
	      Gten = 0.5*( tr_G*tr_G - tr_G2 );
	      Sten = 0.5*( tr_S*tr_S - tr_S2 );

	      // The 18 features.
	      features_one_pixel  << x << y << abs(ix) << abs(iy) << abs(ixx)
	      << abs(iyy) << gm << gd <<  u << v << abs(ut)
	      << abs(vt) << (ux + vy)  << (vx - uy) << mg_opflow << gd_opflow << Gten << Sten;

	      if (!is_finite( features_one_pixel ) )
	      {
		cout << "It's not FINITE... continue???" << endl;
		getchar();
	      }

	      // Keep only pixels inside the silhouette mask.
	      if (mask_rgb.at<cv::Vec3b>(y,x)[0]!=0)
	      {
		// Paint moving pixels red by default...
		frame.at<cv::Vec3b>(y,x)[0] = 0;
		frame.at<cv::Vec3b>(y,x)[1] = 0;
		frame.at<cv::Vec3b>(y,x)[2] = 255;

		int region_index = which_region(x, y,  bb);

		// ...then recolour by region (debug visualisation):
		// 0 = outside bb (black), 1 = upper half (green),
		// 2 = lower half (blue).
		if  (region_index==0)
		{
		  frame.at<cv::Vec3b>(y,x)[0] = 0;
		  frame.at<cv::Vec3b>(y,x)[1] = 0;
		  frame.at<cv::Vec3b>(y,x)[2] = 0;
		}

		if  (region_index==1)
		{
		  frame.at<cv::Vec3b>(y,x)[0] = 0;
		  frame.at<cv::Vec3b>(y,x)[1] = 255;
		  frame.at<cv::Vec3b>(y,x)[2] = 0;
		}

		if  (region_index==2)
		{
		  frame.at<cv::Vec3b>(y,x)[0] = 255;
		  frame.at<cv::Vec3b>(y,x)[1] = 0;
		  frame.at<cv::Vec3b>(y,x)[2] = 0;
		}

		// Pixels outside the bounding box (region 0) are discarded.
		if  (region_index!=0)
		{
		  feat_region[region_index-1].push_back(features_one_pixel);
		}
	      }
	    }
	  }

	  string text;

	  // Single-action videos: keep the frame only if it has enough
	  // moving pixels, and only every second frame.
	  if (!ismultiAction)
	  {
	    int tam = 0;
	    for (int ri=0; ri<n_regions; ++ri)
	    {
	      tam+=feat_region[ri].size();
	    }

	    if (tam > THRESH_2)
	    {
	      // keep even frames only
	      if ( (fr % 2 ) == 0 )
	      {
		for (int ri=0; ri<n_regions; ++ri)
		{
		  feat_all_videos_action_regions[ri].insert(feat_all_videos_action_regions[ri].end(), feat_region[ri].begin(), feat_region[ri].end()  );
		}

		text = "Frame used";
		par_fr++; // this counter only matters for multi-action videos
	      }
	    }
	    else
	    {
	      text = "Frame discarded";
	    }
	  }

	  // Multi-action videos: every even frame is stored (no threshold),
	  // together with its ground-truth label.
	  if (ismultiAction)
	  {
	    text = "Frame wasn't discarded. Multivideo";

	    if ( (fr % 2 ) == 0 )
	    {
	      for (int ri=0; ri<n_regions; ++ri)
	      {
		mat feat_frame_region_i( 18, feat_region[ri].size() );

		// convert from std::vector to arma::mat
		for (uword l = 0; l<feat_region[ri].size() ; ++l )
		{
		  feat_frame_region_i.col(l) = feat_region[ri].at(l);
		}
		featuresframe_video_region_i(par_fr,ri) = feat_frame_region_i;
	      }

	      uword lab = arma_multi_labels(fr);
	      lab_feature_vectors.push_back( lab );
	      par_fr++;
	    }
	  }
	}
      }

      if(cv::waitKey(30)>=0)
	break;

      std::swap(prevgray, gray);
      std::swap(prevflow, flow);

      // Reset the per-frame accumulator, keeping one slot per region.
      feat_region.clear();
      feat_region.resize(n_regions);
    }
  }
  
  //******************TESTING MULTI ACTIONS***********************************
  //***************************************************************************
  
  inline
  void
  features::feature_multi_action( field<string> peo_test, int run )
  {
    // Testing on multi-action videos: extracts per-frame, per-region feature
    // matrices for each test person and saves them (plus the per-frame
    // ground-truth labels) under ./run<run>/features/multi_test/.
    //
    // peo_test : list of person identifiers used for testing.
    // run      : index of the current experimental run (used in output paths).

    ismultiAction = true; // switch feature_video() into multi-action mode

    for (uword vi = 0; vi < peo_test.n_rows; ++vi ){

      lab_feature_vectors.clear();

      // Video, mask video and per-frame label file for this person.
      std::stringstream tmp_ss3;
      tmp_ss3 << multi_path << peo_test(vi) <<"_multiactions.avi" ;

      std::stringstream tmp_ss3_mask;
      tmp_ss3_mask << multi_path_mask << peo_test(vi) <<"_multiactions.avi" ;

      std::stringstream tmp_lbs;
      tmp_lbs << multi_path << peo_test(vi) << "_labels.dat";
      arma_multi_labels.load(tmp_lbs.str(), raw_ascii); //labels are in a frame basis.

      cout << "Calculating features vector per person ..." << peo_test(vi) << endl;
      // Fills featuresframe_video_region_i and lab_feature_vectors.
      feature_video(tmp_ss3.str(), tmp_ss3_mask.str() );

      // Convert the collected labels from std::vector to Armadillo and save.
      uvec lab_feature_vectors_arma(lab_feature_vectors.size());

      for (uword i = 0; i < lab_feature_vectors.size(); ++i)
      {
	lab_feature_vectors_arma(i) = lab_feature_vectors.at(i);
      }

      std::stringstream tmp_vec_lab;
      tmp_vec_lab << "./run" << run <<"/features/multi_test/lab_"<< peo_test(vi);
      lab_feature_vectors_arma.save( tmp_vec_lab.str(), raw_ascii );

      // Save one field of per-frame feature matrices per region.
      for (int ri = 0; ri < n_regions; ++ri)
      {
	std::stringstream tmp_ss4;
	tmp_ss4 << "./run" << run << "/features/multi_test/feat_"<< peo_test(vi)<<"_region"<<ri;

	field<mat> feat_one_region = featuresframe_video_region_i.col(ri);
	feat_one_region.save( tmp_ss4.str() );
      }
  }

}

inline 
int 
features::which_region(int x, int y, cv::Rect bb)
{
  int region_index = 0;
  cv::Point p(x,y);
  
  if (n_regions==2)
  {
    cv::Rect r1 = cvRect(bb.x, bb.y, bb.width, bb.height/2);
    cv::Rect r2 = cvRect(bb.x, bb.y + bb.height/2 , bb.width, bb.height/2);    
    
    if (r1.contains(p)  )
    {
      region_index = 1;
    }
    
    else if( r2.contains(p) )
    {
      region_index = 2;
    }
    else
    {
      
      //cout << x << " & " << y << " Pixel location doesn't belong to the Bounding Box" << endl;
      //getchar();
    }
    
  }
  
  return region_index;
  
  
  
  
}
