
inline
features::features(const std::string in_Spath, 
		   const std::string in_Mpath, 
		   const std::string in_Spath_mask, 
		   const std::string in_Mpath_mask,
		   field<std::string> in_actions,  
		   const uword in_col, 
		   const uword in_row
)
:single_path(in_Spath), multi_path(in_Mpath), single_path_mask(in_Spath_mask), multi_path_mask(in_Mpath_mask), actions(in_actions), col(in_col), row(in_row)
{
  // Pixel-count threshold: 1% of the (resized) frame area.
  THRESH_2 = (col * row) / 100;
  
  // Single-action mode by default; feature_multi_action() turns this on.
  ismultiAction = false;
  
  // Per-pixel descriptor dimensionality for the optical-flow features.
  num_features = 18;
  //num_features = 64; ///dct
  //num_features = 16; ///dct2
}



inline
void
features::features_per_action_training(const  field<string>  peo_train, int run)
{
  
  //cout << "DCT Features " << endl;
  //for (uword sc = 1; sc <= 4; ++sc)
  //{
    //actions.print();
    //getchar();
     //cout << "Doing for action ";  

    for (uword act = 0; act < actions.n_rows; ++act)
    {
      feat_all_videos_action_i.clear(); 
      
      //cout << actions(act) << "..." ;
      //num_personas
      int peo_tr =  peo_train.n_rows;
      
      for (uword pe = 0; pe< peo_tr; ++pe)
      {
	
	std::stringstream tmp_ss;
	tmp_ss << single_path << actions(act) << "/" << peo_train (pe) << "_" << actions(act) << ".avi";
	//cout << tmp_ss.str()<< endl;
	
	std::stringstream tmp_ss_mask;
	tmp_ss_mask<< single_path_mask << actions(act) << "/" << peo_train (pe) << "_" << actions(act) << ".avi";
	
	
	//cout << "Calculating features vector per video ..." << tmp_ss.str() << endl;
	
	///Which set of features???
	feature_video(tmp_ss.str(), tmp_ss_mask.str() ) ; //feat_all_videos_action_i is calculated inside this method
	//dct_features( tmp_ss.str(), tmp_ss_mask.str() ) ;
	//cout << "# of Vectors: " << feat_all_videos_action_i.size() << endl;
	//getchar();
	
      }
      
      //cout << "Converting to Arma:" << endl;
      //cout << "# of Vectors: " << feat_all_videos_action_i.size() << endl;
      //getchar();
      
      //int len   //using only each 2 frames
      
      int dim = num_features*32; //num_features x 32 regions
      mat mat_features(dim,feat_all_videos_action_i.size());
      
      
      for (uword i = 0; i < feat_all_videos_action_i.size(); ++i)
      {
	//cout << i << endl;
	mat_features.col(i) = feat_all_videos_action_i.at(i);
	
      }
      
      
      std::stringstream tmp_ss4;
      tmp_ss4 << "./run" << run << "/features/train/feature_vectors_" << actions(act);

      //cout << "OJO!!!!!!" << endl;
      
      
      //cout << "Saving at " << tmp_ss4.str() << endl;
      //cout << mat_features.n_rows << " " << mat_features.n_cols << endl;
      
      mat_features.save( tmp_ss4.str() );
      //getchar();
      
    }
    //cout << endl;

}

//*******************************************************************************************
//************************************Dictionary ********************************************

inline 
void
features::dictionary(int run)
{
  mat mat_features_all_actions;
  
  for (uword act = 0; act < actions.n_rows; ++act)
  {
    mat mat_features_action;
    std::stringstream tmp_ss4;
    tmp_ss4 << "./run" << run << "/features/train/feature_vectors_" << actions(act);
    mat_features_action.load( tmp_ss4.str() );
    //getchar();
    
    //cout << mat_features_action.n_cols << endl;
    for (uword i=0; i<mat_features_action.n_cols; ++i)
    {
      mat_features_all_actions	 = join_rows( mat_features_all_actions, mat_features_action.col(i) );
    }
    
  }
  
  //cout << mat_features_all_actions.n_cols << endl;
  
  ///creating a dict of 32 words
  int n_words = 32;
  gmm_diag kmeans;
  kmeans.learn(mat_features_all_actions, n_words, eucl_dist, static_spread	, 20, 0, 1e-5, false);   //Only Kmeans
  
  std::stringstream dict_name;
  dict_name << "./run"<< run << "/Dict_" << n_words << "words"; 
  //cout << "Saving GMM in " << tmp_ss5.str() << endl << endl;
  kmeans.save( dict_name.str() );
  
  
}

//*******************************************************************************************
//*****************************Histograms per action*******************************************
inline 
void
features::histograms_actions(int run)
{
  int n_words = 32;
  int dim = num_features*32; //num_features x 32 regions
  
  std::stringstream dict_name;
  dict_name << "./run"<< run << "/Dict_" << n_words << "words"; 
  
  gmm_diag kmeans;
  kmeans.load( dict_name.str() );
  
  mat mat_Means = kmeans.means;
  vec dist;
  vec hist;
  
   // cout << "Doing for action ";  
  for (uword act = 0; act < actions.n_rows; ++act)
  {
    
    //cout << actions(act) << "..." ;
    
    mat mat_features_action;
    std::stringstream tmp_ss4;
    tmp_ss4 << "./run" << run << "/features/train/feature_vectors_" << actions(act);
    mat_features_action.load( tmp_ss4.str() );
    
    
    mat mat_histogram_action;
    mat_histogram_action.zeros(n_words, mat_features_action.n_cols);
    
    //mat_histogram_action.print();
    //cout << mat_features_action.n_cols << endl;
    //getchar();
    
    for (uword v=0; v<mat_features_action.n_cols; ++v)
    {
      vec vector_v = mat_features_action.col(v);
      dist.zeros( n_words );
      hist.zeros( n_words );
      
      for (uword m=0; m<n_words; ++m)
      {
	dist(m) = norm( vector_v - mat_Means.col(m) , 2);
	
      }
      
      uword index;
      double min_dist= dist.min(index);
      hist(index) = 1;
      
      mat_histogram_action.col(v) = hist;
      
    }
    
    std::stringstream hist_matrix_name;
    hist_matrix_name<< "./run" << run << "/features/train/histogram_vectors_" << actions(act);
 
    //cout << "Saving at " << hist_matrix_name.str() << endl;
    //cout << mat_histogram_action.n_rows << " " << mat_histogram_action.n_cols << endl;
    mat_histogram_action.save( hist_matrix_name.str() );
    //getchar();
 
  }
  //cout << endl;

}


//*******************************************************************************************
//***************************Features per video: Training and Testing ************************


///Optical flow features
inline 
void
features::feature_video(std::string one_video, std::string one_video_mask)
{
  // Computes an optical-flow-based descriptor for every second frame of
  // one_video.  The person's bounding box is found from the foreground mask
  // in one_video_mask, cropped, resized to col x row, and split into
  // non-overlapping 8x8 regions; each region is described by the mean of an
  // 18-dimensional per-pixel feature vector (position, image gradients,
  // flow, flow derivatives, tensor invariants).
  //
  // Side effects:
  //  - appends one (num_features*32)-element vector per processed even frame
  //    to feat_all_videos_action_i;
  //  - when ismultiAction is true, also appends the frame's label (read from
  //    arma_multi_labels) to lab_feature_vectors;
  //  - shows several debug windows via cv::imshow / cv::waitKey.
  
  cv::VideoCapture capVideo(one_video);
  
  cv::VideoCapture capVideo_mask(one_video_mask);
  
  double frmcount = capVideo.get(CV_CAP_PROP_FRAME_COUNT);
  
  if( !capVideo.isOpened() )
  {
    cout << "Video couldn't be opened" << endl;
    return;
  }
  
  // The mask video is opened as RGB; channel 0 is used as the binary mask.
  cv::Mat bb_prevgray, bb_gray,  bb_flow, frame, bb_prevflow, mask, mask_rgb;
  
  cv::Mat ixMat, iyMat, ixMat_mask, iyMat_mask, ixxMat, iyyMat;
  cv::Mat flow_xy[2], mag, ang;
  cv::Mat mask_3ch[3];
  
  int t = -1;  // index of the last successfully read frame
  
  for(uword fr=0; fr<frmcount; fr++){
    
    bool bSuccess = capVideo.read(frame); // read a new frame from video
    bool bSuccess2= capVideo_mask.read(mask_rgb); // read a new frame from video
    
    cv::split(mask_rgb, mask_3ch);
    
    mask = mask_rgb;  // NOTE: shallow copy — shares pixel data with mask_rgb
    
    if (!bSuccess) //if not success, break loop
	{
	  break;
	}
	t++;
    
	
	///Finding the bounding box of the largest contour in the mask
	
	vector<vector<cv::Point> > contours;
	vector<cv::Vec4i> hierarchy;
	cv::findContours( mask_3ch[0], contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
	cv::Rect bb;
	int largest_area=0;
	int largest_contour_index=0;
	
	for( int i = 0; i< contours.size(); i++ ) // iterate through each contour. 
      {
	double a=contourArea( contours[i],false);  //  Find the area of contour
	
	if(a>largest_area){
	  largest_area=a;
	  largest_contour_index=i;                //Store the index of largest contour
	  bb=boundingRect(contours[i]); // Find the bounding rectangle for biggest contour
	  
	}
   
      }
      
      
	  if ( contours.size()>0)
	  {
	    cv::rectangle(mask,bb,cv::Scalar(255,255,0),2);
	  }

	//end finding BB	
	
	
	///Creating bb_frame: crop the frame to the bounding box.
	// NOTE(review): if no contour is found, bb stays an empty rect and the
	// crop/resize below will fail — assumes the mask always has foreground.
	cv::Mat bb_frame(frame, bb);
	
	///Resizing the crop to the canonical col x row size
	cv::resize(bb_frame,bb_frame, cv::Size(col,row) );
	
	
	cv::cvtColor(bb_frame, bb_gray, CV_BGR2GRAY);
	
	// Optical-flow visualisation image (filled further below).
	cv::Mat cflow(bb_frame.size(), CV_32FC3, cvScalar(0,0,0));
	
	
	if( bb_prevgray.data )  // need a previous frame to compute the flow
	{
	  cv::calcOpticalFlowFarneback(bb_prevgray,
				       bb_gray, 
				bb_flow, 
				0.5, //pyr_scale
				3,   //levels
				9,   //winsize
				1,   //iterations
				5,   //poly_n
				1.1, //poly_sigma
				0);  //flags
	  
	  // First and second spatial image gradients of the grayscale crop.
	  cv::Sobel(bb_gray, ixMat, CV_32F, 1, 0, 1);
	  cv::Sobel(bb_gray, iyMat, CV_32F, 0, 1, 1);
	  cv::Sobel(bb_gray, ixxMat, CV_32F, 2, 0, 1);
	  cv::Sobel(bb_gray, iyyMat, CV_32F, 0, 2, 1);
	  
	  // Mask gradients (computed but not read below).
	  cv::split(bb_flow, flow_xy);
	  cv::Sobel(mask_3ch[0], ixMat_mask, CV_32F, 1, 0, 1);
	  cv::Sobel(mask_3ch[0], iyMat_mask, CV_32F, 0, 1, 1);
	  
	  
	  float  ux = 0, uy = 0, vx = 0,  vy = 0;  // spatial flow derivatives
	  float u, v;                              // flow at the current pixel
	  float Gten, Sten;                        // tensor invariants
	  
	  
	  if( bb_prevflow.data )  // need two flow fields for temporal derivatives
	  {
	    
	    cv::split(bb_flow, flow_xy);
	    cv::cartToPolar(flow_xy[0], flow_xy[1], mag, ang, true);
	    
	    
	    ///To Plot Optical Flow (HSV encoding: hue = angle, value = magnitude)
	    {
	       	    cv::Mat hsv_channels[3];
	       	    cv::Mat hsv(bb_frame.size(), CV_32FC3, cvScalar(0,0,0));
	       	    cv::split(hsv,hsv_channels);
	       	    
	       	    double mag_max, mag_min;
	       	    cv::minMaxLoc(mag, &mag_min, &mag_max);
	       	    mag.convertTo(mag, -1, 1.0/mag_max);
	       	    hsv_channels[0] = ang;
	       	    hsv_channels[1].setTo(cv::Scalar(255)) ;
	       	    hsv_channels[2] = mag;
	       	    cv::merge(hsv_channels, 3, hsv);
	       	    cv::cvtColor(hsv, cflow, CV_HSV2BGR);
	    }
	    ///end
	    
	    
	    // R holds one mean feature vector per 8x8 region; 32 regions total.
	    mat R(num_features,32);
	    R.zeros();
	    int n_reg =0;
	    running_stat_vec<vec> stats_region;
	    
	    // Slide over non-overlapping 8x8 regions of the resized crop.
	    for (uword w = 0 ; w < col-7 ; w=w+8 ){
	      for (uword h = 0 ; h < row-7 ; h=h+8 ) {
		
		for (uword x = w ; x < w+8 ; ++x ){
		  for (uword y = h ; y < h+8 ; ++y ) {
		    
		    vec features_one_pixel(num_features);
		    mat G (2,2);
		    mat S;
		    u = bb_flow.at<cv::Vec2f>(y, x)[0];
		    v = bb_flow.at<cv::Vec2f>(y, x)[1];
		    
		    // First spatial image gradients.
		    float ix = ixMat.at<float>(y, x);
		    float iy = iyMat.at<float>(y, x);
		    
		    // Gradient direction and magnitude.
		    float gd = std::atan2(std::abs(iy), std::abs(ix));
		    float gm = std::sqrt(ix * ix + iy * iy);
		    
		    // Second spatial image gradients.
		    float ixx = ixxMat.at<float>(y, x);
		    float iyy = iyyMat.at<float>(y, x);
		    
		    // Temporal flow derivatives du/dt, dv/dt.
		    float ut = u - bb_prevflow.at<cv::Vec2f>(y, x)[0];
		    float vt = v - bb_prevflow.at<cv::Vec2f>(y, x)[1];
		    
		    // Spatial flow derivatives (backward differences);
		    // at the border the previous pixel's values are reused.
		    if (x>0 && y>0 )
		    {
		      ux = u - bb_flow.at<cv::Vec2f>(y, x - 1)[0];
		      uy = u - bb_flow.at<cv::Vec2f>(y - 1, x)[0];
		      vx = v - bb_flow.at<cv::Vec2f>(y, x - 1)[1];
		      vy = v - bb_flow.at<cv::Vec2f>(y - 1, x)[1];
		    }
		    
		    // Flow in polar form (from cartToPolar above).
		    float gd_opflow = ang.at<float>(y,x);
		    float mg_opflow = mag.at<float>(y,x);
		    
		    //Gradient Tensor
		    G   << ux << uy << endr
		    << vx << vy << endr;
		    
		    //Rate-of-Strain Tensor (symmetric part of G)
		    S = 0.5*(G + G.t());
		    
		    float tr_G = trace(G);
		    float tr_G2 = trace( square(G) );
		    float tr_S = trace(S);
		    float tr_S2 = trace(square(S));
		    
		    //Tensor Invariants  of the optical flow
		    Gten = 0.5*( tr_G*tr_G - tr_G2 );
		    Sten = 0.5*( tr_S*tr_S - tr_S2 );  
		    
		    // 18 per-pixel features (matches num_features = 18
		    // set in the constructor).
		    features_one_pixel  << x << y << abs(ix) << abs(iy) << abs(ixx) 
		    << abs(iyy) << gm << gd <<  u << v << abs(ut) 
		    << abs(vt) << (ux + vy)  << (vx - uy) << mg_opflow << gd_opflow << Gten << Sten;
		    
		    stats_region(features_one_pixel);
		    
		    
		  }
		}
		
		// Take the mean over the region and reset the running stats.
		vec mean_region = stats_region.mean();
		stats_region.reset();
		R.col(n_reg) = mean_region;
		
		n_reg++;
		
	      }
	    }
	    
	    ///Keep only every second (even-indexed) frame.
	    if ( (fr % 2 ) == 0 ) 
	    {
	      vec v_R = vectorise(R);
	      feat_all_videos_action_i.push_back(v_R);
	      if (ismultiAction)     
	      {
		uword lab;
		lab = arma_multi_labels(fr);
		lab_feature_vectors.push_back( lab ); 
		
	      }
	      
	    }
	    
	  }
	  
	}
	
	// Abort on any key press in the debug windows.
	if(cv::waitKey(30)>=0)
	  break;
	
	
	// Current frame/flow become the "previous" ones for the next iteration.
	std::swap(bb_prevgray, bb_gray);
	std::swap(bb_prevflow, bb_flow);
	
	
	///putText in frame (disabled: overlay the action label on the frame)
	{
// 	  uword lab;
// 	  lab = arma_multi_labels(fr);
// 	  string text;
// 	  stringstream textss;
// 	  textss << actions(lab);
// 	  text = textss.str();
// 	  int fontFace = cv::FONT_HERSHEY_PLAIN;
// 	  double fontScale = 0.8;
// 	  int thickness = 1;  
// 	  cv::Point textOrg(10, 100);
// 	  cv::putText(frame, text, textOrg, fontFace, fontScale, cv::Scalar::all(255), thickness,8);
	}
	
	
	///Show debug images
	{
	  cv::imshow("mask", mask); 
	  cv::imshow("frame", frame); 
	  cv::imshow("bb_frame", bb_frame); 
	  cv::imshow("opt_flow", cflow);
	}
	
	
  }
  
}


///dct_features

inline 
void
features::dct_features(std::string one_video, std::string one_video_mask)
{
  
  //cout << "DCT Features " << endl;
  cv::Mat frame, mask_rgb, mask,bb_gray;
   cv::Mat mask_3ch[3];
  cv::VideoCapture capVideo(one_video);
  
  cv::VideoCapture capVideo_mask(one_video_mask);
  
  double frmcount = capVideo.get(CV_CAP_PROP_FRAME_COUNT);
  //cout << "# of frames is: " << frmcount << endl;
  
  if( !capVideo.isOpened() )
  {
    cout << "Video couldn't be opened" << endl;
    return;
  }
  

  
    for(uword fr=0; fr<frmcount; fr++){
     //cout << " " << fr ;
    
    bool bSuccess = capVideo.read(frame); // read a new frame from video
    bool bSuccess2= capVideo_mask.read(mask_rgb); // read a new frame from video
    
    cv::split(mask_rgb, mask_3ch);
    
    mask = mask_rgb;
    
    if (!bSuccess) //if not success, break loop
	{
	  //cout << "Cannot read the frame from video file" << endl;
	  break;
	}

    
    
    if (fr>0)
    {

      ///Finding the Bounding Box
	
	vector<vector<cv::Point> > contours;
	vector<cv::Vec4i> hierarchy;
	cv::findContours( mask_3ch[0], contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
	//cout << "Number of contours: " << contours.size() << endl;
	cv::Rect bb;
	int largest_area=0;
	int largest_contour_index=0;
	
	
	for( int i = 0; i< contours.size(); i++ ) // iterate through each contour. 
      {
	double a=contourArea( contours[i],false);  //  Find the area of contour
	
	if(a>largest_area){
	  largest_area=a;
	  largest_contour_index=i;                //Store the index of largest contour
	  bb=boundingRect(contours[i]); // Find the bounding rectangle for biggest contour
	  
	}
   
      }
      
	 
	//end finding BB	
	
	
	///Creating bb_frame
	cv::Mat bb_frame(frame, bb);
	
	///Resizing Image
	cv::resize(bb_frame,bb_frame, cv::Size(col,row) );
	
	
	cv::cvtColor(bb_frame, bb_gray, CV_BGR2GRAY);
	
	//Calculating mean and stddev for current frame
	
	cv::Scalar mean1, stddev1;
	cv::meanStdDev( bb_gray, mean1, stddev1);
	
	mean_frame = mean1.val[0];
	std_frame  =stddev1.val[0];
	
	
	
	
	//cout << mean_frame << " " << std_frame << endl;
	
	///para bb_frame calcular dct... dividir en subregiones
	
	mat R(num_features,32); // R is the matrix where I'll save all the vectors obtained per region. 32 regions. num_featuresD
	R.zeros();
	int n_reg =0;
	mat subface;


	    for (uword w = 0 ; w < col-7 ; w=w+8 ){
	      for (uword h = 0 ; h < row-7 ; h=h+8 ) {
		
		
		
		///Creating sub_region
		cv::Rect bb2 (w,h,8,8);
		cv::Mat sub_region(bb_gray, bb2);
		
		//converting to arma::fmat 
		cv::Mat sub_regionT(sub_region.t());
		fmat sub_regionArma(sub_regionT.ptr<float>(), 8, 8);
		
		mat dct = calc_dct_one_block ( sub_regionArma );
		mat dct2 = dct.submat(0,0,3,3); //Using 15 features
		
				
		//dct.print();
		//getchar();
		R.col(n_reg) = vectorise(dct2);
		
		n_reg++;

		
		
		//showing sub_region, mask, frame, etc
		 //if ( contours.size()>0)
		//{
		  //cv::rectangle(mask,bb,cv::Scalar(255,255,0),2);
		//}
		
		//cv::rectangle(bb_frame,bb2,cv::Scalar(255,255,0),2);		     
		//cv::rectangle(frame,bb,cv::Scalar(255,255,0),2);	
		//cv::imshow("bb_frame", bb_frame); 
		//cv::imshow("frame", frame); 
		//cv::imshow("mask", mask);
		//cv::waitKey();


	      }
	      
	    }
	    
	    ///Es par???
	    if ( (fr % 2 ) == 0 ) 
	    {
	      vec v_R = vectorise(R);
	      feat_all_videos_action_i.push_back(v_R);
	      
	      if (ismultiAction)     
	      {
		uword lab;
		lab = arma_multi_labels(fr);
		lab_feature_vectors.push_back( lab ); 
		
	      }
	      
	    }
      
    }
    
    }
  
  
  
}


inline
mat
features::calc_dct_one_block (fmat sub_region)
{
  // Plain O(N^4) 2-D DCT-II of an 8x8 block; returns the 8x8 coefficient
  // matrix.  Non-finite input samples are zeroed before the transform.
  const uword N = 8;
  
  mat dct = zeros(N,N);
  
  // Sanitise the input once up front instead of re-checking inside the
  // O(N^4) transform loop (same effect: each sample is zeroed before use).
  for (uword x = 0; x < N; x++)
    for (uword y = 0; y < N; y++)
      if ( !is_finite(sub_region(x,y)) )
        sub_region(x,y) = 0;
  
  for (uword u = 0; u < N; u++)
    for (uword v = 0; v < N; v++)
    {
      // Normalisation factors depend only on (u,v): hoisted out of the
      // inner pixel loops instead of being recomputed per sample.
      const double Cu = ((u == 0) ? 1 / sqrt(2) : 1);
      const double Cv = ((v == 0) ? 1 / sqrt(2) : 1);
      
      double s = 0;
      for (uword x = 0; x < N; x++)
        for (uword y = 0; y < N; y++)
        {
          //double subframe_xy = (sub_region(x,y) - mean_frame)/std_frame;
          double subframe_xy = sub_region(x,y);
          s += subframe_xy * cos((2 * x + 1) * u * datum::pi / (2*N)) *
               cos((2 * y + 1) * v * datum::pi / (2*N)) * Cu * Cv;
        }
      
      dct(u,v) = s*2/N;
    }
  
  // Diagnostic: the DCT of sanitised (finite) input must itself be finite;
  // if not, dump the state and pause so the run can be inspected.
  uvec Dios = find_nonfinite(dct);
  
  if (Dios.n_elem > 0)
  {
    cout << dct << endl;
    cout << sub_region << endl;
    cout << mean_frame << " " << std_frame << endl;
    
    cout << "std = 0???" << endl;
    getchar();
  }
  
  return dct;
}


//******************TESTING MULTI ACTIONS***********************************
//***************************************************************************

inline 
void 
features::feature_multi_action( field<string> peo_test, int run )
{
  // Extract features, per-vector labels and one-hot histograms for the
  // multi-action test video of every person in peo_test, saving three files
  // per person under ./run<run>/features/multi_test/.
  ismultiAction = true;  // makes feature_video() also collect per-vector labels
  const uword n_words = 32;           // must match dictionary()
  const int dim = num_features * 32;  // num_features x 32 regions
  
  std::stringstream dict_name;
  dict_name << "./run" << run << "/Dict_" << n_words << "words";
  
  gmm_diag kmeans;
  kmeans.load( dict_name.str() );
  
  mat mat_Means = kmeans.means;  // one centroid per column
  vec dist;
  vec hist;
  
  for (uword vi = 0; vi < peo_test.n_rows; ++vi)
  {
    feat_all_videos_action_i.clear();
    lab_feature_vectors.clear();
    
    std::stringstream tmp_ss3;
    tmp_ss3 << multi_path << peo_test(vi) << "_multiactions.avi";
    
    std::stringstream tmp_ss3_mask;
    tmp_ss3_mask << multi_path_mask << peo_test(vi) << "_multiactions.avi";
    
    // Ground-truth labels are stored on a per-frame basis.
    std::stringstream tmp_lbs;
    tmp_lbs << multi_path << peo_test(vi) << "_labels.dat";
    arma_multi_labels.load(tmp_lbs.str(), raw_ascii);
    
    ///Which set of features???
    // Fills feat_all_videos_action_i and lab_feature_vectors.
    feature_video(tmp_ss3.str(), tmp_ss3_mask.str());
    //dct_features( tmp_ss3.str(), tmp_ss3_mask.str() ) ;
    
    mat mat_features(dim, feat_all_videos_action_i.size());
    uvec lab_feature_vectors_arma(lab_feature_vectors.size());
    
    // NOTE(review): assumes both vectors have the same length; they are
    // pushed together inside feature_video() when ismultiAction is set.
    for (uword i = 0; i < feat_all_videos_action_i.size(); ++i)
    {
      mat_features.col(i) = feat_all_videos_action_i.at(i);
      lab_feature_vectors_arma(i) = lab_feature_vectors.at(i);
    }
    
    mat mat_histogram_video;
    mat_histogram_video.zeros(n_words, mat_features.n_cols);
    
    for (uword v = 0; v < mat_features.n_cols; ++v)
    {
      vec vector_v = mat_features.col(v);
      dist.zeros( n_words );
      hist.zeros( n_words );
      
      // Euclidean distance to every dictionary word.
      for (uword m = 0; m < n_words; ++m)
      {
        dist(m) = norm( vector_v - mat_Means.col(m), 2 );
      }
      
      // Hard assignment: one-hot vector at the closest word.
      uword index;
      dist.min(index);
      hist(index) = 1;
      
      mat_histogram_video.col(v) = hist;
    }
    
    std::stringstream tmp_ss4;
    tmp_ss4 << "./run" << run << "/features/multi_test/feat_" << peo_test(vi);
    
    std::stringstream tmp_vec_lab;
    tmp_vec_lab << "./run" << run << "/features/multi_test/lab_" << peo_test(vi);
    
    std::stringstream hist_matrix_name;
    hist_matrix_name << "./run" << run << "/features/multi_test/histogram_vectors_" << peo_test(vi);
    
    mat_features.save( tmp_ss4.str() );
    lab_feature_vectors_arma.save( tmp_vec_lab.str(), raw_ascii );
    mat_histogram_video.save( hist_matrix_name.str() );
  }
}
