#include "segment_image_def.hpp"
#include "segment_image_impl.hpp"




// Constructor: binds the feature extractor to a dataset layout and loads
// the list of action names.
//
//   in_Spath       - base path for single-action videos
//   in_Mpath       - base path for multi-action videos (used by feature_multi_action)
//   in_actionNames - filename of the action-name list; loaded into `actions` below
//   in_col, in_row - frame width/height used as pixel-scan bounds in feature_video
//   in_peo_train   - person ids for the training split
//   in_peo_test    - person ids for the testing split
//
// NOTE(review): the std::string and uvec parameters are taken by const value,
// which copies them; presumably the header declares the same signature, so
// confirm against segment_image_def.hpp before switching to const references.
inline
features::features(const std::string in_Spath, 
		   const std::string in_Mpath, 
		   const std::string in_actionNames,  
		   const int in_col, 
		   const int in_row,
		   const uvec in_peo_train,
		   const uvec in_peo_test 
)
:single_path(in_Spath), multi_path(in_Mpath), actionNames(in_actionNames), col(in_col), row(in_row), peo_train(in_peo_train), peo_test(in_peo_test)
{

  
  actions.load( actionNames );  //actions.print("All actions");

}


//******************TESTING MULTI ACTIONS***********************************
//***************************************************************************

inline 
void 
features::feature_multi_action()
{
  // Runs feature extraction over every video named in the "Run1" test split.
  // For each video: reset the per-video accumulators, build the full video
  // and label paths from multi_path, load the per-frame labels, then delegate
  // the actual feature computation to feature_video().
  field<string> test_list; 
  field<string> test_label_list;
  
  // Fail loudly if either list file is missing/corrupt, instead of silently
  // iterating over an empty field (original ignored the load() return value).
  if( !test_list.load("test_list_Run1") )
  {
    cout << "Couldn't load test_list_Run1" << endl;
    return;
  }
  if( !test_label_list.load("test_list_lab_Run1") )
  {
    cout << "Couldn't load test_list_lab_Run1" << endl;
    return;
  }
  
  test_list.print("Test_list");
  ///ojo!!!!!!!!!
  for (uword vi = 0; vi <test_list.n_rows; ++vi ){ //videos.n_rows

      // Per-video state must start clean; these members are filled by
      // feature_video() and the (currently disabled) saving code below.
      feat_all_videos_action_i.clear(); 
      lab_feature_vectors.clear();
      
      
      std::stringstream tmp_ss3;
      tmp_ss3 << multi_path << test_list(vi);
      cout << tmp_ss3.str()<< endl;
      
      std::stringstream tmp_lbs;
      tmp_lbs << multi_path << test_label_list(vi);
      cout << tmp_lbs.str() << endl;
      
      // Labels are stored on a per-frame basis.  Warn but keep going on a
      // failed load, preserving the original best-effort behaviour.
      if( !arma_multi_labels.load(tmp_lbs.str(), raw_ascii) )
	cout << "Couldn't load labels from " << tmp_lbs.str() << endl;
      
      cout << "Calculating features vector per video ..." << test_label_list(vi) << endl;
      feature_video(tmp_ss3.str()); //feat_all_video_action_i is calculated inside this method
      
      
      /*
      cout << "Total feature vectors: " << featuresframe_video_i.n_elem << endl;
      //cout << "Total labels: " << lab_feature_vectors.size() << endl;
      
      cout << "Converting to Arma:" << endl;
      cout << "# of Vectors: " << lab_feature_vectors.size() << endl;
      uvec lab_feature_vectors_arma(lab_feature_vectors.size());
      
      
      for (uword i = 0; i < lab_feature_vectors.size(); ++i)
      {
	//cout << i << endl;
	lab_feature_vectors_arma(i) = lab_feature_vectors.at(i);
	
      }
      std::stringstream tmp_ss4;
      tmp_ss4 << "./run1/features/multi_test/feat_"<< test_label_list(vi);  // 17 for training from 18 onwards for testing
      
      std::stringstream tmp_vec_lab;
      tmp_vec_lab << "./run1/features/multi_test/lab_"<< test_label_list(vi);  // 17 for training from 18 onwards for testing
      
      //cout << tmp_ss4.str() << endl;
      //getchar();
      cout << "Saving " << endl;
      featuresframe_video_i.save( tmp_ss4.str() );
      lab_feature_vectors_arma.save( tmp_vec_lab.str(), raw_ascii );
      //cout << "Press a key" << endl;
      //getchar();
      
      */
      
  }
  
}

//*******************************************************************************************
//***************************Feature per video: Training and Testing ************************

// Extracts per-pixel motion/appearance features for every frame of one
// video file, and paints pixels with gradient magnitude > 40 red on the
// displayed frame.  featuresframe_video_i is sized to hold results for
// every second frame, although the code that actually stores/saves the
// per-frame feature matrices is currently disabled (see the /* */ block
// below).
//
//   one_video - full path to the video file to process
inline 
void
features::feature_video(std::string one_video)
{
  segm_img segm_img;
  // Per-frame feature vectors; cleared each frame.  Currently never filled
  // because the push_back near the end is commented out.
  std::vector < vec > feat_frame;
  
  
  cv::VideoCapture capVideo(one_video);
  //cout << one_video << endl;
  //double fps = capVideo.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
  //cout << "Frame per seconds : " << fps << endl;
  
  //cv::namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
  
  // NOTE(review): queried before the isOpened() check; OpenCV returns 0 for
  // an unopened capture, so the early return below still protects the loop.
  double frmcount = capVideo.get(CV_CAP_PROP_FRAME_COUNT);
  //cout << "# of frames is: " << frmcount << endl;
  
  if( !capVideo.isOpened() )
  {
    cout << "Video couldn't be opened" << endl;
    return;
  }
  
  // prevgray/prevflow hold the previous frame's grayscale image and flow
  // field; both start empty, so the feature code only runs from the third
  // frame onwards (two frames of history are needed for ut/vt).
  cv::Mat prevgray, gray, flow, cflow, frame, prevflow;
  cv::Mat ixMat, iyMat, ixxMat, iyyMat;
  //cv::namedWindow("My Video", 1);
  //running_stat_vec<vec> stats_video(true);
  //cout << "Frame: ";
  int t = -1;     // index of the last successfully read frame
  
  //Saving each two frames
  int par_fr = 0;
  int len = ceil(frmcount/2)-1; //0 and 1 not used
  featuresframe_video_i.set_size( len );
  //cout << "Len " << len << endl;
  //lab_feature_vectors.set_size( frmcount );
  
  // NOTE(review): uword loop counter compared against a double frame count;
  // works for normal videos but relies on an implicit conversion.
  for(uword fr=0; fr<frmcount; fr++){
    
    //cout << t << " " ;
    
    bool bSuccess = capVideo.read(frame); // read a new frame from video
    
    if (!bSuccess) //if not success, break loop
	{
	  //cout << "Cannot read the frame from video file" << endl;
	  break;
	}
	
	
	
	t++;
    
    cv::Mat gray2;
    cv::cvtColor(frame, gray2, CV_BGR2GRAY);
    //cv::cvtColor(frame,gray, cv::COLOR_BGR2GRAY);//For Opencv 3.0 (installed at home)
    // Mean gray level of the frame; only used for the on-screen text overlay
    // (and the commented-out adaptive threshold m_gray/3 below).
    cv::Scalar mean_gray = mean(gray2);
    double m_gray = mean_gray.val[0];
    
    
    ///haz aca la funcion de contorno de la imagen
    // Work on the segmented contour image rather than the raw grayscale.
    gray = segm_img.get_contour(gray2);
    //gray = gray2;
	
	
	
    if( prevgray.data )
    {
      //cout << t << " " ;
      // Dense optical flow between the previous and current contour images.
      cv::calcOpticalFlowFarneback(prevgray, 
				   gray, 
				   flow, 
				   0.5, //pyr_scale
				   3,   //levels
				   9,   //winsize
				   1,   //iterations
				   5,   //poly_n
				   1.1, //poly_sigma
				   0);  //flags
      
      
      // First and second spatial derivatives of the contour image
      // (kernel size 1 => plain central differences).
      cv::Sobel(gray, ixMat, CV_32F, 1, 0, 1);
      cv::Sobel(gray, iyMat, CV_32F, 0, 1, 1);
      cv::Sobel(gray, ixxMat, CV_32F, 2, 0, 1);
      cv::Sobel(gray, iyyMat, CV_32F, 0, 2, 1);
      
      float  ux = 0, uy = 0, vx = 0,  vy = 0;
      float u, v;
      
      // Needs two frames of flow history for the temporal derivatives below.
      if( prevflow.data )
      {
	
	// Scan every pixel inside the configured col x row bounds.
	// NOTE(review): assumes col/row never exceed the actual frame size -
	// confirm against how the constructor is called.
	for (uword x = 0 ; x < col ; ++x ){
	  for (uword y = 0 ; y < row ; ++y ) {
	    
	    // 14-element feature vector per pixel (layout listed at the
	    // << insertion below).
	    vec features_one_pixel(14);
	    u = flow.at<cv::Vec2f>(y, x)[0];
	    v = flow.at<cv::Vec2f>(y, x)[1];
	    
	    //cout << "x= " << x << " - y= " << y << endl;
	    // x grad
	    //cout << " x y grad" << endl;
	    float ix = ixMat.at<float>(y, x);
	    //cout << " y grad" << endl;
	    float iy = iyMat.at<float>(y, x);
	    
	    // grad direction &  grad magnitude
	    //cout << "grad direction &  grad magnitude" << endl;
	    float gd = std::atan2(std::abs(iy), std::abs(ix));
	    float gm = std::sqrt(ix * ix + iy * iy);
	    
	    // x second grad
	    //cout << "x y  second grad " << endl;
	    float ixx = ixxMat.at<float>(y, x);
	    // y second grad
	    float iyy = iyyMat.at<float>(y, x);
	    
	    //du/dt  (temporal derivative of the flow field)
	    float ut = u - prevflow.at<cv::Vec2f>(y, x)[0];
	    // dv/dt
	    float vt = v - prevflow.at<cv::Vec2f>(y, x)[1];
	    
	    //// divergence &  vorticity
	    //cout << "divergence &  vorticity" << endl;
	    // NOTE(review): when x==0 or y==0 these keep the values from the
	    // previously visited pixel (initialised to 0 only once per frame)
	    // rather than being reset - looks unintended; confirm.
	    if (x>0 && y>0 )
	    {
	      ux = u - flow.at<cv::Vec2f>(y, x - 1)[0];
	      uy = u - flow.at<cv::Vec2f>(y - 1, x)[0];
	      vx = v - flow.at<cv::Vec2f>(y, x - 1)[1];
	      vy = v - flow.at<cv::Vec2f>(y - 1, x)[1];
	    }
	    //int x_submat = x + rec.x;
	    //int y_submat = y + rec.y;
	    //cout << x_submat << "&" << y_submat << endl;
	    
	    
	    
	    // Feature layout: x, y, |ix|, |iy|, |ixx|, |iyy|, grad magnitude,
	    // grad direction, u, v, |du/dt|, |dv/dt|, divergence-like (ux-vy),
	    // curl-like (vx-uy).
	    // NOTE(review): unqualified abs() on floats - if only the C abs(int)
	    // is in scope this truncates; std::abs (as used for gd above) would
	    // be the safe spelling. Confirm which overload the headers provide.
	    features_one_pixel  << x << y << abs(ix) << abs(iy) << abs(ixx) 
	    << abs(iyy) << gm << gd <<  u << v << abs(ut) 
	    << abs(vt) << (ux - vy)  << (vx - uy);
	    //features_one_pixel.t().print("Features Current Pixel: ");
	    //getchar();
	    
	    
	    // Blocks on stdin if any feature went NaN/Inf (debug aid).
	    if (!is_finite( features_one_pixel ) )
	    {
	      cout << "It's not FINITE... continue???" << endl;
	      getchar(); 
	    }
	    // Plotting Moving pixels
	    //cout << " " << gm;
	    if (gm>40) // Empirically set to 40
	    //if (gm>m_gray/3)
	    {
	      // Paint the "moving" pixel red (BGR order) on the display frame.
	       			      frame.at<cv::Vec3b>(y,x)[0] = 0;
	       			      frame.at<cv::Vec3b>(y,x)[1] = 0;
	       			      frame.at<cv::Vec3b>(y,x)[2] = 255;
	       
	      //feat_frame.push_back(features_one_pixel);
	    }
	  }
	}
	
	
	//string text;
	//text = "Frame wasn't discarded. Multivideo";
	
	// Disabled: store the per-frame feature matrix for every even frame
	// together with its label from arma_multi_labels.
	/*
	if ( (fr % 2 ) == 0 ) 
	{
	  
	  //cout << "fr= " << fr << " par_fr= " << par_fr << " " ;
	  mat feat_frame_i( 14,feat_frame.size() );
	  
	  for (uword l = 0; l<feat_frame.size(); ++l )
	  {
	    feat_frame_i.col(l) = feat_frame.at(l);
	    
	  }
	  
	  featuresframe_video_i(par_fr) = feat_frame_i;
	  uword lab = arma_multi_labels(fr);
	  lab_feature_vectors.push_back( lab ); 
	  par_fr++;
	  
	}
	
	*/
	
	
      }
      
    }
    
    // Allow the user to abort playback from the display window.
    if(cv::waitKey(30)>=0)
      break;
    
    
    // Roll the history: current gray/flow become previous for the next
    // iteration (order-sensitive - do not move these).
    std::swap(prevgray, gray);
    std::swap(prevflow, flow);
    
    
    
      // Overlay the frame's mean gray level as text for visual debugging.
      std::stringstream tmp_tx;
      tmp_tx << m_gray;
      string text = tmp_tx.str();
      int fontFace = cv::FONT_HERSHEY_PLAIN;
      double fontScale = 0.8;
      int thickness = 1;  
      cv::Point textOrg(10, 100);
      cv::putText(frame, text, textOrg, fontFace, fontScale, cv::Scalar::all(255), thickness,8);
     
    //cout << "otro tam: " <<feat_video_i.size() << endl;
    
    
    
    cv::imshow("color", frame);
    //cv::waitKey();
    //
    
    
    feat_frame.clear();
  }
  //cout << "# of frames used " << par_fr<< endl;
  //getchar();
  
}




