/*
 * ContextLearner.h
 *
 *  Created on: May 24, 2011
 *      Author:
 *      Johan Granberg
 *      Alper Aydemir
 */

#ifndef CONTEXTLEARNER_H_
#define CONTEXTLEARNER_H_



//OpenCV
#include "cv.h"
#include "highgui.h"

// Our includes
#include "PointCloud.h"
#include "Feature.h"
#include "FeaturePatch.h"
#include "CompositFeature.h"
#include "Log.h"
#include "IntegralImageGeneralFeature.h"
#include "IntegralImageValue.h"

#include "Classifiers/Classifier.h"
#include "Classifiers/SVMClassifier.h"
#include "Classifiers/BoostedClassifier.h"
#include "Classifiers/BoostedClassifierII.h"
#include "Classifiers/BoostedClassifierIII.h"
#include "Classifiers/BoostedClassifierIV.h"
#include "Classifiers/BoostedClassifierV.h"
#include "Classifiers/BoostedClassifierVI.h"
#include "Classifiers/BoostedClassifierVII.h"
#include "Classifiers/EchoClassifier.h"

#include <iostream>
#include <vector>


namespace findobject
{
  /*
   * ContextLearner
   *
   * Learns the visual context of a target object from a set of training
   * image triples (RGB, depth, annotation mask), then trains and evaluates
   * boosting/SVM classifiers over integral-image features.
   *
   * Typical usage:
   *   1. setTrainingObject() / setTrainingFolders() or addTrainingImages()
   *   2. setFeatureTypes()
   *   3. train()
   *   4. test() / evaluate_image()
   */
  class ContextLearner
  {
    public:

      ContextLearner();
      virtual
      ~ContextLearner();

      /* Set the training object label. */
      void
      setTrainingObject(std::string objectLabel);

      /*
       * Specify where the training images are located.
       *
       * 1. The images should have the frame<number>_<rgb or depth>.bmp format,
       *    e.g. frame21_depth.bmp and frame21_rgb.bmp.
       * 2. Each folder must contain a folder named after the object label
       *    which contains the annotation.
       *
       * Returns true if such a folder exists, false otherwise. */
      bool
      setTrainingFolders(std::vector<std::string> trainingFolderPaths);

      /* Register a list of training frames (by frame number) located in
       * the given folder. */
      bool
      addTrainingImages(std::string folderPath, std::vector<int> frameNumbers);

      /* Take a depth image and return the corresponding XYZ point cloud.
       * Caller takes ownership of the returned PointCloud. */
      PointCloud*
      getXYZFromDepthImage(IplImage* depthImage);

      /* Read the mask image and mark the points that belong to the object. */
      void
      setObjectPoints(PointCloud* pointcloud, IplImage* maskImage);

      /* Read the RGB image and set the RGB data in the point cloud. */
      void
      setRGB(PointCloud* pointcloud, IplImage* rgbImage);

      /* For each point, compute the distance to the nearest object point.
       * Warning: setObjectPoints() must be called first. */
      void
      setDistanceToObject(PointCloud* pointcloud);

      /* Select the features used to compute the feature response vector.
       * Each selected feature is concatenated into the response vector. */
      void
      setFeatureTypes(int featureFlags);

      /* Prepare feature responses. */
      std::pair<FeatureResponse**, FeatureResponse**>
      prepareFeatureResponse(FeatureResponse** responses, FeatureResponse** values, int numberofResponses);

      /* Evaluate a classifier over a full image, sampling the classifier on
       * a grid with the given horizontal/vertical step sizes. */
      float**
      evaluate_image(Classifier* cl, IntegralImageFeature* iifeature, IplImage* rgbImage, IplImage* depthImage, int widthStep, int heightStep);

      /* Train the classifier on the registered training set. */
      void
      train();

      /* Test the trained classifier. */
      void
      test();

      /* Figure out how to reduce the dimensionality of the data using PCA. */
      void
      reduce_dimensionality(FeatureResponse**& target, FeatureResponse**& data, int nr_data);

      /* Apply the PCA basis computed by reduce_dimensionality() to a
       * feature response, keeping the first `dims` dimensions. */
      void
      apply_pca_basis(FeatureResponse* feature_response, int dims);

      /* Estimate the size of the target object. */
      float
      estimate_size_of_object();

      /* Evaluate an ensemble of classifiers as a regressor over the
       * integral-image feature, sampled on a widthStep x heightStep grid. */
      float
      evaluate_regressor(Classifier** cl, int nr_classifiers, IntegralImageFeature* iifeature, int widthStep, int heightStep);

    private:

      /* Get the next image set by trying:
       *   1. m_trainingImagePaths, if it is non-empty;
       *   2. m_trainingFolderPaths, if the above is empty but this one is not.
       *
       * @return false once the training set has been exhausted. */
      bool
      readNextImageSet(IplImage*& outRGBImage, IplImage*& outDepthImage, IplImage*& outMaskImage);

      /* TODO: Get the feature response for each patch. Fills the out-params
       * with per-patch responses/values and their counts. */
      void
      getFeatureResponse(FeaturePatch** patches, int nr_of_patches, IplImage* rgbImage, IplImage* depthImage, IplImage* maskImage,
                         int widthStep, int heightStep, FeatureResponse***& patch_responses, FeatureResponse***& patch_values,
                         int*& nr_responses_saved, FeatureResponse**& list_of_values, int& nr_of_values);

      /* Extract the ground truth and the truth estimated by the classifiers.
       * responses[0] contains the ground truth. */
      void
      getClassifiersTruth(Classifier** classifiers, int nr_classifiers, IplImage* rgbImage, IplImage* depthImage, IplImage* maskImage,
                          int widthStep, int heightStep, float**& responses, int& nr_responses_saved);

      /* Smooth the depth image in place. */
      void
      smoothDepthImage(IplImage* outImage);

      /* Absolute paths of one RGB/depth/mask training triple. */
      struct trainingImageSet
      {
          std::string filePathRGB;
          std::string filePathDepth;
          std::string filePathMask;
      };

      std::string m_objectLabel;                        // label of the object being learned
      std::vector<std::string> m_trainingFolderPaths;   // folders scanned for training frames
      std::vector<trainingImageSet> m_trainingImagePaths; // explicitly registered image triples
      int m_nextImageIndex;                             // cursor for readNextImageSet()
      int m_featureFlags;                               // feature selection set by setFeatureTypes()
      PointCloud* m_p;
      CompositFeature* m_compositeFeature;
      IntegralImageFeature* m_IntegralImageFeature;
      IntegralImageValue* m_numberofObjectPixels;
      int m_numberofResponses;
      Classifier* m_classifier;
      float target_object_size;
  };
} /* namespace findobject */

/* NOTE: the include-guard #endif must come AFTER the namespace close.
 * Previously it sat inside the namespace, so on a second inclusion the
 * preprocessor skipped to #endif and left a stray "};" at file scope. */
#endif /* CONTEXTLEARNER_H_ */
