/*****************************************************************************
** CS221 Computer Vision Project
** Copyright (c) 2006, Stanford University
**
** FILENAME:    classifier.cpp
** AUTHOR(S):   Stephen Gould <sgould@stanford.edu>
**              Ian Goodfellow <ia3n@stanford.edu>
** DESCRIPTION:
**  See classifier.h
**
*****************************************************************************/

#include <cassert>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <fstream>
#include <iostream>

#include "cv.h"
#include "ml.h"
#include "cxcore.h"
#include "highgui.h"

#include "classifier.h"

using namespace std;


#define WINDOW_SIZE 32
#define RESIZE_FACTOR 1.2
#define MOVING_SPEED 1
#define VALID_RECT_SIZE 7
#define MUG_HOLD .9999  // logistic-probability threshold for declaring a mug detection

// A candidate detection window at a single pyramid scale: its top-left
// corner in the (scaled) frame plus the feature responses gathered for it.
typedef struct Window {

  int x;                   // window left edge, in scaled-image coordinates
  int y;                   // window top edge, in scaled-image coordinates
  vector<float> features;  // [bias=1, per-feature max responses] (logistic path)
  CvMat *mat;              // 1 x numFeatures row of responses (boosted path)

} Window;



// Classifier class ---------------------------------------------------------

// default constructor
Classifier::Classifier()
{
    // initialize the random number generator (used by the sample code only)
    rng = cvRNG(-1);



    // Load the feature (patch) dictionary; every template used by
    // run()/train() comes from this file.
    _features.load("dict.xml");


    // CS221 TO DO: add initialization for any member variables
}

// destructor
Classifier::~Classifier()
{
    // No manually-managed resources are freed here; members (vectors,
    // feature dictionary) clean up via their own destructors.
    // CS221 TO DO: free any memory allocated by the object
}

// loadState
// Configure the classifier from the given file.
// Reads one floating-point weight per line into the "weights" member,
// replacing any previously loaded state.  Returns false if the file
// cannot be opened.
bool Classifier::loadState(const char *filename)
{
    assert(filename != NULL);

    // Stack-allocated stream: closed automatically on every return path
    // (the previous version leaked a heap-allocated ifstream and could
    // never hit its "in == NULL" branch, since new doesn't return NULL).
    ifstream in(filename);
    if (!in.good()) {
        cerr << "ERROR: could not open input file" << endl;
        return false;
    }

    // Start fresh so that calling loadState twice doesn't append.
    weights.clear();

    float currValue;
    while (in >> currValue) {
        weights.push_back(currValue);
    }

    return true;
}

// saveState
// Writes classifier configuration to the given file
// Writes one weight per line to the given file.  Returns false if the
// file cannot be opened for writing.
bool Classifier::saveState(const char *filename)
{
    assert(filename != NULL);

    // Stack-allocated stream: flushed and closed automatically (the
    // previous version leaked a heap-allocated ofstream, and its
    // "out == NULL" check was dead code since new doesn't return NULL).
    ofstream out(filename);
    if (!out.good()) {
        cerr << "ERROR: could not open output file" << endl;
        return false;
    }

    for (size_t i = 0; i < weights.size(); i++) {
        out << weights[i] << endl;
    }

    return true;
}



// Debug helper: pops up a window showing "image", blocks until any key
// is pressed, then destroys the window again.
void showImage(const IplImage *image) {
  const char *windowName = "IMAGE";

  cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
  cvShowImage(windowName, image);  // display on screen
  cvWaitKey(0);                    // block until a key press
  cvDestroyWindow(windowName);
}



// run
// Runs the classifier over the given frame and returns a list of
// objects found (and their location).
// you only need to populate "objects" if "scored" is true. if you do
// populate "objects" when "scored" is false, it will not affect your
// score but it may be helpful to you for debugging (to visualize your
// results on more frames)
bool Classifier::run(const IplImage *frame, CObjectList *objects, bool scored)
{
    assert((frame != NULL) && (objects != NULL));

    // Delegate to the boosted-classifier detector.  The logistic-regression
    // sliding-window implementation that used to follow the early
    // "return runBoost(...)" here was unreachable dead code and has been
    // removed; see runBoost() for the live detection pipeline.
    return runBoost(frame, objects, scored);
}




// runBoost
// Sliding-window mug detector driven by the boosted model produced by
// boost().  Loads "boost100.xml", builds a shrinking image pyramid of the
// (half-sized, equalized, grayscale) frame, computes template-match
// feature responses for every window position at every scale, classifies
// each window with the boosted model, and reports the single largest
// confident detection mapped back to original-frame coordinates.
// Only populates "objects" when "scored" is true; always returns true.
bool Classifier::runBoost(const IplImage *frame, CObjectList *objects, bool scored)
{
    if (!scored)
    return true;

    // ----- model setup: load the boosted classifier from disk -----
    // NOTE(review): "model" is new'd and "file" opened but neither is
    // released before returning -- confirm and free them.
    cout << "START BOOST PREDICTION" << endl;
    CvBoost *model = new CvBoost();
    cout << "A" << endl;
	CvFileStorage *file = cvOpenFileStorage("boost100.xml", 0, CV_STORAGE_READ);

    cout << "B" << endl;
	CvFileNode* node = cvGetFileNodeByName(file, 0, "mug");

    cout << "C" << endl;
	model->read(file, node);

	// number of weak learners; used to size the per-window response vector
	int length = cvSliceLength(CV_WHOLE_SEQ, model->get_weak_predictors());

    cout << "D" << endl;
	CvMat *weakResponses = cvCreateMat(length, 1, CV_32FC1);

	cout << "DONE SETUP" << endl;
  assert((frame != NULL) && (objects != NULL));

  // CS221 TO DO: replace this with your own code

  // Example code which returns up to 10 random objects, each object
  // having a width and height equal to half the frame size.
  const char *labels[5] = {
    "mug", "stapler", "keyboard", "clock", "scissors"
  };



  //convert to grayscale
  IplImage *grayFrame = convertToGrayscale(frame);

  //cout << "Point A \n";
  //resize in half since all mugs are at least 64x64
  IplImage *halfSizedFrame  = resizeImage(grayFrame, .5);

  // NOTE(review): grayFrame, halfSizedFrame, the equalize() result and each
  // per-scale resized image are never released -- confirm and free them.
  IplImage *currImage = equalize(halfSizedFrame);//clusterAndEdgeDetect(halfSizedFrame);

  //cout << "CURR IMAGEDEPTH: " << currImage->depth << " CHANNELS: " <<
  //currImage->nChannels << endl;


    // best detection found over all scales (largest area wins)
    CObject largestWindow;
    float largestArea = 0;
    float maxScore = 0;

  int numTimesScaled = 0;
  //cout << "Point B \n";
  // ----- pyramid loop: shrink by RESIZE_FACTOR until smaller than a window -----
  while(currImage->width > WINDOW_SIZE && currImage->height >
  WINDOW_SIZE){

    std::vector<Window> windows;

     matcher.loadFrame(currImage);
     //showImage(currImage);
    // ----- feature loop: one response image per dictionary template -----
    for(int i = 0; i< _features.numFeatures(); i++) {
      //Get feature, scale relative to window size
        FeatureDefinition *featureDef = _features.getFeature(i);
	// NOTE(review): equalize() allocates a new image each time that is
	// never released -- confirm and free per iteration.
	const IplImage *currTemplate = equalize(featureDef->getTemplate());//clusterAndEdgeDetect(featureDef->getTemplate());
	//	cout <<"BEFORE" << endl;
	//showImage(currTemplate);

	//cout << "AFTER" << endl;
	//showImage(scaledTemplate);
    // cout << "Point C \n";
    //Get response image
        float responseWidth = currImage->width - currTemplate->width + 1;
    float responseHeight = currImage->height - currTemplate->height + 1;
    // cout << "Point C2 \n";




    IplImage *response = cvCreateImage(cvSize(responseWidth,
                            responseHeight),
                         IPL_DEPTH_32F, 1);


    // cout << "RESPONSE DEPTH: " << response->depth << " CHANNELS: " << response->nChannels << endl;
    // cout << "Point C3 \n";
    // cout << "1: " << scaledTemplate << "2: " << response << endl;
    matcher.makeResponseImage(currTemplate, response);


    // cout << "Point D \n";
    //Ger valid rect and scale to window size.
    CvRect validRect = featureDef->getValidRect();
    CvRect movedValidRect;
    movedValidRect.width = validRect.width;
    movedValidRect.height = validRect.height;
    int windowNum = 0;
    //  cout << "Point E \n";
    //Moving window....
    // Slide a WINDOW_SIZE box over the scaled frame; for each position take
    // the max response inside the feature's (shifted) valid rectangle.
    for(int x = 0; x < currImage->width - WINDOW_SIZE;
        x+=(MOVING_SPEED)) {

        movedValidRect.x = validRect.x + x;
      for(int y = 0; y < currImage->height -WINDOW_SIZE;
          y+=(MOVING_SPEED)) {
	// cout << "Point F \n";
        movedValidRect.y = validRect.y + y;

	/*
        IplImage *clipped = cvCreateImage(cvSize(actualValidRect.width,
						       actualValidRect.height),
						response->depth,
						       response->nChannels);
	      cvSetImageROI(response, actualValidRect);
	      cvCopyImage(response, clipped);
	      cvResetImageROI(response);

	      float maxValue = getMaxFeatureValue(clipped);*/
	      float maxValue = getMaxFeatureValueFromRect(response,
		  &movedValidRect);

        // First feature creates the window records; later features append
        // to the record at the same scan position (windowNum).
        if(i==0)
        {
            Window new_window;
            new_window.x = x;
            new_window.y = y;
            new_window.features.push_back(1);
            // NOTE(review): these per-window CvMats are never released --
            // confirm and cvReleaseMat them after prediction.
            new_window.mat = cvCreateMat(1, _features.numFeatures(), CV_32FC1);

            cvmSet(new_window.mat, 0, 0, maxValue);
	    // cout << "PUSH: " << maxValue << endl;
            new_window.features.push_back(maxValue);
            windows.push_back(new_window);
        }
        else
        {
            windows[windowNum].features.push_back(maxValue);
            cvmSet(windows[windowNum].mat, 0, i, maxValue);
        }

        windowNum ++;


      }
    }




    cvReleaseImage(&response);

    }

    // ----- classify every window at this scale with the boosted model -----
    for(int i=0;i<windows.size();i++)
    {
    	Window w = windows[i];

    	/*
    	float z = 0;
    	for(int j=0;j<weights.size();j++)
    	{
    		//  cout << "WEIGHTS: " << weights[j] << " FEATURE: " << w.features[j] << endl;
    		z+=(weights[j]*w.features[j]);
    		//cout << "Z: " << z << endl;
    	}

    	//	cout << " Z: " << z;
    	z = 1/(1+exp(-z));
		*/

    	// score = sum of weak-learner votes; higher means more confident
    	int prediction = model->predict(w.mat, NULL, weakResponses, CV_WHOLE_SEQ);
    	double score = cvSum(weakResponses).val[0];


    	if(prediction == 1 && score > 30)
    	{

        	cout << " PREDICTION: " << prediction << "SCORE: " << score << endl;
    		//cout << "FOUND MUG!" << endl;

    		for(int q = 0; q < weights.size() ; q++) {
    			//cout << "FEATURE " << q << ": " << w.features[q] << " | " ;

    		}

    		// Map the window back to original-frame coordinates: undo the
    		// initial halving (factor 2) and the pyramid shrinks (1.2^n).
    		CObject obj;
    		obj.rect = cvRect(0, 0, 2*32*(pow(1.2, numTimesScaled)),
    				2*32*(pow(1.2, numTimesScaled)));
    		int x = w.x*(2*pow(1.2, numTimesScaled));
    		obj.rect.x = w.x*(2*pow(1.2, numTimesScaled));
    		obj.rect.y =  w.y*(2*pow(1.2, numTimesScaled));
    		obj.label = "mug";
    		float area = (obj.rect.width) * (obj.rect.height);
    		if(area > largestArea) {
    		//if(score > maxScore) {
    			maxScore = score;
    			largestArea = area;
    			largestWindow = obj;
    		}
    		//objects->push_back(obj);

    	}


    }

    IplImage *resizedImage = resizeImage(currImage, 1/RESIZE_FACTOR);
    //cvReleaseImage(&currImage);
    currImage = resizedImage;
    // cout << "RESIZING" << endl;
    numTimesScaled++;
  }
  // report only the single best (largest) detection, if any scored > 30
  if(maxScore != 0)
    objects->push_back(largestWindow);

  cvReleaseMat(&weakResponses);

/*

  //pre-written code
  int n = cvRandInt(&rng) % 10;

  while (n-- > 0) {
    CObject obj;
    obj.rect = cvRect(0, 0, frame->width / 2, frame->height / 2);
    obj.rect.x = cvRandInt(&rng) % (frame->width - obj.rect.width);
    obj.rect.y = cvRandInt(&rng) % (frame->height - obj.rect.height);
    obj.label = string(labels[cvRandInt(&rng) % 5]);
    objects->push_back(obj);
  }
*/
  return true;
}





//Only works on grayscale images
// Returns a newly allocated 8-bit single-channel copy of "original"
// scaled by "factor" in both dimensions.  Caller owns the result.
IplImage* Classifier::resizeImage(const IplImage *original, float factor) {
  const CvSize scaledSize = cvSize(original->width * factor,
                                   original->height * factor);
  IplImage *scaled = cvCreateImage(scaledSize, IPL_DEPTH_8U, 1);
  cvResize(original, scaled);
  return scaled;
}




// train
// Trains the classifier to recognize the objects given in the
// training file list.
// For every "mug"/"other" training image (capped at the first 2000 files):
// load it as grayscale, resize to 32x32, histogram-equalize, compute the
// max template-match response for each dictionary feature inside its valid
// rectangle, and store the resulting feature vector (with a leading bias 1)
// plus a 1/0 label.  Finally initializes the logistic weights, caches the
// initial predicted probabilities, and trains the boosted model.
bool Classifier::train(TTrainingFileList& fileList)
{
    // CS221 TO DO: replace with your own training code




    // example code to show you number of samples for each object class
    cout << "Classes:" << endl;
    for (int i = 0; i < (int)fileList.classes.size(); i++) {
	cout << fileList.classes[i] << " (";
	int count = 0;
	for (int j = 0; j < (int)fileList.files.size(); j++) {
	    if (fileList.files[j].label == fileList.classes[i]) {
		count += 1;
	    }
	}
	cout << count << " samples)" << endl;
    }
    cout << endl;

    // example code for loading and resizing image files--
    // you may find this useful for the milestone
    IplImage *image, *smallImage;

    cout << "Processing images..." << endl;

    for (int i = 0; i < (int)fileList.files.size(); i++) {
      // NOTE(review): smallImage is allocated every iteration but only
      // released when the file is actually processed -- skipped files
      // (i > 2000, wrong label, load failure) leak it; confirm and fix.
      smallImage = cvCreateImage(cvSize(32, 32), IPL_DEPTH_8U, 1);
      //cout << "PROCCESSING: " << i << endl;
	// show progress
      // hard cap on training-set size, presumably to bound training time
      if(i > 2000) {
		continue;
      }
	if (i % 100 == 0) {
	  cout << "DONE: " << i << endl;
	  //showProgress(i, fileList.files.size());
	}

	// skip non-mug and non-other images (milestone only)
	if ((fileList.files[i].label == "mug") ||
						 (fileList.files[i].label == "other")) {

	    // load the image
	    image = cvLoadImage(fileList.files[i].filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
	    if (image == NULL) {
		cerr << "ERROR: could not load image "
		     << fileList.files[i].filename.c_str() << endl;
		continue;
	    } else {
	      cout << "LOADED: " << fileList.files[i].filename.c_str() << endl;
	    }

	    // binary label: 1 = mug, 0 = other
	    if(fileList.files[i].label == "mug")
	    	labels.push_back(1);
	    else labels.push_back(0);

	    IplImage* edgedImage = image;//clusterAndEdgeDetect(image);
	    //cout << "POINT C \n";
	    // resize to 64 x 64
	    cvResize(edgedImage, smallImage);
	    // NOTE(review): graySmallImage is never released -- confirm and free.
	    IplImage *graySmallImage = equalize(smallImage);//convertToGrayscale(smallImage);
	    //cout << "POINT D\n";
	    //showImage(graySmallImage);

	    matcher.loadFrame(graySmallImage);

	    // feature vector for this image; index 0 is the bias term
	    std::vector<float> currImageFeatures;
	    currImageFeatures.push_back(1);
	    //cout << "POINT F\n";

	    for(int t = 0; t < _features.numFeatures(); t++) {
	      FeatureDefinition *featureDef = _features.getFeature(t);
	      const IplImage *currTemplate =
	      featureDef->getTemplate();
	      // NOTE(review): equalize() allocates a new image per feature per
	      // file that is never released -- confirm and free.
	      const IplImage *grayTemplate = equalize(currTemplate);//clusterAndEdgeDetect(currTemplate);//convertToGrayscale(currTemplate);

	      // response image is valid wherever the template fully fits
	      float responseWidth = smallImage->width -
	      featureDef->getTemplateWidth() + 1;
	      float responseHeight = smallImage->height -
	      featureDef->getTemplateHeight() + 1;
	      //showImage(grayTemplate);
	      IplImage *response = cvCreateImage(cvSize(responseWidth,
							responseHeight),
						 IPL_DEPTH_32F, 1);


	      CvRect validRect = featureDef->getValidRect();
	      //cout << "Point H \n";
	      //cout << "DEPTHS " << graySmallImage->depth << " " <<
	      //	currTemplate->depth << " " << response->depth << endl;

	      // cout << "CHANNELS " << graySmallImage->nChannels << " " <<
	      //	currTemplate->nChannels << " " << response->nChannels << endl;
	      matcher.makeResponseImage(grayTemplate, response);
	     // showImage(response);
	      //cout << "Point I \n";
	      /*IplImage *clipped = cvCreateImage(cvSize(validRect.width,
						       validRect.height),
						response->depth,
						       response->nChannels);
	      cvSetImageROI(response, validRect);
	      cvCopyImage(response, clipped);
	      cvResetImageROI(response);
	      */

	      //showImage(grayTemplate);

	      // the feature value is the max response inside the valid rect
	      // float maxFeatureValue = getMaxFeatureValue(clipped);
	      float maxFeatureValue = getMaxFeatureValueFromRect(response, &validRect);
	      //cout << "MAX: " << maxFeatureValue << endl;
	      // if(i==0) cout << maxFeatureValue << endl;
	      currImageFeatures.push_back(maxFeatureValue);








	      //cvReleaseImage(&clipped);
	      cvReleaseImage(&response);

	    }
	    features.push_back(currImageFeatures);


	    // CS221 TO DO: extract features from image here

	    // free memory
	    cvReleaseImage(&image);
	    cvReleaseImage(&smallImage);
	}
    }


    // one weight per feature plus one for the bias term
    for(int i = 0; i <= _features.numFeatures(); i++) {
      weights.push_back(1);
    }

    // cache initial predictions (used by logisticRegression/has_converged)
    for(int i=0;i<features.size();i++)
    {
    	 calculated_probabilities.push_back(CalcProbability(i));
    }

    //logisticRegression();
    boost();
    // free memory
    //cvReleaseImage(&smallImage);
    cout << endl;

    // CS221 TO DO: train you classifier here
    return true;
}

// Histogram-equalizes a grayscale image into a freshly allocated
// 8-bit single-channel image.  Caller owns the result.
IplImage* Classifier::equalize(const IplImage* img) {
	IplImage* equalized = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
	cvEqualizeHist(img, equalized);
	return equalized;
}

void Classifier::boost(){
	cout << "BOOOSTING!!" << endl;
	//Initialize matrices
	CvMat *boost_data = cvCreateMat(features.size(), _features.numFeatures(), CV_32FC1);
	CvMat *boost_labels = cvCreateMat(labels.size(), 1, CV_32FC1);
	cout << "INIT DATA" << endl;
	for(int frag = 0; frag < features.size(); frag++) {
		std::vector<float> image_features = features[frag];
		for(int feat = 0; feat < _features.numFeatures(); feat++) {
			cvmSet(boost_data, frag, feat, image_features[feat+1]);
		}

	}
	cout << "INIT LABELS" << endl;
	for(int label_num = 0; label_num < labels.size(); label_num++) {
		cvmSet(boost_labels, label_num, 0, labels[label_num]);
	}
	cout << "INIT TYPES" << endl;
	CvMat *varType = cvCreateMat(boost_data->width + 1, 1, CV_8UC1);
	for(int j = 0; j < boost_data->width; j++) {
		CV_MAT_ELEM(*varType, unsigned char, j, 0) = CV_VAR_NUMERICAL;
	}
	CV_MAT_ELEM(*varType, unsigned char, boost_data->width, 0) = CV_VAR_CATEGORICAL;

	CvBoostParams parameters(CvBoost::GENTLE, 100, .95, 5, false, NULL);
	parameters.split_criteria = CvBoost::DEFAULT;
	CvBoost *model = new CvBoost();
	model->train(boost_data, CV_ROW_SAMPLE, boost_labels, NULL, NULL, varType, NULL, parameters);

	CvFileStorage *file = cvOpenFileStorage("boost100.xml", 0, CV_STORAGE_WRITE);
	model->write(file, "mug");
	cvReleaseFileStorage( &file );

}


// Returns a newly allocated 8-bit single-channel grayscale copy of
// "original" (expected BGR, per CV_BGR2GRAY).  Caller owns the result.
// The "gray"/"done gray" debug prints that ran on every frame have been
// removed.
IplImage* Classifier::convertToGrayscale(const IplImage *original) {
  // allocate memory for grayscale image
  IplImage *modified = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 1);
  // color convert the image (source, destination)
  cvCvtColor(original, modified, CV_BGR2GRAY);
  return modified;
}





// Hand-rolled 3x3 Sobel edge detector for 8-bit single-channel images.
// Writes |G_row| + |G_col| at each interior pixel (saturated to 255);
// border pixels, where the kernel doesn't fit, are set to 0.  Caller
// owns the returned image.
//
// Fixes over the previous version: the gradient magnitude (up to 2040)
// was stored into a uchar without clamping, silently wrapping on strong
// edges; the giant nested-conditional kernel is replaced by explicit
// Sobel kernel tables (same coefficients).
IplImage* Classifier::EdgeDetect(const IplImage *original) {
    IplImage *edge_detected = cvCreateImage(cvSize(original->width,
      original->height), IPL_DEPTH_8U, 1);

    // Sobel kernels, indexed [dr+1][dc+1].  The original code used "x"
    // for rows and "y" for columns; these tables reproduce its weights:
    // row gradient: -1,-2,-1 above / +1,+2,+1 below;
    // column gradient: +1,+2,+1 left / -1,-2,-1 right (sign is irrelevant
    // since only the absolute value is used).
    static const int kRowKernel[3][3] = { {-1, -2, -1},
                                          { 0,  0,  0},
                                          { 1,  2,  1} };
    static const int kColKernel[3][3] = { { 1,  0, -1},
                                          { 2,  0, -2},
                                          { 1,  0, -1} };

    for (int row = 0; row < original->height; row++) {
        uchar *dst = (uchar *)(edge_detected->imageData +
                               row * edge_detected->widthStep);
        for (int col = 0; col < original->width; col++) {
            // Kernel can't be centered on a border pixel.
            if (row == 0 || col == 0 ||
                row == original->height - 1 || col == original->width - 1) {
                dst[col] = 0;
                continue;
            }

            float gradRow = 0;
            float gradCol = 0;
            for (int dr = -1; dr <= 1; dr++) {
                for (int dc = -1; dc <= 1; dc++) {
                    float v = (float)CV_IMAGE_ELEM(original, uchar,
                                                   row + dr, col + dc);
                    gradRow += kRowKernel[dr + 1][dc + 1] * v;
                    gradCol += kColKernel[dr + 1][dc + 1] * v;
                }
            }

            // L1 gradient magnitude, saturated to the uchar range.
            float magnitude = fabs(gradRow) + fabs(gradCol);
            dst[col] = (uchar)(magnitude > 255.0f ? 255.0f : magnitude);
        }
    }

    return edge_detected;
}


// Quantizes "img" into 3 gray levels with k-means, then runs the Sobel
// edge detector on the quantized image.  Caller owns the returned edge
// image; the intermediate cluster image is released here.
IplImage* Classifier::clusterAndEdgeDetect(const IplImage *img) {
	KMeansBW kmeans(img, 3);
	std::vector<KClusterBW> clusters = kmeans.findClusters();

	IplImage *quantized = kmeans.getClusterImage();
	IplImage *edges = EdgeDetect(quantized);
	cvReleaseImage(&quantized);
	return edges;
}


static int mcount = 0;


// Scans an entire image for its maximum pixel value (returns -1 for an
// empty image).  Currently unused -- callers switched to
// getMaxFeatureValueFromRect().
// NOTE(review): pixels are read as uchar, but the response images built
// elsewhere in this file are IPL_DEPTH_32F -- confirm callers pass 8-bit
// data before reviving this.  The file-static "mcount" limits the debug
// dump to the first call only.
float Classifier::getMaxFeatureValue(IplImage *validRegion) {
  float currMax = -1;
  //cout << "nChannels: " << validRegion->nChannels << endl;
  for(int x =0; x < validRegion->width; x++) {
    for(int y = 0; y < validRegion->height; y++) {
      // CV_IMAGE_ELEM takes (image, type, row, col) -- hence (y, x)
      float currValue = CV_IMAGE_ELEM(validRegion, uchar, y, x);
      if(mcount == 0) cout << "[" << currValue << "]";
      if(currValue > currMax) currMax = currValue;
      if(currValue > 1) cout << "WTF? " << "x: " << x << " y: " << y <<
			  " CCURR: " << currValue << endl;
    }
  }
  if(mcount == 0) cout << "RESULT: " << currMax << endl;
  mcount ++;
  return currMax;
}

// Returns the largest finite float value of "response" inside
// "validRect" (rect coordinates are offsets into the response image).
// Returns -1 if the rect is empty or no value exceeds -1.
float Classifier::getMaxFeatureValueFromRect(IplImage *response,
                                             CvRect *validRect) {
  float best = -1;
  for (int col = 0; col < validRect->width; col++) {
    for (int row = 0; row < validRect->height; row++) {
      const float value = CV_IMAGE_ELEM(response, float,
                                        row + validRect->y,
                                        col + validRect->x);
      // template-matching responses can contain infinities; ignore them
      if (cvIsInf(value))
        continue;
      if (value > best)
        best = value;
    }
  }

  return best;
}


int Classifier::has_converged(){
	float error = 0;
	float threshold = 9*pow((double)10,-3);
	for(int i=0;i<features.size();i++)
	{
	  // cout << "ERROR: " << error << "LABEL[i]: " << labels[i] << "CP:" << calculated_probabilities[i] << endl;
		error+=pow((labels[i]-calculated_probabilities[i]),2);
	}
	error = error/features.size();
	cout << "ERROR: " << error << endl;
	//cout << "THRESHOLD: " << threshold << endl;
	if(error<=threshold)
		return 1;
	else return 0;
}

// Evaluates the logistic model on training example i: returns
// sigmoid(w . x_i), where x_i is the stored feature vector (its leading
// entry is the constant bias feature).
float Classifier::CalcProbability(int i){

	const std::vector<float>& example = features[i];

	// dot product of the weight vector with the example's features
	double dot = 0;
	for(size_t j = 0; j < weights.size(); j++)
	{
		dot += weights[j] * example[j];
	}

	// logistic sigmoid: 1 / (1 + e^-z)
	double probability = 1.0 / (1.0 + exp(-dot));
	return probability;

}

void Classifier::logisticRegression() {
  int numIter = 0;
	while(has_converged()==0 && numIter < 1000)
	{
	  //cout << "Iteration: " <<  numIter << endl;
	  numIter++;

		for(int i=0;i<weights.size();i++)
		{
			float add_val = 0;
			for(int j=0;j<features.size();j++)
			{
				std::vector<float> image_features = features[j];
				add_val+=(image_features[i]*(labels[j]-calculated_probabilities[j]));
			}
			add_val = add_val*learningRate;
			//cout << "Add val: " << add_val << endl;
			weights[i]+=add_val;
		}
		for(int i=0;i<features.size();i++)
	    {
	    	 calculated_probabilities[i] = CalcProbability(i);
	    }

	}
}

