// C standard library
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// C++ standard library (stringstream/cout in main, std::vector in the
// background-color helpers)
#include <iostream>
#include <sstream>
#include <vector>

// OpenCV legacy C API
#include <cv.h>
#include <highgui.h>

// OpenCV C++ API
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>

#define IMGFOLDER "images/"
#define IMGPREFIX "img"
#define IMGEXT ".jpg"
#define IMGCOUNT 100


using namespace std;
using namespace cv;
// Linear scan of the first `count` rows of `unique` for an exact
// (r,g,b) triple; returns the matching row index, or -1 if not found.
int search(uchar unique[][3],int count,uchar r, uchar g, uchar b)
{
  int idx = 0;
  while (idx < count)
  {
    const bool match = (unique[idx][0] == r)
                    && (unique[idx][1] == g)
                    && (unique[idx][2] == b);
    if (match)
      return idx;
    ++idx;
  }
  return -1;
}

void getMode(uchar borderR[],uchar borderG[],uchar borderB[],int len,uchar bgcolor[],int channels)
{
  uchar unique[len][3];
  int uniqueCount[len];
  int count=0;
  int maxfreq=1;
  int maxfreqind=0;
  for(int i=0;i<len;i++)
  {
    int pos;
    if((pos = search(unique,count,borderR[i],borderG[i],borderB[i]))>=0)
    {
      uniqueCount[pos]++;
      if(uniqueCount[pos]>maxfreq)
      {
	maxfreq = uniqueCount[pos];
	maxfreqind = i;
      }
    }
    else
    {
      unique[count][0]=borderR[i];
      unique[count][1]=borderG[i];
      unique[count][2]=borderB[i];
      uniqueCount[count++] = 1;
    }
  }
  bgcolor[0]=borderR[maxfreqind];
  bgcolor[1]=borderG[maxfreqind];
  bgcolor[2]=borderB[maxfreqind];
}

// Estimates the image's background color as the most frequent color along the
// image border, writing three channel values into bgcolor[0..2].
// NOTE(review): the code labels channels R/G/B, but OpenCV stores pixels as
// BGR — the labels are internally consistent, so the mode is still correct.
void getBackgroundColor(IplImage *img, uchar bgcolor[])
{
  int height    = img->height;
  int width     = img->width;
  int step      = img->widthStep;    // bytes per row (may exceed width*channels)
  int channels  = img->nChannels;    // assumed >= 3; TODO(review) confirm callers
  uchar *data   = (uchar *)img->imageData;
  // BUG FIX: the four loops below collect 2*(width+height) samples (the four
  // corner pixels are visited twice), but the original stack arrays were
  // sized 2*(height+width-1) — a two-element buffer overflow. Heap vectors
  // sized to the true sample count also avoid the non-standard VLAs.
  int total = 2 * (width + height);
  std::vector<uchar> borderR((size_t)total);
  std::vector<uchar> borderG((size_t)total);
  std::vector<uchar> borderB((size_t)total);
  int count = 0;
  // Top border (row 0)
  for (int i = 0; i < width; i++)
  {
    borderR[count] = data[i*channels];
    borderG[count] = data[i*channels+1];
    borderB[count] = data[i*channels+2];
    count++;
  }
  // Bottom border (row height-1)
  for (int i = 0; i < width; i++)
  {
    borderR[count] = data[(height-1)*step+i*channels];
    borderG[count] = data[(height-1)*step+i*channels+1];
    borderB[count] = data[(height-1)*step+i*channels+2];
    count++;
  }
  // Left border (column 0; corners are sampled a second time, as before)
  for (int i = 0; i < height; i++)
  {
    borderR[count] = data[i*step];
    borderG[count] = data[i*step+1];
    borderB[count] = data[i*step+2];
    count++;
  }
  // Right border (column width-1)
  for (int i = 0; i < height; i++)
  {
    borderR[count] = data[i*step+(width-1)*channels];
    borderG[count] = data[i*step+(width-1)*channels+1];
    borderB[count] = data[i*step+(width-1)*channels+2];
    count++;
  }
  getMode(&borderR[0], &borderG[0], &borderB[0], count, bgcolor, channels);
}

// Per-channel tolerance for background matching. The original code referenced
// RANGE without ever defining it (build error). 30 is a reviewer's guess —
// TODO(review): confirm the intended tolerance value.
#ifndef RANGE
#define RANGE 30
#endif

// True when the two channel values differ by less than RANGE, i.e. they are
// close enough to be treated as the same background color.
bool inRange(uchar val1,uchar val2)
{
  // uchar operands promote to int, so the subtraction cannot wrap.
  int diff = abs(val1-val2);
  return (diff<RANGE);
}

// Zeros out (blacks) every pixel whose first three channels are all within
// RANGE of the estimated background color. Mutates `img` in place.
void subtractBackground(IplImage *img)
{
  int height    = img->height;
  int width     = img->width;
  int step      = img->widthStep;   // bytes per row
  int channels  = img->nChannels;   // assumed >= 3; TODO(review) confirm callers
  uchar *data   = (uchar *)img->imageData;
  // getBackgroundColor always writes exactly three channel values, so use a
  // fixed-size array. The original used a non-standard VLA sized by
  // `channels`, which under-allocates when channels < 3.
  uchar bgcolor[3] = {0, 0, 0};
  getBackgroundColor(img, bgcolor);
  for (int i = 0; i < height; i++)
  {
    for (int j = 0; j < width; j++)
    {
      uchar *px = data + i * step + j * channels;  // first channel of pixel (i,j)
      if (inRange(px[0], bgcolor[0]) && inRange(px[1], bgcolor[1]) && inRange(px[2], bgcolor[2]))
      {
        px[0] = 0;
        px[1] = 0;
        px[2] = 0;
      }
    }
  }
}

int main(int argc, char *argv[])
{
  IplImage *img=NULL;
  int i=atoi(argv[1]);
//   for(i=1;i<=IMGCOUNT;i++)
//   {
//     string st = "images/img";
    stringstream ss;
    ss<<"images/img";
    ss<<i;
    ss<<".jpg";
    string file;
    ss>>file;
    img=cvLoadImage(file.c_str());
  if(!img){
    printf("Could not load image file\n");
    exit(0);
  }
//   }
  
  subtractBackground(img);

  IplImage* newimg;
  newimg=cvCreateImage(cvSize(img->width,img->height),img->depth,img->nChannels);
  cvCopy(img,newimg,NULL);
//   cvSmooth(img,newimg,CV_MEDIAN);
//   newimg=img;
  
  // Canny
  
//   IplImage* grayImg = cvCreateImage( cvSize(newimg->width, newimg->height), IPL_DEPTH_8U, 1 );
//   //convert original color image (3 channel rgb color image) to gray-level image
//   cvCvtColor( newimg, grayImg, CV_BGR2GRAY );
//   
//   IplImage* cannyImg = cvCreateImage(cvGetSize(newimg), IPL_DEPTH_8U, 1);
//   cvCanny(grayImg, cannyImg, 50, 150, 3);
  
  // Pyrdown and Up
//   IplImage* down = cvCreateImage( cvSize(img->width/2, img->height/2), img->depth, img->nChannels);
//   IplImage* up = cvCreateImage( cvSize(img->width, img->height), img->depth, img->nChannels);
//   cvPyrDown(img,down);
//   cvPyrUp(down,up);

  // RGB to Grayscale
  IplImage* grayImg = cvCreateImage( cvSize(newimg->width, newimg->height), IPL_DEPTH_8U, 1 );
  cvCvtColor(newimg, grayImg, CV_BGR2GRAY );
  
  IplImage* dst = cvCreateImage( cvGetSize(img), 8, 3 );
  CvMemStorage* storage = cvCreateMemStorage(0);
  CvSeq* contour = 0;

  cvThreshold(grayImg, grayImg, 1, 255, CV_THRESH_BINARY );
  cvFindContours(grayImg, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
  cvZero( dst );
  int count=0;
//   CvRect rect[6];
  for( ; contour != 0; contour = contour->h_next )
  {
//       rect[count] = cvBoundingRect(contour,0);
      CvScalar color = CV_RGB( 255, 255, 255 );
      CvScalar color1 = CV_RGB( 0, 0, 0);
      /* replace CV_FILLED with 1 to see the outlines */
      cvDrawContours( dst, contour, color, color, -1, 1, 8 );
      cvDrawContours( newimg, contour, color1, color1, -1, 1, 8 );
//       cvRectangle(dst,cvPoint(rect[count].x,rect[count].y),cvPoint(rect[count].x+rect[count].width,rect[count].x+rect[count].height),cvScalar(255));
      count++;
  }
  cout<<"Contours:"<<count<<endl;
  
  //Sift
  
//   Ptr<FeatureDetector> featureDetector = FeatureDetector::create("SIFT");
//   vector<KeyPoint> keypoints;
// 
//   // Detect the keypoints
//   featureDetector->detect(img, keypoints); // NOTE: featureDetector is a pointer hence the '->'.
// 
//   //Similarly, we create a smart pointer to the SIFT extractor.
//   Ptr<DescriptorExtractor> featureExtractor = DescriptorExtractor::create("SIFT");
// 
//   // Compute the 128 dimension SIFT descriptor at each keypoint.
//   // Each row in "descriptors" correspond to the SIFT descriptor for each keypoint
//   Mat descriptors;
//   featureExtractor->compute(img, keypoints, descriptors);
// 
//   // If you would like to draw the detected keypoint just to check
//   Mat outputImage;
//   Scalar keypointColor = Scalar(255, 0, 0);     // Blue keypoints.
//   drawKeypoints(img, keypoints, outputImage, keypointColor, DrawMatchesFlags::DEFAULT);
// 
//   namedWindow("Output");
//   imshow("Output", outputImage);




  // create a window
  cvNamedWindow("mainWin", CV_WINDOW_AUTOSIZE); 
  cvMoveWindow("mainWin", 100, 100);
  cvNamedWindow("mainWin1", CV_WINDOW_AUTOSIZE); 
  cvMoveWindow("mainWin1", 400, 100);
  cvNamedWindow("mainWin2", CV_WINDOW_AUTOSIZE); 
  cvMoveWindow("mainWin2", 700, 100);

  // show the image
  cvShowImage("mainWin", img );
  cvShowImage("mainWin1", newimg );
  cvShowImage("mainWin2", dst);

  // wait for a key
  cvWaitKey(0);

  // release the image
  cvReleaseImage(&img );
  return 0;
}
