#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <iostream>
#include "mytest.h"

/// Convert `image` in place to a single-channel 8-bit unsigned (CV_8UC1) image.
/// Multi-channel inputs are assumed to be BGR-ordered — TODO confirm callers.
void my_convertToGray8Bit(cv::Mat& image)
{
    // Collapse colour channels first so the depth conversion sees one channel.
    if(image.channels() > 1)
      cv::cvtColor(image, image, CV_BGR2GRAY);
    // BUGFIX: the old test `depth() > 1` skipped CV_8S (depth value 1) and so
    // could leave a signed 8-bit image untouched; convert whenever the depth
    // is anything other than 8-bit unsigned.
    if(image.depth() != CV_8U)
      image.convertTo(image, CV_8UC1);
}

/// Detect contours on a blurred Canny edge map and display them in a window.
/// Blocks on cv::waitKey() until a key is pressed.
void my_find_contours(cv::Mat image)
{
   cv::Mat greyimage, edges;
   std::vector<std::vector<cv::Point> > contours;
   // BUGFIX: cvtColor(CV_BGR2GRAY) asserts on single-channel input; only
   // convert when the image actually has colour channels.
   if(image.channels() > 1)
     cv::cvtColor(image, greyimage, CV_BGR2GRAY);
   else
     greyimage = image;
   cv::blur(greyimage, greyimage, cv::Size(3,3));  // light denoise before Canny
   const int threshold1 = 100;   // Canny hysteresis lower threshold
   const int threshold2 = 255;   // Canny hysteresis upper threshold
   cv::Canny(greyimage, edges, threshold1, threshold2);
   cv::findContours(edges, contours, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);

   // Draw every contour in white on a black canvas the size of the edge map.
   cv::Mat result = cv::Mat::zeros(edges.size(), CV_8UC3);
   const cv::Scalar color = cv::Scalar(255, 255, 255);
   for(size_t i = 0; i < contours.size(); ++i)   // size_t avoids signed/unsigned mismatch
     cv::drawContours(result, contours, static_cast<int>(i), color);

   cv::String windowName = "Contours";
   cv::namedWindow(windowName, CV_WINDOW_NORMAL);
   cv::imshow(windowName, result);
   cv::waitKey();
}

/// Naive O(W*H*tw*th) template match: for each placement, sum the signed
/// pixel differences between source window and template, take |sum|, and mark
/// the minimum with a rectangle.  Displays the result and blocks on waitKey().
void my_template_sum_naive(cv::Mat source, cv::Mat temp)
{
  cv::cvtColor(temp, temp, CV_BGR2GRAY);
  cv::cvtColor(source, source, CV_BGR2GRAY);

  const int temp_rows = temp.rows;
  const int temp_cols = temp.cols;

  cv::Mat result;
  result.create(source.rows - temp_rows, source.cols - temp_cols, CV_32F);

  for(int i = 0; i < result.rows; ++i)
  {
     for(int j = 0; j < result.cols; ++j)
     {
        // Sum of signed differences over the template window anchored at (i, j).
        float c = 0;
        for(int ii = 0; ii < temp_rows; ++ii)
          for(int jj = 0; jj < temp_cols; ++jj)
            c += source.at<uchar>(i + ii, j + jj) - temp.at<uchar>(ii, jj);
        result.at<float>(i, j) = std::abs(c);
     }
  }

  double minVal, maxVal;
  cv::Point minLoc, maxLoc;
  cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
  cv::Point matchPoint = minLoc;  // smallest |difference sum| = best match
  // BUGFIX: the rectangle must extend temp_cols in x (width) and temp_rows in
  // y (height); the original had the two swapped.
  cv::rectangle(source, matchPoint,
                cv::Point(matchPoint.x + temp_cols, matchPoint.y + temp_rows),
                cv::Scalar::all(100), 2, 8, 0);

  cv::namedWindow("Result", CV_WINDOW_NORMAL);
  cv::imshow("Result", source);
  cv::waitKey();
}


/// Build a (rows+1)x(cols+1) CV_32F integral image of an 8-bit grayscale
/// input.  result(i, j) holds the sum over image[0..i-1][0..j-1]; row 0 and
/// column 0 form the zero border.  mode == 1 sums squared pixel values
/// (used for window L2 norms); any other mode sums plain pixel values.
void my_integralImage(cv::Mat image, cv::Mat &result, int mode=0)
{
    // BUGFIX: the colour test used depth() (bit depth), not channels(); a
    // 3-channel 8-bit image has depth CV_8U (0) and was never converted,
    // making the at<uchar> reads below misindex the data.
    if(image.channels() > 1)
      cv::cvtColor(image, image, CV_BGR2GRAY);
    // BUGFIX: Mat::create leaves the buffer uninitialised, so the recurrence
    // read garbage through row 0 / column 0; the zero border must really be zero.
    result = cv::Mat::zeros(image.rows+1, image.cols+1, CV_32F);
    for(int i=1; i<image.rows+1; ++i)
    {
      for(int j=1; j<image.cols+1; ++j)
      {
        // Standard inclusion-exclusion recurrence over the already-filled
        // left, top, and top-left neighbours.
        if(mode==1)
          result.at<float>(i, j) = result.at<float>(i, j-1) + result.at<float>(i-1, j)
                                 - result.at<float>(i-1, j-1)
                                 + image.at<uchar>(i-1, j-1)*image.at<uchar>(i-1, j-1);
        else
          result.at<float>(i, j) = result.at<float>(i, j-1) + result.at<float>(i-1, j)
                                 - result.at<float>(i-1, j-1)
                                 + image.at<uchar>(i-1, j-1);
      }
    }
}

/// Integral-image accelerated version of the |sum of differences| match:
/// result(i, j) = | (window sum at (i, j)) - (template sum) |.
/// result is (image.rows - temp.rows) x (image.cols - temp.cols), CV_32F.
void my_template_sum(cv::Mat image, cv::Mat temp, cv::Mat &result)
{
    my_convertToGray8Bit(image);
    my_convertToGray8Bit(temp);

    result.create(image.rows-temp.rows, image.cols-temp.cols, CV_32F);
    cv::Mat integral;
    my_integralImage(image, integral);

    // Total template intensity, subtracted from every window sum below.
    float sum = 0;
    for(int i=0; i<temp.rows; ++i)
    {
      const uchar* tptr = temp.ptr<uchar>(i);
      for(int j=0; j<temp.cols; ++j)
        sum += tptr[j];
    }

    for(int i=0; i<result.rows; ++i)
    {
      float* ptr = result.ptr<float>(i);
      for(int j=0; j<result.cols; ++j)
      {
        // The integral image is (rows+1)x(cols+1) with a zero top/left border,
        // so the sum of the window whose top-left image pixel is (i, j) is:
        //   A(i+th, j+tw) - A(i, j+tw) - A(i+th, j) + A(i, j)
        // BUGFIX: the previous code added +1 to all four indices, which
        // sampled the window shifted down-right by one pixel.
        float c = integral.at<float>(i+temp.rows, j+temp.cols)
                - integral.at<float>(i,           j+temp.cols)
                - integral.at<float>(i+temp.rows, j)
                + integral.at<float>(i,           j);
        ptr[j] = std::abs(c - sum);
      }
    }
}

/// Cross-correlate `temp` against `image` via the DFT: correlation in the
/// spatial domain is an element-wise product (with conjugation) in the
/// frequency domain.  result is a CV_32F map the size of `image`.
void my_crosscorr(cv::Mat image, cv::Mat temp, cv::Mat &result)
{
  my_convertToGray8Bit(image);
  my_convertToGray8Bit(temp);
  const int depth = CV_32F;
  image.convertTo(image, depth);
  temp.convertTo(temp, depth);

  // BUGFIX: the Mat(size, type) constructor does NOT zero its memory; the
  // area outside the pasted template was garbage and corrupted the spectrum.
  // Zero-pad the template to the image size explicitly.
  cv::Mat temp2 = cv::Mat::zeros(image.size(), temp.depth());
  cv::Mat temp_copy(temp2, cv::Rect(0, 0, temp.cols, temp.rows));
  temp.convertTo(temp_copy, temp_copy.depth());  // copy template into top-left ROI

  cv::Size dftSize;
  dftSize = image.size();
  // NOTE(review): getOptimalDFTSize padding (commented out) would avoid the
  // circular-convolution wraparound and speed up the transform — confirm
  // whether wraparound at the borders is acceptable for the callers.
  cv::Mat dftImg(dftSize, depth);
  cv::Mat dftTemp(dftSize, depth);
  result.create(dftSize, depth);

  // Forward transforms of image and zero-padded template.
  cv::dft(image, dftImg);
  cv::dft(temp2, dftTemp);

  // conjB=true turns the spectrum product into correlation (not convolution).
  cv::mulSpectrums(dftImg, dftTemp, dftImg, 0, true);
  cv::dft(dftImg, result, cv::DFT_INVERSE + cv::DFT_SCALE);
}

/// Template matching via FFT cross-correlation (my_crosscorr).  When `normed`
/// is 1, every score is divided by the product of the window and template L2
/// norms, with a heuristic clamp for numeric overshoot; margins where no full
/// template window fits are zeroed in all cases.
void my_template_cross(cv::Mat image, cv::Mat temp, cv::Mat &result, int normed)
{
  my_convertToGray8Bit(image);
  my_convertToGray8Bit(temp);
  my_crosscorr(image, temp, result);

  cv::Scalar tempMean, tempStdDev;
  cv::meanStdDev(temp, tempMean, tempStdDev);

  if(normed == 1)
  {
    // L2 norm of the template, accumulated over the raw 8-bit pixels.
    float tempNorm = 0;
    for(int row = 0; row < temp.rows; ++row)
    {
      uchar* line = temp.ptr<uchar>(row);
      for(int col = 0; col < temp.cols; ++col)
        tempNorm += line[col] * line[col];
    }
    tempNorm = std::sqrt(tempNorm);

    // Integral image of squared pixels: yields every window's squared L2 norm.
    cv::Mat integral;
    my_integralImage(image, integral, 1);

    for(int row = 0; row < image.rows - temp.rows; ++row)
    {
      for(int col = 0; col < image.cols - temp.cols; ++col)
      {
        float windowSq = integral.at<float>(row, col)
                       - integral.at<float>(row + temp.rows, col)
                       - integral.at<float>(row, col + temp.cols)
                       + integral.at<float>(row + temp.rows, col + temp.cols);
        float denom = std::sqrt(windowSq) * tempNorm;
        float score = result.at<float>(row, col);
        if(score < denom)
          score /= denom;          // regular normalised correlation in [0, 1)
        else if(score < 1.25 * denom)
          score = 1;               // small overshoot: treat as a perfect match
        else
          score = 0;               // large overshoot: numeric junk, discard
        result.at<float>(row, col) = score;
      }
    }
  }

  // Zero the bottom margin (rows where the template hangs off the image)...
  for(int row = image.rows - temp.rows; row < result.rows; ++row)
    for(int col = 0; col < result.cols; ++col)
      result.at<float>(row, col) = 0;

  // ...and the right margin likewise.
  for(int row = 0; row < result.rows; ++row)
    for(int col = image.cols - temp.cols; col < result.cols; ++col)
      result.at<float>(row, col) = 0;
}


/// Sum-of-squared-differences match via the algebraic expansion
///   SSD(i, j) = sum(f^2 over window) - 2 * (f . t)(i, j) + sum(t^2),
/// using an integral image of squared pixels for the first term and the DFT
/// cross-correlation for the middle term.
void my_template_sqr(cv::Mat image, cv::Mat temp, cv::Mat &result)
{
    my_convertToGray8Bit(image);
    my_convertToGray8Bit(temp);
    result.create(image.rows-temp.rows, image.cols-temp.cols, CV_32F);

    cv::Mat integral;
    my_integralImage(image, integral, 1);   // squared-pixel integral

    cv::Mat cross;
    my_crosscorr(image, temp, cross);       // (f . t) for every placement

    // Constant term: sum of squared template pixels.
    float sum = 0;
    for(int i=0; i<temp.rows; ++i)
    {
      const uchar* tptr = temp.ptr<uchar>(i);
      for(int j=0; j<temp.cols; ++j)
        sum += tptr[j]*tptr[j];
    }

    for(int i=0; i<image.rows-temp.rows; ++i)
    {
      float* ptr = result.ptr<float>(i);
      for(int j=0; j<image.cols-temp.cols; ++j)
      {
        // Squared-norm of the window anchored at image pixel (i, j); the
        // integral has a zero top/left border, so no +1 offsets are needed.
        // BUGFIX: the former +1 offsets shifted this term one pixel down-right
        // relative to cross(i, j), misaligning the expansion.
        float c = integral.at<float>(i+temp.rows, j+temp.cols)
                - integral.at<float>(i,           j+temp.cols)
                - integral.at<float>(i+temp.rows, j)
                + integral.at<float>(i,           j);
        ptr[j] = c - 2*cross.at<float>(i, j) + sum;
      }
    }
}

/// Dispatch to the requested template-matching implementation.
/// method: 0 = absolute differences, 1 = squared differences,
///         2 = cross-correlation, 3 = normalised cross-correlation.
/// When MY_IMP is unset, methods 1-3 fall back to cv::matchTemplate.
void my_templateMatch(cv::Mat &image, cv::Mat &temp, cv::Mat &result, int method)
{
    // Both inputs are reduced to grayscale in place before matching.
    if(image.channels() > 1)
      cv::cvtColor(image, image, CV_BGR2GRAY);
    if(temp.channels() > 1)
      cv::cvtColor(temp, temp, CV_BGR2GRAY);

    switch(method)
    {
      case 0:   // absolute differences (custom implementation only)
        my_template_sum(image, temp, result);
        break;
      case 1:   // squared differences
        if(MY_IMP)
          my_template_sqr(image, temp, result);
        else
          cv::matchTemplate(image, temp, result, CV_TM_SQDIFF);
        break;
      case 2:   // cross-correlation
        if(MY_IMP)
          my_template_cross(image, temp, result, 0);
        else
          cv::matchTemplate(image, temp, result, CV_TM_CCORR);
        break;
      case 3:   // normalised cross-correlation
        if(MY_IMP)
          my_template_cross(image, temp, result, 1);
        else
          cv::matchTemplate(image, temp, result, CV_TM_CCORR_NORMED);
        break;
      default:  // unknown method: leave result untouched (matches original)
        break;
    }
}

/// Run my_templateMatch, draw a rectangle around the best match on `image`,
/// and display it.  Blocks on cv::waitKey() until a key is pressed.
void my_templateMatchShow(cv::Mat &image, cv::Mat &temp, int method)
{
    if(method < 0 || method > 3)
      return;                       // unknown method: nothing to show
    cv::Mat result;
    my_templateMatch(image, temp, result, method);

    double minVal, maxVal;
    cv::Point minLoc, maxLoc, matchPoint;
    cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat());
    // Difference methods (0, 1) minimise; correlation methods (2, 3) maximise.
    matchPoint = (method < 2) ? minLoc : maxLoc;
    // BUGFIX: the rectangle must extend temp.cols in x (width) and temp.rows
    // in y (height); the original had the two swapped.
    cv::rectangle(image, matchPoint,
                  cv::Point(matchPoint.x + temp.cols, matchPoint.y + temp.rows),
                  cv::Scalar::all(100), 2, 8, 0);

    cv::String window = "templateMatch test";
    cv::namedWindow(window, CV_WINDOW_NORMAL);
    cv::imshow(window, image);
    cv::waitKey();
}

/// Binarise `image` into `result` with a mean-based adaptive threshold
/// (block size 31, offset 5).  `image` is converted to 8-bit gray in place.
/// NOTE(review): the `threshold` parameter is currently unused — the adaptive
/// method derives its own local thresholds; a global Otsu threshold was tried
/// and left commented out in an earlier revision.
void my_convertToBinary(cv::Mat &image, cv::Mat &result, int threshold)
{
    my_convertToGray8Bit(image);
    // 31/5 chosen empirically; 31/3 were also noted as good values.
    cv::adaptiveThreshold(image, result, 255,
                          CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 31, 5);
}

/// Apply a morphological opening with a square structuring element.
/// A `morph_size` of 0 auto-selects a kernel proportional to the image size
/// ((rows + cols) / 40, i.e. roughly mean(rows, cols) / 20), capped at 15.
void my_openImage(cv::Mat &image, int morph_size)
{
      const int maxKernel = 15;
      if(morph_size == 0)
        morph_size = std::min((image.rows + image.cols)/40, maxKernel);
      cv::Mat kernel = cv::getStructuringElement(
          cv::MORPH_RECT,
          cv::Size(2*morph_size + 1, 2*morph_size + 1),
          cv::Point(morph_size, morph_size));
      cv::morphologyEx(image, image, cv::MORPH_OPEN, kernel);
}

/// Multi-scale template match: resize `temp` from 4x of its size down to 0.6x
/// in `steps` increments, match at each scale, and keep the response map with
/// the best extremum.  `method` only selects whether min (methods 0, 1) or
/// max (methods 2, 3) is "best".
/// NOTE(review): the inner match always calls my_templateMatchPyramid
/// (normalised cross-correlation, pyramid depth 3) regardless of `method` —
/// confirm this is intended rather than dispatching through my_templateMatch.
void my_templateMatchMultiScale(cv::Mat &image, cv::Mat &temp, cv::Mat &result, int method, double confidence)
{
    int steps = 10;
    int startWidth = temp.cols*4;
    int startHeigth = temp.rows*4;
    int endWidth = temp.cols*0.6;    // truncating int conversion is intentional here
    int endHeigth = temp.rows*0.6;
    // NOTE(review): assigning a Scalar only fills result if it already has a
    // buffer; on an empty Mat this is a no-op — verify callers pass a sized Mat.
    result = cv::Scalar::all(0);
    
    // Degenerate range guard: if the start size came out smaller than the end
    // size, swap direction and shrink toward 2x instead.
    if(startWidth < endWidth || startHeigth < endHeigth)
    {
      startWidth = endWidth; endWidth = temp.cols*2;
      startHeigth = endHeigth; endHeigth = temp.rows*2;
    }
    
    cv::Mat temp_res = temp, result_res = result;
    int sizex, sizey;
    double max_value=0, min_value=1, newmax=0, newmin=1;
    for(int i=0; i<=steps; ++i)
    {
      // Linear interpolation of the template size between start and end.
      sizex = startWidth + i*(endWidth - startWidth)/steps;
      sizey = startHeigth + i*(endHeigth - startHeigth)/steps;
      cv::resize(temp, temp_res, cv::Size(sizex, sizey));
      //try pyramid method
      my_templateMatchPyramid(image, temp_res, result_res, 3, confidence);
      cv::minMaxLoc(result_res, &newmin, &newmax);
      // Keep the response map with the smallest minimum (difference methods)...
      if(method < 2 && newmin < min_value)
      {
	 min_value = newmin; result = result_res;
      }
      // ...or the largest maximum (correlation methods).
      if(method >=2 && newmax > max_value)
      {
	  max_value = newmax; result = result_res;
      }
    }
}

/// Histogram-equalise `image` in place, one tile at a time, on a grid of
/// quarter-width by quarter-height blocks (edge tiles are clipped).
/// NOTE(review): the `temp` parameter is not used by the current
/// implementation — presumably reserved for template-guided equalisation.
/// NOTE(review): images narrower/shorter than 4 pixels make the tile size 0
/// and divide by zero below — same as the original; confirm input sizes.
void my_equalizeHistByTemplate(cv::Mat &image, cv::Mat &temp)
{
    const int tileW = image.cols/4;
    const int tileH = image.rows/4;
    const int imW = image.cols;
    const int imH = image.rows;
    for(int i = 0; i <= imW/tileW; ++i)
      for(int j = 0; j <= imH/tileH; ++j)
      {
         const int x = tileW*i;
         const int y = tileH*j;
         // Clip the final column/row of tiles to the image border.
         const int w = (x + tileW > imW) ? imW - x : tileW;
         const int h = (y + tileH > imH) ? imH - y : tileH;
         if(x >= imW || y >= imH || w == 0 || h == 0)
           continue;   // tile lies entirely outside (or has zero area)
         cv::Mat tile(image, cv::Rect(x, y, w, h));
         cv::equalizeHist(tile, tile);   // header shares image data: in-place
      }
}

/// Edge-detection stub — not implemented yet.  The commented call suggests a
/// Canny-based approach was planned; `dst` is currently never written.
void my_edgeDetector(cv::Mat &image, cv::Mat &dst)
{
    //cv::Canny();
}


/// Coarse-to-fine template matching over Gaussian pyramids of `image` and
/// `temp`.  The coarsest level is matched exhaustively; each finer level only
/// re-matches inside ROIs seeded by the thresholded result of the level above.
/// Scores below `confidence` are zeroed at every level (CV_TM_CCORR_NORMED,
/// so scores are in [0, 1]).  The finest-level map is copied into `result`.
void my_templateMatchPyramid(cv::Mat &image, cv::Mat &temp, cv::Mat &result, int maxlevel, double confidence)
{
    std::vector<cv::Mat> refs, tpls, results;

    // Build Gaussian pyramid
    cv::buildPyramid(image, refs, maxlevel);
    cv::buildPyramid(temp, tpls, maxlevel);

    cv::Mat ref, tpl, res;

    // Process each level, from coarsest (maxlevel) to finest (0).
    for (int level = maxlevel; level >= 0; level--)
    {
        ref = refs[level];
        tpl = tpls[level];
        // Standard match-result size: (W - w + 1) x (H - h + 1).
        res = cv::Mat::zeros(ref.size() + cv::Size(1,1) - tpl.size(), CV_32FC1);

        if (level == maxlevel)
        {
            // On the smallest level, just perform regular template matching
            cv::matchTemplate(ref, tpl, res, CV_TM_CCORR_NORMED);
        }
        else
        {
            // On the next layers, template matching is performed on pre-defined 
            // ROI areas.  We define the ROI using the template matching result 
            // from the previous layer.

            // Upsample the coarser result to this level's coordinate frame.
            cv::Mat mask;
            cv::pyrUp(results.back(), mask);

            // Surviving (non-zero) scores become a binary-ish 8-bit mask.
            cv::Mat mask8u;
            mask.convertTo(mask8u, CV_8U);

            // Find matches from previous layer
            std::vector<std::vector<cv::Point> > contours;
            cv::findContours(mask8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

            // Use the contours to define region of interest and 
            // perform template matching on the areas
            for (int i = 0; i < contours.size(); i++)
            {
                cv::Rect r = cv::boundingRect(contours[i]);
                // Grow the source ROI by (tpl - 1) so that every placement
                // whose result lands in res(r) has a full template window.
                cv::matchTemplate(
                    ref(r + (tpl.size() - cv::Size(1,1))), 
                    tpl, 
                    res(r), 
                    CV_TM_CCORR_NORMED
                );
            }
        }

        // Only keep good matches
        cv::threshold(res, res, confidence, 1., CV_THRESH_TOZERO);
        results.push_back(res);
    }
    // Finest-level response map is the final answer.
    res.copyTo(result);
}