/* Simple interface to alvar library
 * provided by the alvar development team 
 * VTT Technical Research Centre of Finland
 * Modified by Faraz Mirzaei, University of Minnesota
 * For use with SWIG interface in python
 * faraz@cs.umn.edu */


#include <iostream>
#include <stdio.h>
#include "math.h"
#include "cv.h"
#include "highgui.h"
#include "MarkerDetector.h"
#include "Camera.h"

#define MAX_FILENAME_LEN 128
/*
 * The threshold for blur detection : markers with a gradient
 * across the edge less than this number will be discarded.
 * A value of 255 corresponds to a sharp transition from white to 
 * black across one pixel
 */
#define BLUR_THRESHOLD 65

using namespace alvar;
using namespace std;

// True until the first DetectMarker() call, which loads the camera
// calibration (or falls back to SetRes) and then clears this flag.
bool init=true;
// Marker edge size handed to MarkerDetector::SetMarkerSize.
// NOTE(review): units follow the ALVAR convention (presumably cm) — confirm.
const int marker_size=15;
// ALVAR camera model; calibrated (or resolution-initialized) on first use.
Camera cam;
// Path of the calibration XML file.
// NOTE(review): DetectMarker appends to this stream on every call, so the
// string grows with each invocation; only the first call actually uses it.
std::stringstream calibrationFilename;
// The image currently being processed: set by DetectMarker, read by
// isBlurred. Not owned here — DetectMarker releases it.
IplImage* src;
// Currently unused: the strncpy that filled it is commented out in
// DetectMarker.
char filename[MAX_FILENAME_LEN];
// Per-image scratch buffers built lazily by isBlurred and freed by
// release_images: x/y Sobel gradients and the greyscale source.
IplImage *gradX, *gradY, *greySrc;

// A detected, non-blurred marker: its four corner points in image
// coordinates plus its index into MarkerDetector's marker list.
// NOTE: `id` is the detector-list INDEX, not the ALVAR marker id;
// DetectMarker uses it later to look the real id up via GetId().
// (The redundant C-style `typedef struct` was dropped — in C++ the
// struct name is already a type name.)
struct marker_t {
	std::vector<PointDouble> corners;
	int id;
};

// Sign of x as +/-1.0. sign(0) is defined as +1.0 — the edge-walking
// loop in isBlurred relies on this for zero x-components.
double sign(double x) {
	if (x < 0) {
		return -1.0;
	}
	return 1.0;
}

void release_images() {
	//cvReleaseImage(&src);
	cvReleaseImage(&gradX);
	cvReleaseImage(&gradY);
	cvReleaseImage(&greySrc);
}

bool isBlurred(std::vector<PointDouble> corners_in) {
	// Check for blurring by calculating the average derivative
	// of the image intensity across two edges of the image. If it is 
	// less than a threshold, we consider the image blurred.

	static bool sobelized = false;
	
	// round the input corners to the nearest integer pixel
	CvPoint corners[4];
	for (int i = 0; i < 4; i++) {
		corners[i].x = (int)(corners_in[i].x + .5);
		corners[i].y = (int)(corners_in[i].y + .5);
	}
	
	// Calculate the x and y components of the gradient of the image, if not
	// already done for this image.
	
	if (!sobelized) {
		sobelized = true;
		//src = cvLoadImage(filename);
		greySrc = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
		gradX = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
		gradY = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
		IplImage* gradXRaw = cvCreateImage(cvGetSize(src), IPL_DEPTH_16S, 1);
		IplImage* gradYRaw = cvCreateImage(cvGetSize(src), IPL_DEPTH_16S, 1);
		
		cvCvtColor(src, greySrc, CV_BGR2GRAY);

		cvSobel(greySrc, gradXRaw, 1, 0, 3);
		cvConvertScaleAbs(gradXRaw, gradX, .2);

		cvSobel(greySrc, gradYRaw, 0, 1, 3);
		cvConvertScaleAbs(gradYRaw, gradY, .2);

		cvReleaseImage(&gradXRaw);
		cvReleaseImage(&gradYRaw);
	}
	
	//IplImage* debugImg = cvLoadImage(filename);
	//cvNamedWindow("debug");
	
	// calculate vectors along and perpendicular to edges 0 and 1.
	// perpendicular: (x,y) -> (-y,x)
	
	double perp[2][2], edgeVec[2][2];
	double magnitude;
	
	for (int i = 0; i < 2; i++) {
		perp[i][0] = corners[i+1].y - corners[i].y;
		perp[i][1] = corners[i].x - corners[i+1].x;
		edgeVec[i][0] = corners[i+1].x - corners[i].x;
		edgeVec[i][1] = corners[i+1].y - corners[i].y;
		
		// normalize the vectors
		magnitude = sqrt(perp[i][0] * perp[i][0] + perp[i][1] * perp[i][1]);
		perp[i][0] /= magnitude;
		perp[i][1] /= magnitude;
		
		magnitude = sqrt(edgeVec[i][0] * edgeVec[i][0]
						+ edgeVec[i][1] * edgeVec[i][1]);
		edgeVec[i][0] /= magnitude;
		edgeVec[i][1] /= magnitude;
	}
	
	/* debugging */
	/*
	cvNamedWindow("sobelX");
	cvShowImage("sobelX", gradX);
	cvNamedWindow("sobelY");
	cvShowImage("sobelY", gradY);
	cvWaitKey();
	*/
	
	
	// now we're ready to calculate the actual directional derivative across
	// the edges. it's equal to the dot product of the gradient and 
	// the perpendicular vector.
	int x, y;
	int iterations = 0;
	double dX, dY, xStep, yStep;
	double gradientSum = 0;
	int imgstep = gradX->widthStep;
	uchar* gradXData = (uchar *)gradX->imageData;
	uchar* gradYData = (uchar *)gradY->imageData;
	for (int i = 0; i < 2; i++) {
		dX = corners[i].x;
		dY = corners[i].y;
		xStep = edgeVec[i][0];
		yStep = edgeVec[i][1];
		gradientSum = 0;
		iterations = 0;
		double xGrad, yGrad;
		double sideColor1 = 0, sideColor2 = 0;
		while (sign(edgeVec[i][0]) * (dX - corners[i+1].x) < 0) {
			++iterations;
			x = (int) (dX + .5);
			y = (int) (dY + .5);
			
			xGrad = (double)gradXData[y*imgstep + x];
			yGrad = (double)gradYData[y*imgstep + x];
			gradientSum += abs(perp[i][0]*xGrad) + abs(perp[i][1]*yGrad);
			//printf("Gradient here: (%f, %f); sum: %f\n", xGrad, yGrad, gradientSum);
			dX += xStep;
			dY += yStep;
			
			// compensate for low lighting conditions. Low light will make the
			// gradient less than normal, so it isn't an accurate indication of blurriness...
			// Compensate for this by scaling the gradient by the "dimness" of the picture.
			uchar* imgData = (uchar*)greySrc->imageData;
			
			// sorry for this mess of code. it's just getting the color to either side of the edge
			// and adding it to the corresponding value.
			sideColor1 += (double)imgData[(y + (int)(5*perp[i][1]))*imgstep + x + (int)(5*perp[i][0])];
			sideColor2 += (double)imgData[(y - (int)(5*perp[i][1]))*imgstep + x - (int)(5*perp[i][0])];
			
			/*
			// draw marking circles & lines for debugging
			cvCircle(debugImg, cvPoint(x,y), 1, CV_RGB(0,0,100));
			
			if (iterations % 10 == 0) {
				cvLine(debugImg, cvPoint(x - 10*perp[i][0], y - 10*perp[i][1]),
					   cvPoint(x + 10*perp[i][0], y + 10*perp[i][1]), CV_RGB(150, 100*i, 0), 2);
			}
			*/
		}
		// calculate averages and take absolute values.
		gradientSum /= (double)iterations;
		gradientSum *= gradientSum < 0 ? -1 : 1;
		
		sideColor1 /= (double)iterations;
		sideColor2 /= (double)iterations;
		
		double scaleFactor = 255.0 / abs(sideColor1 - sideColor2);
		gradientSum *= scaleFactor;
		//fprintf(stderr, "scale: %f, color1: %f, color2: %f\n", scaleFactor, sideColor1, sideColor2);
		//printf("gradient sum: %f\n", gradientSum);
		
		//cvShowImage("debug", debugImg);
		
		if (gradientSum < BLUR_THRESHOLD) return true;
	}
	return false;
}

/*
 * Detect ALVAR markers in the image file `fname`.
 *
 * pnt        : output; pnt[i][j] receives corner j of accepted marker i
 * fname      : path of the image file to analyze
 * id         : output; id[i] receives the ALVAR id of accepted marker i
 * max_length : capacity of the caller's pnt/id arrays
 *
 * Returns the number of non-blurred markers written to the output arrays
 * (clamped to max_length), or -1 if the image could not be loaded.
 *
 * Fixes relative to the original:
 *  - cvLoadImage failure is now detected instead of dereferencing NULL;
 *  - the calibration filename is built once (the stream previously grew
 *    on every call);
 *  - the output is clamped to max_length (the clamp was commented out,
 *    allowing the caller's arrays to be overrun);
 *  - the global `src` is cleared after the image is released so no
 *    dangling pointer is left published.
 */
int DetectMarker (CvPoint2D64f** pnt, char* fname, int *id, int max_length)
{
    IplImage* image = cvLoadImage(fname);
    if (image == NULL) {
        cerr << "DetectMarker: could not load image: " << fname << endl;
        return -1;
    }
    src = image;  // publish the current image for isBlurred()

    if (init) {
        init = false;
        // Use the calibration file, if available; fall back to just
        // recording the image resolution.
        calibrationFilename << "camera_calibration_webcam.xml";
        cout<<"Loading calibration: "<<calibrationFilename.str();
        if (cam.SetCalib(calibrationFilename.str().c_str(), image->width, image->height)) {
            cout<<" [Ok]"<<endl;
        } else {
            cam.SetRes(image->width, image->height);
            cout<<" [Fail]"<<endl;
            cout<<"Image size set to: " << image->width << "x" << image->height << endl;
        }
        // Kept from the original; the matrix itself is not used here.
        double p[16];
        cam.GetOpenglProjectionMatrix(p,image->width,image->height);
    }

    // One detector for the lifetime of the process.
    static MarkerDetector<MarkerData> marker_detector;
    marker_detector.SetMarkerSize(marker_size);

    marker_detector.Detect(image, &cam);

    int num_id = (int)marker_detector.markers->size();

    // Keep only non-blurred markers. The detector-list index is stored in
    // marker_t::id so the real ALVAR id can be looked up afterwards.
    vector<marker_t> markers;
    for (int i = 0; i < num_id; i++) {
        const vector<PointDouble> &corners = (*(marker_detector.markers))[i].marker_corners_img;
        if (!isBlurred(corners)) {
            marker_t m = { corners, i };
            markers.push_back(m);
        }
    }

    // Copy out at most max_length markers — the caller's arrays are only
    // that long, so without this clamp they would be overrun.
    int count = (int)markers.size();
    if (count > max_length) count = max_length;

    for (int i = 0; i < count; i++) {
        id[i] = (*(marker_detector.markers))[markers[i].id].GetId();
        for (size_t j = 0; j < markers[i].corners.size(); j++) {
            pnt[i][j].x = markers[i].corners[j].x;
            pnt[i][j].y = markers[i].corners[j].y;
        }
    }
    cout << "number of markers detected: " << count << endl;

    cvReleaseImage(&image);
    src = NULL;  // the image is gone; don't leave a dangling pointer published

    return count;
}


