#include "stdafx.h"
#include "opencv2\highgui\highgui_c.h"
#include "opencv2\legacy\compat.hpp"
#include "opencv2\objdetect\objdetect.hpp"
#include "opencv2\imgproc\imgproc_c.h"
#include "utils.h"

#define USE_VIDEO 1

// Copies the `rect` region of `src` into `dest`.
// `dest` must already be allocated with the same size as `rect` and the same
// depth/channels as `src` (cvCopy requires matching formats).
// Statement order matters: set ROI -> copy -> reset, so `src` is left with its
// full-image ROI restored on exit.
void crop(IplImage* src, IplImage* dest, CvRect rect)
{
    cvSetImageROI(src, rect);   // temporarily restrict src to the region of interest
    cvCopy(src, dest);          // copies only the ROI pixels
    cvResetImageROI(src);       // restore src to its full extent
}

struct Lane
{
    Lane(){}
    Lane(CvPoint a, CvPoint b, float angle, float kl, float bl): p0(a), p1(b), angle(angle), votes(0), visited(false), found(false), k(kl), b(bl) {}
    
    CvPoint p0, p1;
    int votes;
    bool visited, found;
    float angle, k, b;
};

// Per-side (left/right) lane tracking state.
struct Status
{
	// Start in "reset" mode: first valid detection is accepted unconditionally.
	Status():reset(true), lost(0){}
	// Exponentially smoothed slope (k) and intercept (b) of the tracked lane
	// line y = k*x + b. ExpMovingAverage is declared in utils.h —
	// presumably a standard EMA filter; verify its alpha there.
	ExpMovingAverage k, b;
	bool reset;  // true when tracking must be (re)initialized from the next detection
	int lost;    // consecutive frames without an acceptable lane update
};

// Color constants for drawing overlays.
// FIX: removed the trailing semicolons — with them, using a macro inside an
// expression (e.g. cvLine(img, p0, p1, RED, 2)) expands to a syntax error.
// FIX: BLUE was defined as CV_RGB(255,0,255), which is magenta and an exact
// duplicate of PURPLE; corrected to actual blue.
#define GREEN  CV_RGB(0, 255, 0)
#define RED    CV_RGB(255, 0, 0)
#define BLUE   CV_RGB(0, 0, 255)
#define PURPLE CV_RGB(255, 0, 255)

// Global tracking state for the right and left lanes (used by processSide).
Status laneR, laneL;
//std::vector<Vehicle> vehicles;
//std::vector<VehicleSample> samples;

// Detection/tracking tuning constants.
enum {
    // scan every Nth image row when searching for edge responses
    SCAN_STEP = 5,
    // reject lines whose absolute angle (degrees) is below this — near-horizontal
    LINE_REJECT_DEGREES = 10,
    // edge response strength to recognize for 'WHITE'
    BW_TRESHOLD = 250,
    // horizontal margin (px) kept away from the image border during scans
	BORDERX = 10,
	// px
    MAX_RESPONSE_DIST = 5,
    // edge detector minimum hysteresis threshold
    CANNY_MIN_TRESHOLD = 1,
    // edge detector maximum hysteresis threshold
    CANNY_MAX_TRESHOLD = 100,
    // line approval vote threshold
    HOUGH_TRESHOLD = 50,
    // remove lines shorter than this threshold
    HOUGH_MIN_LINE_LENGTH = 50,
    // join lines to one with smaller than this gaps
    HOUGH_MAX_LINE_GAP = 100,
    // minimum lines for a region to pass validation as a 'CAR'
    CAR_DETECT_LINES = 4,
    // minimum horizontal line length from car body in px
    CAR_H_LINE_LENGTH = 10,
    // max vehicle detection sampling history
    MAX_VEHICLE_SAMPLES = 30,
    // probability positive matches for valid car
    CAR_DETECT_POSITIVE_SAMPLES = MAX_VEHICLE_SAMPLES - 2,
    // remove car after this much no update frames
    MAX_VEHICLE_NO_UPDATE_FREQ = 15
};

// max per-frame change in slope k accepted by the lane tracker
#define K_VARY_FACTOR 0.2f
// max per-frame change in intercept b (px) accepted by the lane tracker
#define B_VARY_FACTOR 20
// frames without an acceptable update before the side's tracking is reset
#define MAX_LOST_FRAMES 30

// Scans one image row between startX and endX (either direction) for isolated
// edge responses shaped /^\_ : a run of 'white' (above BW_TRESHOLD) pixels
// that returns to 'black'. The x of each run's first pixel is appended to `list`.
// Assumes a single-channel 8-bit image (the Canny `edges` buffer) — the pixel
// index uses no channel multiplier.
void FindResponses(IplImage *img, int startX, int endX, int y, std::vector<int>& list)
{
	// FIX: row offset must use widthStep (bytes per row, including padding),
	// not width * nChannels — rows of IplImage buffers may be padded.
	const int row = y * img->widthStep;
	unsigned char* ptr = (unsigned char*)img->imageData;

    const int step = (endX < startX) ? -1 : 1;
    int range = (endX > startX) ? endX - startX + 1 : startX - endX + 1;

    for (int x = startX; range > 0; x += step, range--)
    {
        // skip black: loop until white pixels show up
        if (ptr[row + x] <= BW_TRESHOLD) continue;

        // first response found — skip over the rest of the white run
        int idx = x + step;
        while (range > 0 && ptr[row + idx] > BW_TRESHOLD) {
            idx += step;
            range--;
        }

        // FIX: guard with range > 0 — when the white run reached the scan
        // boundary, the original read ptr[row+idx] one pixel past the range.
        if (range > 0 && ptr[row + idx] <= BW_TRESHOLD) {
            // run returned to black: record the run's starting x
            list.push_back(x);
        }

        x = idx; // continue scanning from the end of the run
    }
}

void processSide(std::vector<Lane> lanes, IplImage *edges, bool right) {

	Status* side = right ? &laneR : &laneL;

	// response search
	int w = edges->width;
	int h = edges->height;
	const int BEGINY = 0;
	const int ENDY = h-1;
	const int ENDX = right ? (w-BORDERX) : BORDERX;
	int midx = w/2;
	int midy = edges->height/2;
	unsigned char* ptr = (unsigned char*)edges->imageData;

	// show responses
	int* votes = new int[lanes.size()];
	for(int i=0; i<lanes.size(); i++) votes[i++] = 0;

	for(int y=ENDY; y>=BEGINY; y-=SCAN_STEP) {
		std::vector<int> rsp;
		FindResponses(edges, midx, ENDX, y, rsp);

		if (rsp.size() > 0) {
			int response_x = rsp[0]; // use first reponse (closest to screen center)

			float dmin = 9999999;
			float xmin = 9999999;
			int match = -1;
			for (int j=0; j<lanes.size(); j++) {
				// compute response point distance to current line
				float d = dist2line(
						cvPoint2D32f(lanes[j].p0.x, lanes[j].p0.y), 
						cvPoint2D32f(lanes[j].p1.x, lanes[j].p1.y), 
						cvPoint2D32f(response_x, y));

				// point on line at current y line
				int xline = (y - lanes[j].b) / lanes[j].k;
				int dist_mid = abs(midx - xline); // distance to midpoint

				// pick the best closest match to line & to screen center
				if (match == -1 || (d <= dmin && dist_mid < xmin)) {
					dmin = d;
					match = j;
					xmin = dist_mid;
					break;
				}
			}

			// vote for each line
			if (match != -1) {
				votes[match] += 1;
			}
		}
	}

	int bestMatch = -1;
	int mini = 9999999;
	for (int i=0; i<lanes.size(); i++) {
		int xline = (midy - lanes[i].b) / lanes[i].k;
		int dist = abs(midx - xline); // distance to midpoint

		if (bestMatch == -1 || (votes[i] > votes[bestMatch] && dist < mini)) {
			bestMatch = i;
			mini = dist;
		}
	}

	if (bestMatch != -1) {
		Lane* best = &lanes[bestMatch];
		float k_diff = fabs(best->k - side->k.get());
		float b_diff = fabs(best->b - side->b.get());

		bool update_ok = (k_diff <= K_VARY_FACTOR && b_diff <= B_VARY_FACTOR) || side->reset;

		printf("side: %s, k vary: %.4f, b vary: %.4f, lost: %s\n", 
			(right?"RIGHT":"LEFT"), k_diff, b_diff, (update_ok?"no":"yes"));
		
		if (update_ok) {
			// update is in valid bounds
			side->k.add(best->k);
			side->b.add(best->b);
			side->reset = false;
			side->lost = 0;
		} else {
			// can't update, lanes flicker periodically, start counter for partial reset!
			side->lost++;
			if (side->lost >= MAX_LOST_FRAMES && !side->reset) {
				side->reset = true;
			}
		}

	} else {
		printf("no lanes detected - lane tracking lost! counter increased\n");
		side->lost++;
		if (side->lost >= MAX_LOST_FRAMES && !side->reset) {
			// do full reset when lost for more than N frames
			side->reset = true;
			side->k.clear();
			side->b.clear();
		}
	}

	delete[] votes;
}

// Classifies probabilistic-Hough segments into left/right lane candidates,
// draws them on temp_frame, and hands each side to processSide for tracking.
void processLanes(CvSeq* lines, IplImage* edges, IplImage* temp_frame)
{
    // classify lines to left/right side
    std::vector<Lane> left, right;

    for (int i = 0; i < lines->total; i++)
    {
        // each element is a pair of endpoints: line[0] and line[1]
        CvPoint* line = (CvPoint*)cvGetSeqElem(lines, i);
        int dx = line[1].x - line[0].x;
        int dy = line[1].y - line[0].y;
        float angle = atan2f((float)dy, (float)dx) * 180/CV_PI;

        // reject near-horizontal lines (road markings are steep in this crop)
        if (fabs(angle) <= LINE_REJECT_DEGREES)
        {
            continue;
        }

		// assume the vanishing point is close to the image horizontal center;
		// compute line parameters y = k*x + b (dx forced non-zero to avoid DIV/0)
		dx = (dx == 0) ? 1 : dx;
		float k = dy/(float)dx;
		float b = line[0].y - k*line[0].x;

		// assign the lane's side by its midpoint position.
		// FIX: the original read line[2].x — out of bounds, the segment has
		// exactly two points (line[0], line[1]).
		int midx = (line[0].x + line[1].x) / 2;
		if (midx < temp_frame->width / 2)
		{
			left.push_back(Lane(line[0], line[1], angle, k, b));
		}
		else if (midx > temp_frame->width / 2)
		{
			right.push_back(Lane(line[0], line[1], angle, k, b));
		}
    }

	// FIX: drawing and side processing were inside the classification loop,
	// redrawing every partial vector and running the trackers once per Hough
	// segment; they belong after classification, once per frame.

	// show Hough lines: right in blue, left in red
	for (size_t i = 0; i < right.size(); i++)
	{
		cvLine(temp_frame, right[i].p0, right[i].p1, CV_RGB(0,0,255), 2);
	}

	for (size_t i = 0; i < left.size(); i++)
	{
		cvLine(temp_frame, left[i].p0, left[i].p1, CV_RGB(255,0,0), 2);
	}

	processSide(left, edges, false);
	processSide(right, edges, true);
}

int main(void)
{
    
#ifdef USE_VIDEO
    //CvCapture *input_video = cvCreateFileCapture("video/road.avi");
	//CvCapture *input_video = cvCreateFileCapture("video/road_001.mp4");
	//CvCapture *input_video = cvCreateFileCapture("video/road_002.mp4");
	//CvCapture *input_video = cvCreateFileCapture("video/road_003.mp4");
	CvCapture *input_video = cvCreateFileCapture("video/road_004.mp4");
#else
    CvCapture *input_video = cvCaptureFromCAM(0);
#endif
    
    if (input_video == NULL)
    {
        fprintf(stderr, "Error: Can't open video");
        
        return -1;
    }
    
    CvFont font;
    cvInitFont(&font, CV_FONT_VECTOR0, 0.25f, 0.25f);
    CvSize video_size;
    
    //get captured video width & height
    video_size.width = (int)cvGetCaptureProperty(input_video, CV_CAP_PROP_FRAME_WIDTH);
    video_size.height = (int)cvGetCaptureProperty(input_video, CV_CAP_PROP_FRAME_HEIGHT);
    
    long current_frame = 0;
    int key_pressed = 0;
    IplImage * frame = NULL;
    //cvSize is a OpenCV structure that contains a width and a height.
    CvSize frame_size = cvSize(video_size.width, video_size.height/2);
    //create a grayscale image with the same dimensions as the original image.
    //Function cvCreateImage(size, depth, channels - used to create and allocate image data
    IplImage *temp_frame = cvCreateImage(frame_size, IPL_DEPTH_8U, 3);
    IplImage *grey = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
    IplImage *edges = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
    //get half_frame
    IplImage *half_frame = cvCreateImage(cvSize(video_size.width/2, video_size.height/2), IPL_DEPTH_8U, 3);
    
    CvMemStorage* houghStorage = cvCreateMemStorage(0);
    CvMemStorage* haarStorage = cvCreateMemStorage(0);
	//CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)cvLoad("haar/cars3.xml");
    cvSetCaptureProperty(input_video, CV_CAP_PROP_POS_FRAMES, current_frame);
    
	// while key pressed not esc
    while(key_pressed != 27)
    {
        frame = cvQueryFrame(input_video);
        if (frame == NULL)
        {
            fprintf(stderr, "Error: null frame received");
            return -1;
        }
        
        //reduce the image by 2
        cvPyrDown(frame, half_frame, CV_GAUSSIAN_5x5);
        
        //convert to grayscale
//        cvCvtColor(temp_frame, grey, CV_BGR2GRAY);
        
        //we're interested only in road below horizont - so crop top image portion off
        crop(frame, temp_frame, cvRect(0, frame_size.height, frame_size.width, frame_size.height));
        //convert to grayscale
        cvCvtColor(temp_frame, grey, CV_BGR2GRAY);
        
        //Perform a Gaussian blur (convolving ith 5x5 Gaussian) & detect edges
        cvSmooth(grey, grey, CV_GAUSSIAN, 5, 5);
        cvCanny(grey, edges, CANNY_MIN_TRESHOLD, CANNY_MAX_TRESHOLD);
        
        //do Hough transform to find langes
        double rho = 1;
        double theta = CV_PI/180;
        CvSeq* lines = cvHoughLines2(edges, houghStorage, CV_HOUGH_PROBABILISTIC, rho, theta, HOUGH_TRESHOLD, HOUGH_MIN_LINE_LENGTH, HOUGH_MAX_LINE_GAP);
        processLanes(lines, edges, temp_frame);
        
        // process vehicles
//        vehicleDetection(half_frame, cascade, haarStorage);
//        drawVehicles(half_frame);
        cvShowImage("Half-frame", half_frame);
        cvShowImage("Grey", grey);
        cvShowImage("Edges", edges);
        // show middle line
        cvLine(temp_frame, cvPoint(frame_size.width / 2, 0), cvPoint(frame_size.width / 2, frame_size.height), CV_RGB(255, 255, 0), 1);
        cvShowImage("Color", temp_frame);
		
		// moves window to the specified position
		cvMoveWindow("Half-frame", half_frame->width * 2 + 10, 0);
        cvMoveWindow("Grey", 0, 0); 
        cvMoveWindow("Edges", 0, frame_size.height+25);
		cvMoveWindow("Color", 0, 2*(frame_size.height+25)); 

		key_pressed = cvWaitKey(5);

		//cvReleaseHaarClassifierCascade(&cascade);
		/*cvReleaseMemStorage(&haarStorage);
		cvReleaseMemStorage(&houghStorage);

		cvReleaseImage(&grey);
		cvReleaseImage(&edges);
		cvReleaseImage(&temp_frame);
		cvReleaseImage(&half_frame);

		cvReleaseCapture(&input_video);*/
    }
}