#include "img processing.h"
#include "com.h" // remove this sometime after debugging
#include "cos_constants.h"
#include "led.h"
#include "operators.h"
#include <math.h> // fabsf, fmodf (used in imgP1; was previously only available transitively)
#include <stdlib.h>
// Settings used for img processing. Modify these defines to adjust and tune the behaviour.

// Process 1: Color&Wall detection.

// Camera properties
#define CAMERA_CHIEF_RAY_ANGLE ((float)(25.0/180.0*3.14159))
#define CAMERA_FIELD_OF_VIEW ((float)(2.0f*CAMERA_CHIEF_RAY_ANGLE))
#define CAMERA_ONE_OVER_TAN_CRA 2.14450692051f // 1/tanf(CAMERA_CHIEF_RAY_ANGLE)

// Color detection. If a pixel is within these (inclusive) bounds, a color is detected.
#define COLOR_DETECTION_LOW_BLUE 0x60
#define COLOR_DETECTION_HIGH_BLUE 0xA0
#define COLOR_DETECTION_LOW_RED 0x60
#define COLOR_DETECTION_HIGH_RED 0xA0

// Segmentation. If a pixel is below this threshold, it will be set to the fail value; if above, to the success value.
#define SEGMENTATION_THRESHOLD 0x5A
#define SEGMENTATION_SUCCESS 0xFF
#define SEGMENTATION_FAIL 0x00

// Feature extraction. Objects need to be at least this many pixels big to be considered valid.
#define FEATURE_EXTRACTION_MINIMUM_SIZE 4
#define FEATURE_EXTRACTION_MAX_WALLS 64

// Classification.
#define CLASSIFICATION_DIFF_MAX 4.f
#define CLASSIFICATION_DIFF_MIN 0.25f
#define CLASSIFICATION_NEG_DIFF_MAX -2.5f
#define CLASSIFICATION_NEG_DIFF_MIN -0.4f
#define CLASSIFICATION_MIN_LENGTH 2
#define CLASSIFICATION_MAX_WALLS 8
#define CLASSIFICATION_MAX_SIZE_ERROR 3
#define CLASSIFICATION_WALL_TO_FLOAT 2.f
#define CLASSIFICATION_WALL_SIZE_TO_DISTANCE ((CLASSIFICATION_WALL_TO_FLOAT)*0.5f*CAMERA_ONE_OVER_TAN_CRA)
#define CLASSIFICATION_ANGLE_COMPENSATION 0.9f


// Cheap cosine approximation using the cosConstants lookup table (which
// covers [0, pi/2]). Other quadrants are folded in via symmetry:
//   cos(-a) = cos(a), cos(a) = -cos(a - pi) for a > pi, cos(a) = cos(pi - a).
// NOTE(review): recursion depth grows with |a|/pi; callers appear to pass
// angles near [-2pi, 2pi] — confirm no huge inputs reach this.
float cheapCosf(float a) {
	if(a < 0) return cheapCosf(-a);
	if(a > 3.14159f) return -cheapCosf(a-3.14159f);
	if(a > 0.5f*3.14159f) return cheapCosf(3.14159f-a);
	float index = a / (3.14159f * 0.5f) * cosConstantsCount;
	int i = (int)index;
	// BUGFIX: a == pi/2 exactly slips past the strict `>` guard above and
	// yields index == cosConstantsCount, one past the end of the table.
	if(i >= cosConstantsCount) i = cosConstantsCount - 1;
	return cosConstants[i];
}

// Cheap sine approximation, derived from the cosine lookup via the
// identity sin(a) = cos(a - pi/2). cheapCosf handles the negative input.
float cheapSinf(float a) {
	const float halfPi = 0.5f * 3.14159f;
	return cheapCosf(a - halfPi);
}

// Globals
// Result of the most recent colour scan; -1 until the first imgP1() call.
int imgPColorDetected;

// Init
// Resets the image-processing module state. Call once before using imgP1()/imgP2().
void imgPInit(void) {
	// -1 marks "no scan performed yet"; imgP1() overwrites this each frame.
	imgPColorDetected = -1;
}


// img process 1: Wall detection
// Passes an image for processing. After calling this, the contents of img are modified.
//
// Pipeline: enhancement -> colour scan -> segmentation -> feature extraction
// (runs of equal segmented pixels become wall segments) -> classification
// (group segments into walls, estimate facing angle, distance, length and
// world coordinates) -> transmit results over com.
void imgP1(image_t* img) {
	// Enhancement
	vCalculateMeanOfThreeRows(img);
	
	// Colour scan. The global is cleared first so it never holds a stale
	// value while the scan runs; result is read back via imgP1ColorDetected().
	imgPColorDetected = 0;
	imgPColorDetected = iScanRowForColors(img, NULL, 0);
	
	// Segmentation: binarize the image around an iso-data threshold.
	vThresholdIsoData(img, img, DARK);
	
	// Feature extraction. One yuv32_t packs two pixels (Y0, Y1), hence width/2.
	unsigned int count = img->width/2;
	yuv32_t* pixelPtr = img->data;
	unsigned int currentWall = 0;     // number of segments recorded so far
	unsigned int currentWallSize = 0; // length (px) of the run in progress
	unsigned int currentWallPos = 0;  // x coordinate (px) of the current pixel
	struct wall {
		unsigned int start, end, state; // run covers [start, end) px with segmented value `state`
	} wall[FEATURE_EXTRACTION_MAX_WALLS];
	unsigned int currentColor = (*pixelPtr).Y0;
	while(count--) {
		yuv32_t pixel = *(pixelPtr++);
		// First pixel of the pair.
		if(currentColor != pixel.Y0) {
			// Run ended: record it if long enough. `currentWallPos != currentWallSize`
			// drops the run touching x=0, whose true left edge is outside the view.
			if(currentWallSize >= FEATURE_EXTRACTION_MINIMUM_SIZE && currentWallPos != currentWallSize) {
				wall[currentWall].start = currentWallPos-currentWallSize;
				wall[currentWall].end = currentWallPos;
				wall[currentWall].state = currentColor;
				currentWall++;
				if(currentWall == FEATURE_EXTRACTION_MAX_WALLS) break;
			}
			currentWallSize = 1;
			currentColor = pixel.Y0;
		} else {
			currentWallSize++;
		}
		currentWallPos++;
		
		// Second pixel of the pair; identical logic.
		if(currentColor != pixel.Y1) {
			if(currentWallSize >= FEATURE_EXTRACTION_MINIMUM_SIZE && currentWallPos != currentWallSize) {
				wall[currentWall].start = currentWallPos-currentWallSize;
				wall[currentWall].end = currentWallPos;
				wall[currentWall].state = currentColor;
				currentWall++;
				if(currentWall == FEATURE_EXTRACTION_MAX_WALLS) break;
			}
			currentWallSize = 1;
			currentColor = pixel.Y1;
		} else {
			currentWallSize++;
		}
		currentWallPos++;
	}
	
	// Debug output (see the com.h include note at the top of the file).
	comWrite(TYPE_WALLS,(void*)wall,currentWall*sizeof(int)*3);
	
	// Classification
	const int wallCount = currentWall;
	// Spatial frequency of each segment (1/width). Width is at least
	// FEATURE_EXTRACTION_MINIMUM_SIZE, so the division is safe.
	float wallFreq[FEATURE_EXTRACTION_MAX_WALLS];
	for(int i = 0; i<wallCount; i++) {
		wallFreq[i] = 1.f / (float)(wall[i].end - wall[i].start);
	}
	// Step 1: Detect peaks in frequency changes (pass 0). A peak marks a
	// boundary between two physical walls.
	int peak[FEATURE_EXTRACTION_MAX_WALLS];
	int peakCount = 0;
	for(int i = 0; i+2<wallCount; i++) {
		int j = i+1;
		int k = i+2;
		// Segments must strictly alternate state (i == k, both != j);
		// any other pattern is treated as a boundary immediately.
		if(wall[i].state == wall[j].state || wall[j].state == wall[k].state || wall[i].state != wall[k].state) {
			peak[peakCount] = j;
			peakCount++;
		} else {
			float diff1 = wallFreq[i]-wallFreq[j];
			float diff2 = wallFreq[j]-wallFreq[k];
			if(diff1 != 0 && diff2 != 0) {
				if(diff1*diff2 < 0) {
					// Frequency trend reverses sign: a peak unless the ratio stays
					// inside the tolerated band [NEG_DIFF_MAX, NEG_DIFF_MIN].
					if(diff1/diff2 < CLASSIFICATION_NEG_DIFF_MAX || diff1/diff2 > CLASSIFICATION_NEG_DIFF_MIN) {
						peak[peakCount] = j;
						peakCount++;
					}
				} else if(diff1/diff2 > CLASSIFICATION_DIFF_MAX || diff1/diff2 < CLASSIFICATION_DIFF_MIN) {
					peak[peakCount] = j;
					peakCount++;
				}
			}
		}
	}
	// Closing sentinel so the final wall is emitted too. peakCount is at most
	// wallCount-2 here, so this write never overflows peak[].
	peak[peakCount] = wallCount-1;
	peakCount++;
	// Step 2: Convert peak-delimited segment groups into finalized walls.
	struct finalizedWall {
		int start;       // first pixel column of the wall
		int end;         // last pixel column of the wall
		int startWall;   // first segment index belonging to the wall
		int endWall;     // last segment index belonging to the wall
		float faceAngle; // facing angle relative to the camera axis (rad)
		float dist;      // estimated distance to the wall
		float length;    // wall length in world units
		float offset;    // NOTE(review): never written before being sent — confirm it is unused
		vec2 startPos;   // world coordinates of the wall end points
		vec2 endPos;
	} fWall[CLASSIFICATION_MAX_WALLS];
	int fWallCount = 0;
	for(int i = 0; i<peakCount; i++) {
		// BUGFIX: fWall[] only holds CLASSIFICATION_MAX_WALLS (8) entries while
		// up to FEATURE_EXTRACTION_MAX_WALLS-1 peaks can arrive; without this
		// guard a busy image overflowed the array (stack corruption).
		if(fWallCount == CLASSIFICATION_MAX_WALLS) break;
		int size; // size in blocks
		if(i == 0) {
			size = peak[i] - 3;
		} else {
			size = peak[i] - peak[i-1] - 3;
		}
		if(size >= CLASSIFICATION_MIN_LENGTH) {
			// Trim the segments closest to the boundary; they belong to the
			// transition region between two walls.
			if(i == 0) {
				fWall[fWallCount].startWall = 2;
			} else {
				fWall[fWallCount].startWall = peak[i-1] + 2;
			}
			fWall[fWallCount].endWall = peak[i]-1;
			fWall[fWallCount].start = wall[fWall[fWallCount].startWall].start;
			fWall[fWallCount].end = wall[fWall[fWallCount].endWall].end;
			fWall[fWallCount].faceAngle = 0;
			fWall[fWallCount].startPos = vec2XY(0,0);
			fWall[fWallCount].endPos = vec2XY(0,0);
			fWallCount++;
		}
	}
	// Step 3: Calculate wall angle
	for(int i = 0; i<fWallCount; i++) {
		// Method 1: See if we are directly facing this wall. Then segment
		// widths stay nearly constant; track the widest segment and the
		// largest width change between neighbours.
		int center = -1;
		int size = 0;
		int largestError = 0;
		for(int j = fWall[i].startWall; j+2<=fWall[i].endWall; j++) {
			int wallSize[3] = {
				wall[j].end-wall[j].start,
				wall[j+1].end-wall[j+1].start,
				wall[j+2].end-wall[j+2].start,
			};
			int diff[2] = {
				abs(wallSize[0] - wallSize[1]),
				abs(wallSize[1] - wallSize[2]),
			};
			if(diff[0] > largestError) largestError = diff[0];
			if(diff[1] > largestError) largestError = diff[1];
			if(size < wallSize[1]) {
				size = wallSize[1];
				center = j+1;
			}
		}
		
		if(center != -1 && largestError <= CLASSIFICATION_MAX_SIZE_ERROR) {
			// Facing the wall: map the widest segment's centre pixel to a view angle.
			float wallCenter = ((float)(wall[center].start + wall[center].end))*0.5f;
			float viewPos = wallCenter / (float)img->width;
			float angle = (viewPos * CAMERA_FIELD_OF_VIEW) - CAMERA_CHIEF_RAY_ANGLE;
			fWall[i].faceAngle = angle * CLASSIFICATION_ANGLE_COMPENSATION;
			continue;
		}
		
		// Method 2: Stretch the wall out towards infinity. Segment widths
		// shrink linearly towards the vanishing point; extrapolate its pixel
		// position from the width change over the wall's span.
		int wallSize[2] = {
			wall[fWall[i].startWall].end - wall[fWall[i].startWall].start,
			wall[fWall[i].endWall].end - wall[fWall[i].endWall].start,
		};
		size = wall[fWall[i].endWall].start - wall[fWall[i].startWall].start;
		
		int heightDiff = 
			(wall[fWall[i].startWall].end - wall[fWall[i].startWall].start) -
			(wall[fWall[i].endWall].end - wall[fWall[i].endWall].start);
		
		float ratio = ((float)heightDiff)/((float)size); // heightChange/px
		
		// Pixel distance to the extrapolated vanishing point (may be +-inf
		// when heightDiff == 0; that case is caught just below).
		float dist = ((float)wallSize[0]) / ratio;
		
		if(heightDiff == 0 || fabsf(dist) > (((float)img->width) / CAMERA_FIELD_OF_VIEW)*3.14159f*0.5f) {
			// the infinity is >90deg away, which should be impossible, so we disregard it and assume we're looking flat at the wall.
			int pos = (wall[fWall[i].startWall].start + wall[fWall[i].endWall].end) / 2;
			float angle = (((float)pos) / ((float)img->width)) * CAMERA_FIELD_OF_VIEW - CAMERA_CHIEF_RAY_ANGLE;
			fWall[i].faceAngle = angle * CLASSIFICATION_ANGLE_COMPENSATION;
		} else {
			int infinityLoc = wall[fWall[i].startWall].start + dist;
			
			float viewPos = ((float)infinityLoc) / (float)img->width;
			float angle = viewPos  * CAMERA_FIELD_OF_VIEW - CAMERA_CHIEF_RAY_ANGLE;
			// The wall face is perpendicular to the direction of its vanishing
			// point; the sign depends on which end of the wall is nearer.
			if(wallSize[0] > wallSize[1]) {
				angle -= 3.14159f*0.5f;
			} else {
				angle += 3.14159f*0.5f;
			}
			angle = fmodf(angle,3.14159f*2.f);
			fWall[i].faceAngle = angle * CLASSIFICATION_ANGLE_COMPENSATION;
		}
	}
	// Step 4: Calculate distance to wall
	for(int i = 0; i<fWallCount; i++) {
		// Get the (supposedly) pixel coordinate of where the camera looks flat against the wall.
		float angle = fWall[i].faceAngle;
		int anglePx = (angle + CAMERA_CHIEF_RAY_ANGLE) / CAMERA_FIELD_OF_VIEW * img->width;
		
		// Get the largest wall section.
		int wallUsed = -1;
		int wallSize = -1;
		for(int j = fWall[i].startWall; j<=fWall[i].endWall; j++) {
			int currSize = wall[j].end - wall[j].start;
			if(currSize > wallSize) {
				wallUsed = j;
				wallSize = currSize;
			}
		}
		
		// Compare pixel coordinates; order so diffStart <= diffEnd.
		int diffStart = anglePx - wall[wallUsed].start;
		int diffEnd = anglePx - wall[wallUsed].end;
		diffStart = abs(diffStart);
		diffEnd = abs(diffEnd);
		if(diffStart > diffEnd) {
			int tmp = diffStart;
			diffStart = diffEnd;
			diffEnd = tmp;
		}
		if(diffStart > cosConstantsCount || diffEnd > cosConstantsCount) {
			// Our results is out of bounds, which should never happen, so we assume it's far away.
			fWall[i].dist = 1024.f;
		} else {
			// Triangulate: the two edges of the widest segment subtend known
			// angles; a segment spans CLASSIFICATION_WALL_TO_FLOAT world units.
			float sinEnd = cheapSinf((((float)diffEnd)/((float)cosConstantsCount)) * 3.14159f*0.5f);
			float sinStart = cheapSinf((((float)diffStart)/((float)cosConstantsCount)) * 3.14159f*0.5f);
			float cosEnd = cheapCosf((((float)diffEnd)/((float)cosConstantsCount)) * 3.14159f*0.5f);
			float cosStart = cheapCosf((((float)diffStart)/((float)cosConstantsCount)) * 3.14159f*0.5f);
			
			float cosDiff = cosStart / cosEnd;
			float totalDist = cosDiff * CLASSIFICATION_WALL_TO_FLOAT;
			
			float distance = totalDist / sinEnd * cosEnd;
			
			fWall[i].dist = distance;
		}
	}
	// Step 5: Calculate length of wall and offset to calculated angle
	for(int i = 0; i<fWallCount; i++) {
		// One segment corresponds to CLASSIFICATION_WALL_TO_FLOAT world units.
		fWall[i].length = (fWall[i].endWall - fWall[i].startWall + 1) * CLASSIFICATION_WALL_TO_FLOAT;
	}
	// Step 6: Obtain real world coordinates.
	for(int i = 0; i<fWallCount; i++) {
		// View angles of the wall's outermost pixels, used to decide whether
		// the wall extends to our left or right of the facing direction.
		float wallAngle[2] = {
			(wall[fWall[i].startWall].start * (CAMERA_FIELD_OF_VIEW / img->width)) - CAMERA_CHIEF_RAY_ANGLE,
			(wall[fWall[i].endWall].end * (CAMERA_FIELD_OF_VIEW / img->width)) - CAMERA_CHIEF_RAY_ANGLE,
		};
		// Point on the wall we face head-on, in world coordinates.
		vec2 wallOrigin = vec2XY(fWall[i].dist * cheapCosf(fWall[i].faceAngle),fWall[i].dist * cheapSinf(fWall[i].faceAngle));
		if(fWall[i].faceAngle < wallAngle[0]) {
			// Wall lies to one side: offset perpendicular (+90deg) from the
			// facing direction, then extend along the wall by its length.
			float angleDiff = wallAngle[0] - fWall[i].faceAngle;
			float dist = fWall[i].dist / cheapCosf(angleDiff) * cheapSinf(angleDiff);
			vec2 wallOffset = vec2XY(dist*cheapCosf(fWall[i].faceAngle+0.5f*3.14159f),dist*cheapSinf(fWall[i].faceAngle+0.5f*3.14159f));
			fWall[i].startPos = vec2Add(wallOrigin,wallOffset);
			float vecLen = vec2Len(wallOffset);
			wallOffset = vec2Scale(wallOffset,1.f/vecLen*(float)fWall[i].length);
			fWall[i].endPos = vec2Add(fWall[i].startPos,wallOffset);
		} else {
			// Mirror case: perpendicular offset in the -90deg direction.
			float angleDiff =  fWall[i].faceAngle - wallAngle[1];
			float dist = fWall[i].dist / cheapCosf(angleDiff) * cheapSinf(angleDiff);
			vec2 wallOffset = vec2XY(dist*cheapCosf(fWall[i].faceAngle-0.5f*3.14159f),dist*cheapSinf(fWall[i].faceAngle-0.5f*3.14159f));
			fWall[i].startPos = vec2Add(wallOrigin,wallOffset);
			float vecLen = vec2Len(wallOffset);
			wallOffset = vec2Scale(wallOffset,1.f/vecLen*(float)fWall[i].length);
			fWall[i].endPos = vec2Add(fWall[i].startPos,wallOffset);
		}
	}
	
	// Send the finalized walls to the app in a flat, float-only layout.
	struct sendWall {
		float angle,dist;
		float x1,y1,x2,y2;
	} sW[CLASSIFICATION_MAX_WALLS];
	for(int i = 0; i<fWallCount; i++) {
		sW[i].angle = fWall[i].faceAngle;
		sW[i].dist = fWall[i].dist;
		sW[i].x1 = fWall[i].startPos.x;
		sW[i].y1 = fWall[i].startPos.y;
		sW[i].x2 = fWall[i].endPos.x;
		sW[i].y2 = fWall[i].endPos.y;
	}
	
	comWrite(TYPE_MAP,(void*)sW,sizeof(struct sendWall) * fWallCount);
}
// Gets the amount of walls detected
int imgP1GetWallCount(void);
// Fetches a pointer to the detected walls. The pointer points to an array of size imgP1GetWallCount().
mapWall* imgP1GetWalls(void);
// Whether a colour was detected on the wall, and if so, what its coordinate is. (returns -1 if nothing detected, x coord otherwise)
int imgP1ColorDetected(void) {
	// Exposes the module-global written by imgP1()'s colour scan
	// (and initialized to -1 by imgPInit()).
	return imgPColorDetected;
}

// img process 2: Obj detection
// Passes an image for processing. After calling this, the contents of img are modified.
void imgP2(image_t* img);
// Gets the object type detected. Returns 0 if nothing was found.
int imgP2GetObjType(void);
// Gets the object position on the image
unsigned int imgP2GetObjPos(void);

