package birdsVSZombiesClient;

import java.awt.Rectangle;

import hypermedia.video.OpenCV;

import org.openkinect.processing.Kinect;

import com.esotericsoftware.kryonet.Client;

import processing.core.PApplet;
import processing.core.PConstants;
import processing.core.PImage;
import processing.core.PVector;
import shared.BVZConstants;

public class KinectTracker {

	// Parent Processing applet; MUST be assigned before any KinectTracker is
	// constructed (createImage/color calls below dereference it).
	public static PApplet parent;

	// Size of the kinect depth image, in pixels.
	public int kw = 640;
	public int kh = 480;
	// Raw-depth cutoff: pixels with a raw depth below this are "tracked".
	public int threshold = 505;

	// Screen-space offset at which gesture feedback is drawn; chosen per
	// player role in display().
	public PVector kOffset;

	// Raw centroid of all in-threshold pixels (updated by track()).
	public PVector loc;

	// Smoothed (interpolated) tracked location.
	public PVector lerpedLoc;

	// Latest raw depth frame pulled from the kinect.
	public int[] depth;

	Kinect kinect;

	public OpenCV opencv;

	// Image rewritten each frame to visualize which pixels pass the threshold.
	public PImage display;

	// Gesture detection state (bounding box, gesture dispatch).
	public GestureControl gesture;

	// Guards the Haar cascade so it is loaded only once, not on every frame.
	private boolean cascadeLoaded = false;

	KinectTracker(Kinect _kinect, OpenCV cv) {
		kinect = _kinect;
		kinect.start();
		kinect.enableDepth(true);
		// Enable the RGB camera so OpenCV detection can use the video image.
		kinect.enableRGB(true);

		// We work directly from the raw depth array, so skip the grayscale
		// depth-image processing for efficiency.
		kinect.processDepthImage(false);

		display = parent.createImage(kw, kh, PConstants.RGB);

		loc = new PVector(0, 0);
		lerpedLoc = new PVector(0, 0);

		gesture = new GestureControl();

		opencv = cv;
		opencv.allocate(kw, kh);
	}

	/**
	 * Reads the latest raw depth frame and updates the centroid of all
	 * in-threshold pixels ({@code loc}) plus its smoothed version
	 * ({@code lerpedLoc}). No-op if no depth frame is available yet.
	 */
	void track() {
		// Get the raw depth as an array of integers.
		depth = kinect.getRawDepth();

		// Being overly cautious here.
		if (depth == null)
			return;

		float sumX = 0;
		float sumY = 0;
		int count = 0; // pure counter — int, not float

		for (int x = 0; x < kw; x++) {
			for (int y = 0; y < kh; y++) {
				// Mirror the image horizontally.
				int offset = kw - x - 1 + y * kw;
				// Grab the raw depth value.
				int rawDepth = depth[offset];

				// Test against the threshold.
				if (rawDepth < threshold) {
					sumX += x;
					sumY += y;
					count++;
				}
			}
		}

		// Only move the centroid if at least one pixel passed the threshold;
		// otherwise keep the previous location.
		if (count != 0) {
			loc = new PVector(sumX / count, sumY / count);
		}

		// Interpolate toward the raw location; 0.3 is an empirically chosen
		// smoothing factor.
		lerpedLoc.x = PApplet.lerp(lerpedLoc.x, loc.x, 0.3f);
		lerpedLoc.y = PApplet.lerp(lerpedLoc.y, loc.y, 0.3f);
	}

	/** @return the smoothed tracked position. */
	PVector getLerpedPos() {
		return lerpedLoc;
	}

	/** @return the raw tracked position (centroid of in-threshold pixels). */
	PVector getPos() {
		return loc;
	}

	/**
	 * Rewrites the display image to mark in-threshold pixels, computes the
	 * mean pixel distance from the centroid, and forwards everything to the
	 * gesture detector.
	 *
	 * @param clientStat current client status, passed through to detect()
	 * @param bvzClient  the kryonet client, passed through to detect()
	 * @param playerRole one of the BVZConstants roles; selects kOffset
	 */
	void display(int clientStat, Client bvzClient, int playerRole) {
		PImage img = kinect.getDepthImage();
		int pixelsInThreshold = 0;
		float stdDvSum = 0;
		float stdDv = 0;
		// Must be called at the beginning of every frame.
		gesture.getBoundingBox().setInitUpdate(true);

		// Being overly cautious here.
		if (depth == null || img == null)
			return;

		display.loadPixels();
		// Hoist the two loop-invariant colors out of the 640x480 pixel loop.
		int inColor = parent.color(255, 0, 0);
		int outColor = parent.color(0, 0, 0);
		for (int x = 0; x < kw; x++) {
			for (int y = 0; y < kh; y++) {
				// Mirror the image horizontally.
				int offset = kw - x - 1 + y * kw;
				int rawDepth = depth[offset];

				int pix = x + y * display.width;
				if (rawDepth < threshold) {
					// Mark tracked pixels in red.
					display.pixels[pix] = inColor;

					// Feed the min/max point into the bounding box.
					gesture.getBoundingBox().update(x, y);

					// Accumulate distance from the centroid (mean deviation).
					stdDvSum += Math.sqrt((loc.x - x) * (loc.x - x) + (loc.y - y) * (loc.y - y));

					pixelsInThreshold++;
				} else {
					display.pixels[pix] = outColor;
				}
			}
		}

		// BUG FIX: guard the division — with zero in-threshold pixels the old
		// code produced NaN and fed it to standardDeviationCircle().
		stdDv = (pixelsInThreshold > 0) ? stdDvSum / pixelsInThreshold : 0;

		display.updatePixels();

		switch (playerRole) {
			case BVZConstants.BIRDSSHOOTER:
				kOffset = new PVector(0, 100);
				break;

			case BVZConstants.ZOMBIESDEFENDER:
				kOffset = new PVector(560, 100);
				break;

			default:
				// BUG FIX: an unknown role used to leave kOffset null on the
				// first call, NPE-ing inside the try below. Fall back to a
				// zero offset instead.
				if (kOffset == null) {
					kOffset = new PVector(0, 0);
				}
				break;
		}

		try {
			// Draw the gesture feedback.
			parent.imageMode(PConstants.CORNER);

			gesture.standardDeviationCircle(stdDv, loc, kOffset);
			gesture.detect(clientStat, bvzClient, playerRole);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Runs Haar-cascade detection on the current RGB video frame and outlines
	 * each detection in red on the parent sketch.
	 */
	public void detectHolding() {
		opencv.copy(kinect.getVideoImage());

		// PERF FIX: load the cascade only once — the old code re-read the XML
		// on every frame.
		// NOTE(review): hard-coded absolute path; should come from a config or
		// the sketch's data folder.
		if (!cascadeLoaded) {
			opencv.cascade("/Users/yangliu/Documents/workspace/opencvTest/src/data/aGest.xml");
			cascadeLoaded = true;
		}

		// Run detection and draw the detected area(s).
		Rectangle[] detections = opencv.detect((float) 1.2, 2, OpenCV.HAAR_DO_CANNY_PRUNING, 40, 40);
		parent.noFill();
		parent.stroke(255, 0, 0);
		parent.rectMode(PConstants.CORNER); // invariant — set once, not per rect
		for (Rectangle r : detections) {
			parent.rect(r.x, r.y, r.width, r.height);
		}
	}

	/** Shuts down the kinect device. */
	void quit() {
		kinect.quit();
	}

	/** @return the current raw-depth tracking threshold. */
	int getThreshold() {
		return threshold;
	}

	/** @param t new raw-depth tracking threshold. */
	void setThreshold(int t) {
		threshold = t;
	}
}