/**
 * Tracks "near" pixels in the Kinect depth image and splits them into two
 * clusters (nominally the user's two hands), exposing raw and smoothed
 * centroid positions plus a debug overlay image.
 */
class KinectTracker {

  // Size of the kinect depth/RGB image in pixels.
  int kw = 640;
  int kh = 480;
  // Raw-depth cutoff: pixels with rawDepth < threshold count as "near"
  // (close enough to the sensor to be treated as part of the user).
  int threshold = 745;
  // true when the last track() pass found NO pixels under the threshold.
  // NOTE(review): getTracked() returns this flag as-is, so despite its name
  // it reports "NOT tracked" — kept unchanged for caller compatibility.
  boolean noTracked;

  // Raw centroids of the two near-pixel clusters, split at the overall
  // x-centroid. Which cluster corresponds to which physical hand depends on
  // the mirroring applied during the scan.
  PVector locRight;
  PVector locLeft;
  // Overall x-centroid of all near pixels, paired with a fixed y of 50.
  PVector locHandsCentre;
  // Smoothed (lerped) copy of locRight, for jitter-free display.
  PVector lerpedLoc;

  // Raw depth data from the sensor: one int per pixel, row-major.
  int[] depth;

  // Composite image drawn by display(): RGB video with near pixels in red.
  PImage display;

  KinectTracker() {
    kinect.start();
    kinect.enableDepth(true);
    kinect.enableRGB(true);
    // We could skip processing the grayscale image for efficiency
    // but this example is just demonstrating everything
    kinect.processDepthImage(true);

    display = createImage(kw, kh, PConstants.RGB);

    locRight = new PVector(0, 0);
    locLeft = new PVector(0, 0);
    locHandsCentre = new PVector(0, 0);
    lerpedLoc = new PVector(0, 0);
    // No frame processed yet, so nothing is tracked.
    noTracked = true;
  }

  /**
   * Returns the "nothing tracked" flag (true when the last track() pass found
   * no pixels under the threshold). NOTE(review): the name suggests inverted
   * polarity; behavior is preserved because callers may rely on it.
   */
  boolean getTracked() {
    return noTracked;
  }

  /**
   * Scans the current depth frame and updates the cluster centroids.
   * Pass 1 computes the overall x-centroid of all near pixels; pass 2 splits
   * the near pixels into left/right of that centroid and averages each side.
   */
  void track() {

    // Get the raw depth as array of integers
    depth = kinect.getRawDepth();

    // Being overly cautious here
    if (depth == null) return;

    float pos = 0;      // running sum of x over all near pixels
    float sumX1 = 0;    // cluster 1 (x < overall centroid) sums
    float sumY1 = 0;
    float sumX2 = 0;    // cluster 2 (x >= overall centroid) sums
    float sumY2 = 0;
    float count = 0;    // total near pixels
    float count1 = 0;   // near pixels in cluster 1
    float count2 = 0;   // near pixels in cluster 2

    // Pass 1: overall x-centroid of everything under the threshold.
    for (int x = 0; x < kw; x++) {
      for (int y = 0; y < kh; y++) {
        // Mirroring the image
        int offset = kw - x - 1 + y * kw;
        // Grabbing the raw depth
        int rawDepth = depth[offset];

        // Testing against threshold
        if (rawDepth < threshold) {
          pos += x;
          count++;
        }
      }
    }

    if (count != 0) {
      noTracked = false;
      // Pass 2: split near pixels at the overall centroid and accumulate
      // per-cluster centroid sums.
      for (int x = 0; x < kw; x++) {
        for (int y = 0; y < kh; y++) {
          // Mirroring the image
          int offset = kw - x - 1 + y * kw;
          // Grabbing the raw depth
          int rawDepth = depth[offset];

          // Testing against threshold
          if (rawDepth < threshold) {
            if (x < pos / count) {
              sumX1 += x;
              sumY1 += y;
              count1++;
            }
            else {
              sumX2 += x;
              sumY2 += y;
              count2++;
            }
          }
        }
      }
    }
    else {
      noTracked = true;
    }

    // As long as we found something, update the cluster centroids; otherwise
    // keep the previous positions.
    if (count1 != 0) {
      locRight = new PVector(sumX1 / count1, sumY1 / count1);
    }

    if (count2 != 0) {
      locLeft = new PVector(sumX2 / count2, sumY2 / count2);
    }

    // BUGFIX: previously computed unconditionally, so count == 0 produced
    // 0f/0f == NaN and getHandsCentre() returned (NaN, 50). Guard it like
    // the per-cluster updates above.
    if (count != 0) {
      locHandsCentre = new PVector(pos / count, 50);
    }

    // Interpolating the location, doing it arbitrarily for now
    lerpedLoc.x = PApplet.lerp(lerpedLoc.x, locRight.x, 0.3f);
    lerpedLoc.y = PApplet.lerp(lerpedLoc.y, locRight.y, 0.3f);
  }

  /** Smoothed (lerped) position of the "right" cluster. */
  PVector getLerpedPos() {
    return lerpedLoc;
  }

  /** Raw centroid of cluster 1 (left of the overall x-centroid). */
  PVector getPosRight() {
    return locRight;
  }

  /** Overall x-centroid of all near pixels (y fixed at 50). */
  PVector getHandsCentre() {
    return locHandsCentre;
  }

  /** Raw centroid of cluster 2 (right of the overall x-centroid). */
  PVector getPosLeft() {
    return locLeft;
  }

  /**
   * Draws the RGB video frame with every pixel under the depth threshold
   * painted red, as a visual debug overlay.
   */
  void display() {
    PImage img = kinect.getVideoImage();

    // Being overly cautious here
    if (depth == null || img == null) return;

    // Going to rewrite the depth image to show which pixels are in threshold
    // A lot of this is redundant, but this is just for demonstration purposes
    display.loadPixels();
    for (int x = 0; x < kw; x++) {
      for (int y = 0; y < kh; y++) {
        // mirroring image
        int offset = kw - x - 1 + y * kw;
        // Raw depth
        int rawDepth = depth[offset];

        int pix = x + y * display.width;
        if (rawDepth < threshold) {
          // A red color instead
          display.pixels[pix] = color(150, 50, 50);
        }
        else {
          display.pixels[pix] = img.pixels[offset];
        }
      }
    }
    display.updatePixels();

    // Draw the image
    image(display, 0, 0);
  }

  /** Shuts down the kinect device. */
  void quit() {
    kinect.quit();
  }

  /** Current raw-depth cutoff for "near" pixels. */
  int getThreshold() {
    return threshold;
  }

  /** Sets the raw-depth cutoff for "near" pixels. */
  void setThreshold(int t) {
    threshold = t;
  }
}

