import cv
import numpy as np
import cPickle
import rescale
import freenect
import time
import calibkinect as ck
import moments
import math

# initialize display windows
cv.NamedWindow('Depth')
cv.MoveWindow('Depth',640,50)
#cv.NamedWindow('RGB')
cv.NamedWindow('Tracker')
cv.MoveWindow('Tracker',640,50+480)
#cv.NamedWindow('bboxA')
#cv.MoveWindow('bboxA',0,50+480)
# Prefix for path to data
pathPrefix = '/media/disk/'

# Use camera, or read data file:
# 'passive' replays pickled recordings, 'async' runs the freenect run loop
# (60-second limit via body()), anything else polls synchronously.
# See the dispatch at the bottom of the file.
mode = 'async'

# Initialize time limit; set to the real start timestamp when async mode
# begins, and read by body() to kill the run loop after 60 seconds.
start_time = 0

# Store the indices of the pixels found by the tracker
trackIndices = []
# Cluster centroids of the two hands from the previous frame; cluster() uses
# them to keep hand identities stable between frames.
meanA, meanB = (0,0),(0,0)
# Pixel coordinates of each tracked hand -- presumably (row, col) pairs given
# how display_tracker indexes data[i][j]; confirm against rescale.frame.
handA = np.array([])
handB = np.array([])
# Rolling [0, 1] confidence that each hand's bounding box looks hand-shaped,
# updated by track() from the box's aspect ratio.
confidenceA = 0.0
confidenceB = 0.0

def dist(tupleA,tupleB):
    """Return the Euclidean distance between two 2-D points.

    Args:
        tupleA, tupleB: (x, y) pairs (any indexable 2-sequences).

    Uses math.hypot instead of a hand-rolled sqrt of summed squares: same
    result, but robust against intermediate overflow/underflow.
    """
    return math.hypot(tupleA[0] - tupleB[0], tupleA[1] - tupleB[1])

def cluster(trackIndices):
    """Split the tracked pixel indices into two hands via 2-means clustering.

    Updates the module-level centroids meanA/meanB and returns the two pixel
    arrays (handA, handB), keeping hand identity stable across frames by
    comparing the new centroids against the previous frame's.
    """
    global meanA
    global meanB
    # Generate samples for clustering (legacy cv API wants a float32 CvMat)
    samples = cv.fromarray(trackIndices.astype('float32'))
    # Generate matrix of labels for clustering
    labels = cv.CreateMat(1,len(trackIndices),cv.CV_32S)
    cv.Set(labels,0.0)
    # Do 2-means clustering, terminating after at most 5 iterations
    cv.KMeans2(samples,2,labels,(cv.CV_TERMCRIT_ITER, 5, 0))
    # Convert the labels back to a numpy array (CvMatrices are an impossible format)
    labels = np.asarray(labels)[0]

    # Separate data into two hands as found by the clustering
    handA = []
    handB = []
    for i in range(len(labels)):
        if labels[i]:
            handA.append(trackIndices[i])
        else:
            handB.append(trackIndices[i])
    handA = np.array(handA)
    handB = np.array(handB)

    oldMeanA = meanA
    oldMeanB = meanB
  
    # Find the mean of the hands for deciding which is the left and which is the right
    # NOTE(review): if a cluster comes out empty, .mean(axis=0) yields nan and
    # the distance checks below never trigger -- confirm this cannot happen.
    meanA = handA.mean(axis=0)
    meanB = handB.mean(axis=0)

    # A centroid jump of more than 250 px between frames is treated as a
    # mistrack, and that hand is dropped for this frame.
    if dist(meanA,oldMeanA) > 250:
        handA = np.array([])
    if dist(meanB,oldMeanB) > 250:
        handB = np.array([])

    # If the new A centroid is closer to last frame's B centroid than to last
    # frame's A centroid, the k-means labels flipped -- swap hands back.
    if dist(meanA,oldMeanB) < dist(meanA,oldMeanA):
        tmp = handB.copy()
        handB = handA.copy()
        handA = tmp.copy()
        tmp = meanA
        meanA = meanB
        meanB = tmp

    return handA,handB

# Does tracking in the depth image
# Modifies globals trackIndices, handA, handB
def track(data):
    """Find hand pixels in a depth frame and update the tracking globals.

    Pipeline: rescale.frame picks candidate pixel indices, cluster() splits
    them into two hands, and each found hand updates its shape-confidence
    score from its bounding-box aspect ratio. Hand A additionally gets SURF
    keypoints overlaid and shown in the 'bboxA' window.
    """
    global confidenceA,confidenceB
    global trackIndices
    global handA,handB
    # Find indices to track
    trackIndices = rescale.frame(data)
    
    # No idea why this is neccessary, but it's neccessary
    # (coerces whatever rescale.frame returns into a plain 2-D numpy array)
    tmp = []
    for chunk in trackIndices:
        tmp.append(chunk)
    trackIndices = np.array(tmp)

    # Get u,v
    #u = np.array([x for (x,y) in trackIndices]) 
    #v = np.array([y for (x,y) in trackIndices]) 
   
    #if ((confidenceA + confidenceB)/2.0) < 0.7:
    handA, handB = cluster(trackIndices)
  
    # Find the mean of the hands for deciding which is the left and which is the right
    if len(handA) > 0:
        #meanA = handA.mean(axis=0)
        # Bounding box as [(min col, min row), (max col, max row)]
        bboxA = [(np.min(handA[:,1]), np.min(handA[:,0])), (np.max(handA[:,1]),np.max(handA[:,0]))]
        # Binary mask of hand A inside its bounding box
        binImageA = np.zeros((bboxA[1][1]-bboxA[0][1] + 1, bboxA[1][0]-bboxA[0][0]+1))

        for (i,j) in handA:
            binImageA[i-bboxA[0][1]][j-bboxA[0][0]] = 1 
       
        # Grey-level (128) copy of the mask for SURF keypoint extraction
        surfImage = binImageA.astype('uint8')
        surfImage = surfImage*128
        image2 = cv.CreateImageHeader((surfImage.shape[1], surfImage.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 1)
        cv.SetData(image2, surfImage.tostring(),
               surfImage.dtype.itemsize * surfImage.shape[1])

        
        # SURF with a high Hessian threshold (10000) so only strong keypoints survive
        (keypoints, descriptors) = cv.ExtractSURF(image2, None, cv.CreateMemStorage(), (0, 10000, 4, 4))
        # Mark each keypoint as a white pixel for the debug display
        for ((x, y), laplacian, size, dir, hessian) in keypoints:
            surfImage[y][x] = 255

        # Re-point the image header at the now-annotated pixel buffer
        cv.SetData(image2, surfImage.tostring(),
               surfImage.dtype.itemsize * surfImage.shape[1])

        # Hand-shaped blobs have height:width between 1:3 and 3:1; nudge the
        # confidence up or down accordingly, clamped to [0, 1].
        if (binImageA.shape[0]/float(binImageA.shape[1])< 3) and (binImageA.shape[0]/float(binImageA.shape[1])) > 1/3.0:
            confidenceA = min(confidenceA+0.1,1.0)
        else:
            confidenceA = max(0.0,confidenceA-0.1)


        
        # Find features
        # NOTE(review): featuresA is computed but never used or stored here.
        featuresA = moments.findMoments(binImageA) 
        #binImageA = binImageA.astype('uint8')
        #binImageA = binImageA * 20000
        #image2 = cv.CreateImageHeader((binImageA.shape[1], binImageA.shape[0]),
        #                         cv.IPL_DEPTH_8U,
        #                         1)
        #cv.SetData(image2, binImageA.tostring(),
        #       binImageA.dtype.itemsize * binImageA.shape[1])
        cv.ShowImage('bboxA',image2)
        cv.WaitKey(30)

    # Check if a second hand was found
    if len(handB) > 0:
        # meanB = handB.mean(axis=0)
        bboxB = [(np.min(handB[:,1]), np.min(handB[:,0])), (np.max(handB[:,1]),np.max(handB[:,0]))]
        binImageB = np.zeros((bboxB[1][1]-bboxB[0][1] + 1, bboxB[1][0]-bboxB[0][0]+1))
        # Same aspect-ratio confidence update as for hand A
        if (binImageB.shape[0]/float(binImageB.shape[1])< 3) and (binImageB.shape[0]/float(binImageB.shape[1])) > 1/3.0:
            confidenceB = min(confidenceB+0.1,1.0)
        else:
            confidenceB = max(0.0,confidenceB-0.1)

        for (i,j) in handB:
            binImageB[i-bboxB[0][1]][j-bboxB[0][0]] = 1 
        # NOTE(review): featuresB is computed but never used or stored here.
        featuresB = moments.findMoments(binImageB) 




# Displays the depth image
def display_depth(dev, data, timestamp):
    """Normalize a depth frame, run the tracker on it, and display it.

    NOTE(review): `data` is modified in place, so the caller's array is
    altered by the normalization below.
    """
    global trackIndices
    global handA, handB
    # Stretch values so the minimum maps to 0 and the range fills 16 bits.
    # NOTE(review): with an integral dtype under Python 2 the scale factor is
    # integer division, and a uniform frame (max 0 after the subtraction)
    # would divide by zero -- confirm the expected input range.
    data -= np.min(data.ravel())
    data *= 65536 / np.max(data.ravel())

    track(data)

    # Project points to correct coordinates
    #points = ck.projectPoints(data[u,v],u,v)
    #_,points = ck.depth2xyzuv(data[u,v],u,v)
    #trackIndices = []
    #for point in points:
    #    trackIndices.append((point[0],point[1]))

    # Wrap the raw bytes in a 16-bit, single-channel IplImage header for display
    image = cv.CreateImageHeader((data.shape[1], data.shape[0]),
                                 cv.IPL_DEPTH_16U,
                                 1)
    cv.SetData(image, data.tostring(),
               data.dtype.itemsize * data.shape[1])
    
    cv.ShowImage('Depth', image)
    cv.WaitKey(30)

# Displays the tracker image (copy of RGB image with some pixels modified)
def display_tracker(dev,data,timestamp):
    """Paint the tracked hand pixels onto an RGB frame and show the result.

    Hand A is drawn in red, hand B in blue, each with a matching bounding
    rectangle. `data` is modified in place, so callers pass a copy (see
    display_rgb).

    BUG FIX: removed the dead `u = trackIndices[:,0]` / `v = ...` extraction.
    The values were never used, and the slice raised a TypeError whenever
    trackIndices was still the initial plain list (e.g. if a video frame
    arrived before the first depth frame was tracked).
    """
    global trackIndices, handA, handB

    # Channel values: hand A red (R=255, B=0), hand B blue (R=0, B=255)
    rvA,rvB,bvA,bvB = 255,0,0,255

    # Color each tracked pixel; the hand arrays hold (row, col) pairs
    for [i,j] in handA:
        data[i][j][0] = rvA
        data[i][j][1] = 0
        data[i][j][2] = bvA
    for [i,j] in handB:
        data[i][j][0] = rvB
        data[i][j][1] = 0
        data[i][j][2] = bvB

    image = cv.CreateImageHeader((data.shape[1], data.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 3)
    # Note: We swap from RGB to BGR here
    cv.SetData(image, data[:, :, ::-1].tostring(),
               data.dtype.itemsize * 3 * data.shape[1])

    # Draw bounding boxes; rectangle corners are (col, row) = (x, y)
    if len(handA) > 0:
        bboxA = [(np.min(handA[:,1]), np.min(handA[:,0])), (np.max(handA[:,1]),np.max(handA[:,0]))]
        cv.Rectangle(image,bboxA[0],bboxA[1],cv.CV_RGB(rvA,0.,bvA))

    if len(handB) > 0:
        bboxB = [(np.min(handB[:,1]), np.min(handB[:,0])), (np.max(handB[:,1]),np.max(handB[:,0]))]
        cv.Rectangle(image,bboxB[0],bboxB[1],cv.CV_RGB(rvB,0.,bvB))

    cv.ShowImage('Tracker', image)
    cv.WaitKey(30)

# Displays the normal RGB camera image
def display_rgb(dev, data, timestamp):
    """Show the raw RGB frame, then hand a copy off to the tracker display."""
    height, width = data.shape[0], data.shape[1]
    frame = cv.CreateImageHeader((width, height), cv.IPL_DEPTH_8U, 3)
    # Note: We swap from RGB to BGR here (OpenCV expects BGR ordering)
    bgr = data[:, :, ::-1]
    cv.SetData(frame, bgr.tostring(), data.dtype.itemsize * 3 * width)
    cv.ShowImage('RGB', frame)
    cv.WaitKey(30)
    # Tracker paints pixels in place, so give it its own copy of the frame
    display_tracker(dev, data.copy(), timestamp)

# Misc operations that are called by the kinect
def body(dev, ctx):
    """Per-iteration callback for freenect.runloop; aborts after one minute."""
    elapsed = time.time() - start_time
    if elapsed <= 60.:
        return
    print('60 sec passed, killing')
    raise freenect.Kill

# Find index closest to index idx of array with length lenB in an array of length lenA
def mapIndex(lenA, lenB, idx):
    """Map index *idx* of a length-lenB sequence onto a length-lenA one.

    Scales the index by lenA/lenB and rounds to the nearest integer.
    """
    scaled = idx * (lenA / float(lenB))
    return int(round(scaled))

# Read data from file
if mode == 'passive':
    # Replay pre-recorded depth and RGB streams from disk.
    # NOTE(review): unpickling is only safe on files written by the trusted
    # recorder -- never load untrusted data this way.
    with open(pathPrefix + 'two_hands_spread_fingers_depth.data') as f:
        dData = cPickle.load(f)
    with open(pathPrefix + 'two_hands_spread_fingers_image.data') as f:
        iData = cPickle.load(f)

    for i in range(len(dData)):
        # Depth frames drive the replay loop
        display_depth(None, dData[i], None)

        # The RGB stream may have a different frame count, so map the depth
        # frame index onto the RGB stream's index range.
        frame = iData[mapIndex(len(iData), len(dData), i)]
        display_tracker(None, frame, None)

# Use kinect as source (kills after 60 seconds)
elif mode == 'async':
    start_time = time.time()
    freenect.runloop(depth=display_depth,
                 video=display_tracker, body=body)

# Synchronous polling fallback
else:
    while(True):
        display_depth(None, freenect.sync_get_depth(), None)
        # BUG FIX: was `freenect.sync.get.video()`, which is not a valid
        # attribute chain on the freenect module and raised AttributeError.
        # NOTE(review): some freenect versions return (frame, timestamp)
        # tuples from sync_get_* -- confirm and index [0] if so.
        display_tracker(None, freenect.sync_get_video(), None)