import cv
import numpy as np
import cPickle
import freenect
import time
import calibkinect as ck
import moments
import math
import features
import os.path
import os

# --- GUI setup: create and position the OpenCV display windows ---
cv.NamedWindow('Depth')
cv.MoveWindow('Depth',640,50)
#cv.NamedWindow('RGB')
cv.NamedWindow('Tracker')
cv.MoveWindow('Tracker',640,50+480)
cv.NamedWindow('bboxA')
cv.MoveWindow('bboxA',0,50+480)
# Prefix for the path to pre-recorded Kinect data (used in 'passive' mode)
pathPrefix = '/media/disk/'

# Buffer of captured training frames (NOTE(review): shadows builtin 'buffer')
buffer = []

# Data source: 'async' uses freenect callbacks, 'passive' replays recorded
# files, anything else falls through to synchronous Kinect polling
mode = 'async'

# Wall-clock time at program start (set before each main loop below)
start_time = 0

# Seconds to wait after start before tracking begins
startup_time = 3

# (row, col) indices of the pixels found by the tracker in the last frame
trackIndices = []

# Center of mass of the tracked region, as (x, y) image coordinates
massCenter = (0,0)

# Reference depth (isovalue) used when thresholding the depth image
depthIso = 0

# Bounding box of the tracked region: [(min_row, min_col), (max_row, max_col)]
bBox = [(0,0),(1,1)]

# Most recent gesture classification label
classification = 'None'

# Maps gesture name -> list of feature tuples loaded from data/*.features
gestures = {}

# Indicates whether or not a hand is probably being tracked
isTrackingHand = False

# Mean primary feature over all known gestures; recomputed by meanFeature()
meanSimpleFeature = (0,0,0,0)

def loadGestures():
    """Load every recorded gesture feature file from data/ into the global
    'gestures' dict (name -> list of feature tuples), then recompute the
    global mean feature vector via meanFeature()."""
    global gestures
    for g in os.listdir('data/'):
        n = g.split('.')[0]
        if len(n) == 0:
            # skip entries such as '.gitignore' whose stem is empty
            continue
        # 'with' guarantees the handle is closed (the original leaked it)
        with open('data/' + g) as f:
            gestures[n] = cPickle.load(f)
    meanFeature(gestures)

def meanFeature(gestures):
    """Compute the element-wise mean of the primary feature vector.

    gestures -- dict mapping gesture name to a list of feature tuples;
                element 0 of each tuple is the primary feature vector.

    Stores the mean in the global 'meanSimpleFeature' (as before) and
    also returns it, so callers need not go through the global.
    """
    global meanSimpleFeature
    # Collect the primary feature (t2[0]) of every sample of every gesture.
    fs = np.array([t2[0] for samples in gestures.values() for t2 in samples])
    meanSimpleFeature = np.mean(fs, axis=0)
    return meanSimpleFeature


def dist(tupleA,tupleB):
    """Euclidean distance between two 2-D points given as (x, y) pairs."""
    dx = tupleA[0] - tupleB[0]
    dy = tupleA[1] - tupleB[1]
    return math.sqrt(dx * dx + dy * dy)

def cluster(trackIndices,k):
    """Partition the tracked pixel indices into k clusters with OpenCV k-means.

    trackIndices -- sequence of (row, col) index pairs
    k            -- number of clusters

    Returns a list of k numpy arrays, one per cluster, each holding the
    index pairs assigned to that cluster.
    """
    tmp = []
    for chnk in trackIndices:
        tmp.append(chnk)
    trackIndices = np.array(tmp)
    # Generate samples for clustering (KMeans2 needs a float32 CvMat)
    samples = cv.fromarray(trackIndices.astype('float32'))

    # Generate matrix of labels for clustering, initialized to 0
    labels = cv.CreateMat(1,len(trackIndices),cv.CV_32S)
    cv.Set(labels,0.0)

    # Do k-means clustering (terminates after at most 5 iterations)
    cv.KMeans2(samples,k,labels,(cv.CV_TERMCRIT_ITER, 5, 0))

    # Convert the labels back to a numpy array (CvMatrices are an impossible format)
    labels = np.asarray(labels)[0]

    # Group the original index pairs by their assigned cluster label
    clusters = []
    for i in range(k):
        clusters.append(trackIndices[labels == float(i)])

    return clusters

def centerOfMass(frame):
    """Return the intensity-weighted centroid of a 2-D array as (x, y).

    x is the column coordinate, y the row coordinate; the pixel values
    of 'frame' act as weights.
    """
    total = np.sum(frame)
    rows = np.size(frame, 0)
    cols = np.size(frame, 1)
    # Coordinate grids: col_idx[r, c] == c and row_idx[r, c] == r
    col_idx = np.repeat([np.arange(cols)], rows, axis=0)
    row_idx = np.repeat([np.arange(rows)], cols, axis=0).transpose()
    x_centroid = np.sum(col_idx * frame) / total
    y_centroid = np.sum(row_idx * frame) / total
    return (x_centroid, y_centroid)

def threshold(f,iso,tol):
    """Return the indices (N x ndim array) of every element of f whose
    absolute distance from the isovalue 'iso' is strictly below 'tol'."""
    near_iso = np.abs(f - iso) < tol
    return np.argwhere(near_iso)

def threshold_bin(f,iso,tol):
    """Return a float array of f's shape: 1.0 wherever f is within 'tol'
    of the isovalue 'iso', 0.0 elsewhere."""
    return np.where(np.abs(f - iso) < tol, 1.0, 0.0)

# Finds the most likely classification given the recorded known gestures
# KNN Classifier
def classify(frame,k):
    """k-nearest-neighbour classification of a binary hand image.

    frame -- 2-D binary array containing the tracked region
    k     -- number of nearest neighbours that vote

    Returns the majority gesture label among the k recorded samples
    closest to 'frame' in feature space, "Noise" when the region is too
    large to be a hand, or '' when no gestures are loaded.  Also updates
    the global 'isTrackingHand' flag.
    """
    global isTrackingHand
    t1 = features.getFeatures(frame.astype('uint8'), 'Unknown')
    # Feature-distance weights used for the nearest-neighbour comparison
    weights = (1, 1, 1)

    # A region bigger than 200x200 pixels cannot be a hand at this range.
    if frame.shape[0] * frame.shape[1] > 200 * 200:
        isTrackingHand = False
        return "Noise"

    # The hand/not-hand decision only weighs the first (simple) feature
    # component, via the (1, 0, 0) weight vector.
    isTrackingHand = \
        features.dist(t1, (meanSimpleFeature, (), (), 'thing'), (1, 0, 0)) < 0.5

    # Collect (distance, label) pairs for every recorded sample and keep
    # the k nearest (sort on distance, label breaking ties).
    candidates = []
    for key in gestures:
        for t2 in gestures[key]:
            candidates.append((features.dist(t1, t2, weights), t2[3]))
    candidates.sort()
    candidates = candidates[:k]

    # Majority vote among the k nearest neighbours.
    votes = {}
    for (_, label) in candidates:
        votes[label] = votes.get(label, 0) + 1
    if not votes:
        # No known gestures at all: same '' result as the original loop.
        return ''
    return max(votes, key=votes.get)
    

# Does tracking in the depth image
# Modifies globals trackIndices, massCenter, depthIso, bBox
def track(data):
    """Track the nearest object (assumed to be the hand) in a depth frame.

    First call (trackIndices empty): threshold the whole frame around its
    minimum depth.  Later calls: search only a window extending
    MAX_PIXEL_MOVE pixels beyond the previous bounding box.
    """
    global trackIndices, massCenter, depthIso, bBox
    oldMassCenter = massCenter
    # Maximum distance (in pixels) the hand is allowed to move per frame
    MAX_PIXEL_MOVE = 40

    # Find indices to track
    if len(trackIndices) == 0:
        #trackIndices = threshold(data,numpy.min(data),1200)
        #u = trackIndices[:,0]
        #v = trackIndices[:,1]
        #bBox =[(np.min(u),np.min(v)),(np.max(u),np.max(v))]
        #print "Doing initial segmentation!"
        # Initial segmentation: everything within 1200 depth units of the
        # nearest pixel is taken to be the hand.
        depthIso = np.min(data)
        binImage = threshold_bin(data,depthIso,1200)
        trackIndices = np.argwhere(binImage)
        massCenter = centerOfMass(binImage)

        # Compute bounding box of the segmented pixels
        u = trackIndices[:,0]
        v = trackIndices[:,1]
        bBox =[(np.min(u),np.min(v)),(np.max(u),np.max(v))]
        # Refine the isovalue to the mean depth inside the bounding box
        imgBox = data[bBox[0][0]:bBox[1][0],bBox[0][1]:bBox[1][1]]
        depthIso = np.mean(imgBox)
        #print depthIso

    else:
        # Find bounding box of previous hand position
        # Get average depth in that bounding box

        # Create a window in which we look for the new hand position,
        # clamped to the image borders
        window = (max(bBox[0][0]-MAX_PIXEL_MOVE,0),min(bBox[1][0]+1+MAX_PIXEL_MOVE,data.shape[0]),\
                 max(bBox[0][1]-MAX_PIXEL_MOVE,0),min(bBox[1][1]+1+MAX_PIXEL_MOVE,data.shape[1]))

        # imgBox contains the data in the current frame where the hand was last seen + bounding pixels
        imgBox = data[window[0]:window[1],window[2]:window[3]]

        # Display the tracking window...
        #imageR = cv.CreateImageHeader((imgBox.shape[1], imgBox.shape[0]),cv.IPL_DEPTH_16U,1)
        #cv.SetData(imageR, imgBox.tostring(),imgBox.dtype.itemsize * imgBox.shape[1])

        #cv.ShowImage('ImageR', imageR)
        #cv.WaitKey(30)

        #depthIso = np.mean(imgBox[np.abs(imgBox-depthIso)<1200])
        #binImage = threshold_bin(data,np.min(data),1200)
        # Re-segment within the search window around its own minimum depth
        binImage = threshold_bin(imgBox,np.min(imgBox),1200)

        trackIndices = np.argwhere(binImage)
        # Shift window-relative (row, col) indices back to full-image coords
        trackIndices += (window[0],window[2])
        massCenter = centerOfMass(binImage)
        # centerOfMass returns (x, y) = (col, row), so the column offset
        # window[2] is added to x and the row offset window[0] to y
        massCenter = (massCenter[0]+window[2],massCenter[1]+window[0])

        '''
        # If a center of mass far away from the previous one was found...
        if dist(massCenter,oldMassCenter) > MAX_PIXEL_MOVE:
            print 'Detected sudden change!'
            # do clustering
            clusters = cluster(trackIndices,2)
            trackIndices = np.array([])
            for clus in clusters:
                mean = np.round(np.mean(clus,axis=0))
                mean = (mean[1],mean[0])
                #print 'mean: ',mean
                #print 'oldMassCenter: ',oldMassCenter
                # If the mean of a cluster is close to the previous image, use that
                if dist(mean,oldMassCenter) <= MAX_PIXEL_MOVE:
                    #print 'Cluster ',mean,' is close to ',oldMassCenter
                    massCenter = mean
                    trackIndices = clus
        '''

        # Update the bounding box for the next frame (rows in u, cols in v)
        if len(trackIndices) > 0:
            u = trackIndices[:,0]
            v = trackIndices[:,1]
            bBox =[(np.min(u),np.min(v)),(np.max(u),np.max(v))]


# Displays the depth image.
# Kinect depth callback: normalises the frame, runs the tracker (after the
# startup delay), classifies the tracked region and shows both the cropped
# binary hand image ('bboxA') and the depth map ('Depth').
def display_depth(dev, data, timestamp):
    global trackIndices
    global bBox,buffer,classification
    # Stretch the depth values to span the 16-bit display range.
    # NOTE(review): 65536 / max is integer division under Python 2 — this
    # assumes an integer depth map from freenect; confirm dtype.
    data -= np.min(data.ravel())
    data *= 65536 / np.max(data.ravel())

    # Give the user startup_time seconds to get into position first
    if time.time() - start_time > startup_time:
        track(data.copy())
    else:
        return

    if len(trackIndices) > 0:
        # Build a small binary image of the tracked region, padded by 2
        # pixels on each side (hence the +5 size and +2 offsets)
        binImageA = np.zeros((bBox[1][0]-bBox[0][0] + 5, bBox[1][1]-bBox[0][1]+5))
        for (i,j) in trackIndices:
            binImageA[i-bBox[0][0]+2][j-bBox[0][1]+2] = 1

        #buffer.append(binImageA)
        # Classify the cropped region with a 5-nearest-neighbour vote
        classification = classify(binImageA,5)

        # Brighten the mask for display.  NOTE(review): uint8 * 20000 —
        # overflow/upcast behaviour depends on the numpy version; confirm.
        binImageA = binImageA.astype('uint8')
        binImageA = binImageA * 20000
        image2 = cv.CreateImageHeader((binImageA.shape[1], binImageA.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 1)
        cv.SetData(image2, binImageA.tostring(),binImageA.dtype.itemsize * binImageA.shape[1])
        cv.ShowImage('bboxA',image2)
        cv.WaitKey(30)

    #points = ck.projectPoints(data[v,u],u,v)
    #_,points = ck.depth2xyzuv(data[v,u],u,v)
    #trackIndices = []
    #for point in points:
    #    trackIndices.append([point[0],point[1]])

    # Show the normalised depth frame itself
    image = cv.CreateImageHeader((data.shape[1], data.shape[0]),cv.IPL_DEPTH_16U,1)
    cv.SetData(image, data.tostring(),data.dtype.itemsize * data.shape[1])

    cv.ShowImage('Depth', image)
    cv.WaitKey(3)
    


# Displays the tracker image (copy of RGB image with some pixels modified)
def display_tracker(dev,data,timestamp):
    """Kinect video callback: draw the tracking overlay (bounding box,
    centre of mass, classification label) on the RGB frame.

    Green overlay = a hand is probably being tracked, red = probably not.
    """
    global trackIndices, handA, handB
    #u = trackIndices[:,0]
    #v = trackIndices[:,1]

    #print '184 ', trackIndices
    if len(trackIndices) > 0:
        # NOTE(review): this bBox is a *local* variable (no global
        # statement) with (x, y) = (col, row) ordering — the opposite of
        # track()'s module-level bBox.
        u = trackIndices[:,1]
        v = trackIndices[:,0]
        bBox =[(np.min(u),np.min(v)),(np.max(u),np.max(v))]
    image = cv.CreateImageHeader((data.shape[1], data.shape[0]),
                                 cv.IPL_DEPTH_8U,
                                 3)
    # Note: We swap from RGB to BGR here
    cv.SetData(image, data[:, :, ::-1].tostring(),
               data.dtype.itemsize * 3 * data.shape[1])

    #print '196 ', trackIndices
    if len(trackIndices) > 0:
        if isTrackingHand:
            # Green box + green label when the region looks like a hand
            cv.Rectangle(image,bBox[1],bBox[0],cv.CV_RGB(0,255,0))
            cv.Circle(image,(int(massCenter[0]),int(massCenter[1])),4,(0,255,0),thickness=-1)
            put_text(image,classification,bBox[1],col=(0,255,0))
        else:
            # Red box when the tracked blob probably is not a hand
            cv.Rectangle(image,bBox[1],bBox[0],cv.CV_RGB(255,0,0))
            cv.Circle(image,(int(massCenter[0]),int(massCenter[1])),4,(0,0,255),thickness=-1)
            put_text(image,classification,bBox[1])

    # Elapsed seconds since startup, drawn in the top-left corner
    put_text(image,str(round(time.time()-start_time)),(20,20))

    # Morphological close
    #element = cv.CreateStructuringElementEx(5,5,3,3,cv.CV_SHAPE_ELLIPSE)
    #cv.MorphologyEx(image,image,None,element,cv.CV_MOP_CLOSE)
    cv.ShowImage('Tracker', image)
    cv.WaitKey(3)


def put_text(image,text,pos,col=(0,0,255)):
    """Draw 'text' on 'image' at position 'pos' in colour 'col'."""
    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5)
    cv.PutText(image, text, pos, font, col)

# Displays the normal RGB camera image
def display_rgb(dev, data, timestamp):
    """Render the RGB camera frame in the 'RGB' window, then forward a
    copy of the frame to the tracker overlay display."""
    height = data.shape[0]
    width = data.shape[1]
    rgb_header = cv.CreateImageHeader((width, height), cv.IPL_DEPTH_8U, 3)
    # OpenCV expects BGR byte order, so reverse the channel axis.
    bgr_bytes = data[:, :, ::-1].tostring()
    cv.SetData(rgb_header, bgr_bytes, data.dtype.itemsize * 3 * width)
    cv.ShowImage('RGB', rgb_header)
    cv.WaitKey(3)
    display_tracker(dev, data.copy(), timestamp)

# Misc operations that are called by the kinect
def body(dev, ctx):
    """Kinect body callback used while recording training data.

    Once more than 50 frames have accumulated in the global 'buffer',
    extract features for each frame, append them to the gesture's
    existing feature file (data/<name>.features, name from sys.argv[1]),
    and stop the capture loop by raising freenect.Kill.
    """
    name = sys.argv[1]
    if len(buffer) > 50:
        filename = 'data/' + name + '.features'

        # Start from the previously saved features, if any.  'with'
        # closes the handles even on error (the original leaked them
        # and shadowed the builtin 'file').
        if os.path.isfile(filename):
            with open(filename, 'r') as infile:
                fv = cPickle.load(infile)
        else:
            fv = []

        # Compute the new features *before* opening for write, so a
        # failure in feature extraction no longer truncates the file.
        for frame in buffer:
            fv.append(features.getFeatures(frame.astype('uint8'), name))
        with open(filename, 'w') as outfile:
            cPickle.dump(fv, outfile)

        #infoFile = open('gestures.info','rw')
        #infoFile.write(name+""" = 'data/""" +name+'.features'+"""'""")
        #infoFile.write("")

        print('50 frames saved, killing')
        raise freenect.Kill
    return

def body2(dev, ctx):
    """No-op kinect body callback used when nothing needs recording."""
    pass


# Find index closest to index idx of array with length lenB in an array of length lenA
def mapIndex(lenA, lenB, idx):
    """Map index 'idx' of a length-lenB sequence onto the nearest
    corresponding index of a length-lenA sequence."""
    scale = float(lenA) / lenB
    return int(round(scale * idx))

import select
import sys

# ---- Entry point: choose a data source based on 'mode' ----

# 'passive': replay depth + RGB frames recorded earlier with cPickle
if mode == 'passive':
    loadGestures()
    dData = cPickle.load(open(pathPrefix + 'fingers_spread_depth.data'))
    iData = cPickle.load(open(pathPrefix + 'fingers_spread_image.data'))
    start_time = time.time()
    for i in range(len(dData)):
        frame = dData[i]
        #frame = rescale.frame(frame)
        display_depth(None, frame, None)

        # Depth and RGB recordings may differ in frame count, so map the
        # depth index onto the nearest RGB index.
        frame = iData[mapIndex(len(iData),len(dData),i)]
        display_tracker(None, frame, None)

# 'async': drive everything from libfreenect's callback loop
elif mode == 'async':
    loadGestures()
    start_time = time.time()
    #try:
    freenect.runloop(depth=display_depth, video=display_tracker, body=body2)
    #except TypeError:
    #    print 'Oh God'

# anything else: poll the kinect synchronously until stdin sees any input
else:
    loadGestures()
    start_time = time.time()
    while(True):
        display_depth(None,freenect.sync_get_depth()[0],None)
        display_tracker(None,freenect.sync_get_video()[0],None)
        # Non-blocking check for a line on stdin; any input stops the loop
        i,o,e = select.select([sys.stdin],[],[],0.0001)
        for s in i:
            if s == sys.stdin:
                # NOTE(review): 'input' shadows the builtin; value unused
                input = sys.stdin.readline()
                raise freenect.Kill

