import cv
import numpy as np
import cPickle
import freenect
import time
import calibkinect as ck
import moments
import math
import features
import os.path
import os
import select
import sys

class KinectTracker:

    def __init__(self,arg1=True):
        # initialize display windows
        #cv.NamedWindow('RGB')

        # Prefix for path to data
        self.pathPrefix = '/media/disk/'

        # Initialize time limit
        self.start_time = 0

        # Time before tracking starts
        self.startup_time = 5

        # Store the indices of the pixels found by the tracker
        self.trackIndices = []

        # Center of mass
        self.massCenter = (0,0)
        self.avgDepth = 0

        # Bounding box
        self.bBox = [(0,0),(1,1)]

        # Classification
        self.classification = 'None'

        # Gesture dict
        self.gestures = {}

        # Indicates whether or not a hand is probably being tracked
        self.isTrackingHand = False

        # Dictionary storing the mean feature for each gesture
        self.gestureMeans = {}

        # Whether or not to render depth image
        self.render_depth = arg1

        if self.render_depth:
            cv.NamedWindow('Depth')
            cv.MoveWindow('Depth',640,50)
            cv.NamedWindow('bboxA')
            cv.MoveWindow('bboxA',0,50+480)

        # Whether or not to render tracker image
        self.render_tracker = True
        if self.render_tracker:
            cv.NamedWindow('Tracker')
            cv.MoveWindow('Tracker',640,50+480)

        self.loadGestures()
        self.start_time = time.time()

    # loads the gesture feature files
    def loadGestures(self):
        for g in os.listdir('data/'):
            n = g.split('.')[0]
            if len(n) == 0:
                continue
            self.gestures[n] = cPickle.load(open('data/'+ g))[50:61]
        self.meanFeature(self.gestures)

    # Calculates the mean feature vector for gesture
    def meanFeature(self,gestures):
        for key in gestures:
            fs = []
            for t2 in gestures[key]:
                fs.append(t2[0])
            fs = np.array(fs)
            self.gestureMeans[key] = np.mean(fs,axis=0)

    # Euclidean distance between 2-tuples
    def dist(self,tupleA,tupleB):
        return math.sqrt((tupleA[0]-tupleB[0])**2 + (tupleA[1]-tupleB[1])**2)

    # Clustering wrapper for openCV cluster algorithm
    def cluster(self,trackIndices,k):
        tmp = []
        for chnk in trackIndices:
            tmp.append(chnk)
        trackIndices = np.array(tmp)
        # Generate samples for clustering
        samples = cv.fromarray(trackIndices.astype('float32'))

        # Generate matrix of labels for clustering
        labels = cv.CreateMat(1,len(trackIndices),cv.CV_32S)
        cv.Set(labels,0.0)

        # Do 2-means clustering
        cv.KMeans2(samples,k,labels,(cv.CV_TERMCRIT_ITER, 5, 0))

        # Convert the labels back to a numpy array (CvMatrices are an impossible format)
        labels = np.asarray(labels)[0]

        clusters = []
        for i in range(k):
            clusters.append(trackIndices[labels == float(i)])

        return clusters

    # Calculates the center of mass for a binary image
    def centerOfMass(self,frame):
        af = np.sum(frame)
        xs = np.size(frame,1)
        ys = np.size(frame,0)
        xi = np.repeat([np.arange(xs)],ys,axis=0)
        yi = np.repeat([np.arange(ys)],xs,axis=0).transpose()
        xc = np.sum(xi*frame)/af
        yc = np.sum(yi*frame)/af
        return (xc,yc)

    # Thresholds an image for isovalue iso, with tolerance tol
    # Return indices of pixels in range
    def threshold(self,f,iso,tol):
        indices = np.argwhere(np.abs(f-iso) < tol)
        return indices

    # Same as above, but returns binary thresholded image instead of indices
    def threshold_bin(self,f,iso,tol):
        a = np.zeros(f.shape)
        a[np.abs(f - iso) < tol] = 1
        return a

    # Finds the most likely classification given the recorded known gestures
    # KNN Classifier
    def classify(self,frame,k):
        t1 = features.getFeatures(frame.astype('uint8'),'Unknown')
        minD = 99999
        maxKey = "None"
        a = (1,1,1)
        candidates = []

        # If the frame is not remotely handshaped, classify as noise
        if (frame.shape[0] * frame.shape[1] > 200*200) or (frame.shape[0] > 200) or (frame.shape[1] > 200):
            self.isTrackingHand = False
            return "Noise"
       
        # Look if the window looks somewhat like a known gesture
        self.isTrackingHand = False
        for key in self.gestureMeans:
            if features.dist(t1,(self.gestureMeans[key],(),(),'thing'),(1,0,0)) < 0.5:
                self.isTrackingHand = True

        # KNN classification
        for key in self.gestures:
            for t2 in self.gestures[key]:
                t2Dist = features.dist(t1,t2,a)
                candidates.append((t2Dist,t2[3]))
        candidates.sort()
        candidates = candidates[:k]

        maxDict = {}
        for (_,c) in candidates:
            if not c in maxDict:
                maxDict[c] = 1
            else:
                maxDict[c] = maxDict[c] + 1

        maxKey, maxValue = '',0
        for key in maxDict:
            if maxDict[key] > maxValue:
                maxValue = maxDict[key]
                maxKey = key
                
        return maxKey
        

    # Does tracking in the depth image
    def track(self, data):
        MAX_PIXEL_MOVE = 40
        
        # Find indices to track
        if len(self.trackIndices) == 0:
            binImage = self.threshold_bin(data,np.min(data),800)
            self.trackIndices = np.argwhere(binImage)
            self.massCenter = self.centerOfMass(binImage)
           
            # Compute bounding box
            u = self.trackIndices[:,0]
            v = self.trackIndices[:,1] 
            self.bBox =[(np.min(u),np.min(v)),(np.max(u),np.max(v))]
            self.avgDepth = np.min(data)
            #imgBox = data[self.bBox[0][0]:self.bBox[1][0],self.bBox[0][1]:self.bBox[1][1]]
            print 'initial segmentation'


        else:
            # Create a window in which we look for the new hand position
            window = (max(self.bBox[0][0]-MAX_PIXEL_MOVE,0),min(self.bBox[1][0]+1+MAX_PIXEL_MOVE,data.shape[0]-1),\
                     max(self.bBox[0][1]-MAX_PIXEL_MOVE,0),min(self.bBox[1][1]+1+MAX_PIXEL_MOVE,data.shape[1]-1))
            
            # imgBox contains the data in the current frame where the hand was last seen + bounding pixels
            imgBox = data[window[0]:window[1],window[2]:window[3]]
                   
            # Display the tracking window...
            #imageR = cv.CreateImageHeader((imgBox.shape[1], imgBox.shape[0]),cv.IPL_DEPTH_16U,1)
            #cv.SetData(imageR, imgBox.tostring(),imgBox.dtype.itemsize * imgBox.shape[1])
        
            #cv.ShowImage('ImageR', imageR)
            #cv.WaitKey(3)

            binImage = self.threshold_bin(imgBox,np.min(imgBox),800)
            tmpDepth = (np.mean(imgBox[np.nonzero(binImage)])/self.f2)+self.f1
            tmpDepth = ck.depth2xyzuv(np.array([tmpDepth]),np.array([0]),np.array([0]))[0][0][2]

            self.avgDepth = tmpDepth * -100
            self.trackIndices = np.argwhere(binImage)
            self.trackIndices += (window[0],window[2])

            self.massCenter = self.centerOfMass(binImage)
            self.massCenter = (self.massCenter[0]+window[2],self.massCenter[1]+window[0])

            if len(self.trackIndices) > 0:
                u = self.trackIndices[:,0]
                v = self.trackIndices[:,1] 
                #print 'old bBox: ',self.bBox
                self.bBox = [(np.min(u),np.min(v)),(np.max(u),np.max(v))]
            else:
                print 'lost hand!'
                #print 'new bBox: ',self.bBox


    # Displays the depth image
    def display_depth(self, dev, data, timestamp):
        self.f1 = np.min(data)
        self.f2 = 65536 / np.max(data)
        data -= self.f1
        data *= self.f2

        if time.time() - self.start_time > self.startup_time:
           # self.track(data.copy())
           self.track(data)
        else:
            return
        
        if len(self.trackIndices) > 0:
            binImageA = np.zeros((self.bBox[1][0]-self.bBox[0][0] + 5, self.bBox[1][1]-self.bBox[0][1]+5))
            
            binIndices = self.trackIndices - (self.bBox[0][0]+2,self.bBox[0][1]+2)
            binImageA[[binIndices[:,0],binIndices[:,1]]] = 1

            self.classification = self.classify(binImageA,5)

            binImageA = binImageA.astype('uint8')
            binImageA = binImageA * 20000
            image2 = cv.CreateImageHeader((binImageA.shape[1], binImageA.shape[0]),cv.IPL_DEPTH_8U,1)
            cv.SetData(image2, binImageA.tostring(),binImageA.dtype.itemsize * binImageA.shape[1])
            cv.ShowImage('bboxA',image2)
            cv.WaitKey(3)
        

        image = cv.CreateImageHeader((data.shape[1], data.shape[0]),cv.IPL_DEPTH_16U,1)
        cv.SetData(image, data.tostring(),data.dtype.itemsize * data.shape[1])
        
        cv.ShowImage('Depth', image)
        cv.WaitKey(3)
    
    def trackDepthData(self, data):
        self.f1 = np.min(data)
        self.f2 = 65536 / np.max(data)
        data -= self.f1
        data *= self.f2
        #data -= np.min(data.ravel())
        #data *= 65536 / np.max(data.ravel())

        if time.time() - self.start_time > self.startup_time:
           self.track(data)
        else:
            return
        if len(self.trackIndices) > 0:
            binImageA = np.zeros((self.bBox[1][0]-self.bBox[0][0] + 5, self.bBox[1][1]-self.bBox[0][1]+5))
            binIndices = self.trackIndices - (self.bBox[0][0]+2,self.bBox[0][1]+2)
            binImageA[[binIndices[:,0],binIndices[:,1]]] = 1
            self.classification = self.classify(binImageA,5)

        
    # Displays the tracker image (copy of RGB image with some pixels modified)
    def display_tracker(self, dev,data,timestamp):
        #if len(self.trackIndices) > 0:
        #    u = self.trackIndices[:,1] 
        #    v = self.trackIndices[:,0] 
        #    self.bBox =[(np.min(u),np.min(v)),(np.max(u),np.max(v))]
        image = cv.CreateImageHeader((data.shape[1], data.shape[0]), cv.IPL_DEPTH_8U,3)
        # Note: We swap from RGB to BGR here
        cv.SetData(image, data[:, :, ::-1].tostring(), data.dtype.itemsize * 3 * data.shape[1])
        

        if len(self.trackIndices) > 0 and not self.classification == 'Noise':
            if self.isTrackingHand:
                cv.Rectangle(image,(self.bBox[1][1],self.bBox[1][0]),(self.bBox[0][1],self.bBox[0][0]),cv.CV_RGB(0,255,0))
                cv.Circle(image,(int(self.massCenter[0]),int(self.massCenter[1])),4,(0,255,0),thickness=-1)
                self.put_text(image,self.classification,(self.bBox[1][1],self.bBox[1][0]),col=(0,255,0))
            else:
                cv.Rectangle(image,(self.bBox[1][1],self.bBox[1][0]),(self.bBox[0][1],self.bBox[0][0]),cv.CV_RGB(255,0,0))
                cv.Circle(image,(int(self.massCenter[0]),int(self.massCenter[1])),4,(0,0,255),thickness=-1)
                self.put_text(image,self.classification,(self.bBox[1][1],self.bBox[1][0]))
        
        self.put_text(image,str(round(time.time()-self.start_time)),(20,20))
        
        cv.ShowImage('Tracker', image)
        cv.WaitKey(3)

    # Wrapper for placing text on the screen
    def put_text(self,image,text,pos,col=(0,0,255)):
        cv.PutText(image,text,pos,cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX,0.5,0.5),col)

    # Displays the normal RGB camera image
    def display_rgb(self,dev, data, timestamp):
        image = cv.CreateImageHeader((data.shape[1], data.shape[0]),cv.IPL_DEPTH_8U, 3)
        # Note: We swap from RGB to BGR here
        cv.SetData(image, data[:, :, ::-1].tostring(), data.dtype.itemsize * 3 * data.shape[1])
        cv.ShowImage('RGB', image)
        cv.WaitKey(3)
        self.display_tracker(dev,data.copy(),timestamp)

    # Misc operations that are called by the kinect
    def body(self,dev, ctx):
        name = sys.argv[1]
        if len(self.buffer)>50:
            filename = 'data/' + name + '.features'
            
            if os.path.isfile(filename):
                file = open(filename,'r')
                fv = cPickle.load(file)
                file.close()
            else:
                fv = []
            
            file = open(filename,'w')
            for frame in buffer:
                fv.append(features.getFeatures(frame.astype('uint8'),name))
            cPickle.dump(fv,file)
            file.close()

            #infoFile = open('gestures.info','rw')
            #infoFile.write(name+""" = 'data/""" +name+'.features'+"""'""")
            #infoFile.write("")

            print('50 frames saved, killing')
            raise freenect.Kill
        return

    def body2(self, dev, ctx):
        return


    # Find index closest to index idx of array with length lenB in an array of length lenA
    def mapIndex(self,lenA, lenB, idx):
        ratio = float(lenA)/lenB
        return int(round(ratio*idx))

    def step(self):
        if not self.render_depth:
            self.trackDepthData(freenect.sync_get_depth()[0])
        else:
            self.display_depth(None,freenect.sync_get_depth()[0],None)
        if self.render_tracker:
            self.display_tracker(None,freenect.sync_get_video()[0],None)
        return ((self.massCenter[0],self.massCenter[1],self.avgDepth),self.classification)

    def runPassive(self):
        self.loadGestures()
        dData = cPickle.load(open(pathPrefix + 'fingers_spread_depth.data'))
        iData = cPickle.load(open(pathPrefix + 'fingers_spread_image.data'))
        self.start_time = time.time()
        for i in range(len(dData)):
            frame = dData[i]
            #frame = rescale.frame(frame)
            self.display_depth(None, frame, None)
            frame = iData[mapIndex(len(iData),len(dData),i)]
            self.display_tracker(None, frame, None)

    # Use kinect as source (kills after 60 seconds)
    def runAsync(self):
        self.loadGestures()
        self.start_time = time.time()
        freenect.runloop(depth=self.display_depth, video=self.display_tracker, body=self.body2)

    # Use synced function
    # Seems unstable
    def runSync(self):
        self.loadGestures()
        self.start_time = time.time()
        while(True):
            self.display_depth(None,freenect.sync_get_depth()[0],None)
            self.display_tracker(None,freenect.sync_get_video()[0],None)
            i,o,e = select.select([sys.stdin],[],[],0.0001)
            for s in i:
                if s == sys.stdin:
                    input = sys.stdin.readline()
                    raise freenect.Kill

if __name__ == '__main__':
    # Start the tracker against a live Kinect using the async freenect loop.
    KinectTracker().runAsync()
