"""Module responsible for depth image processing"""
from PIL import Image, ImageQt, ImageDraw
from numpy import clip, min, max, require, floor, array, uint8, histogram, zeros, argmax, hstack
from re import findall
import scipy.io as sio
from scipy.ndimage import center_of_mass
from scipy.ndimage.measurements import label, find_objects, sum
import params
import numpy
import _cam as cam
from PyQt4 import QtGui, QtCore
from time import clock, sleep
from multiprocessing import Process, Queue

class ImageProcessor():
    """Tracks two hands in a depth-camera stream.

    Per-frame workflow: getDepthImage() grabs and thresholds a depth frame,
    processImage() builds (calibration phase) or tracks the BDS model of each
    hand, getObjectMap() and getSeparatedHandsParams() label and measure the
    segmented hand blobs, and getDepthImageWithSelection() renders the frame
    with a HUD / selection overlay.  The commented-out Queue/Process code is
    a disabled multiprocessing variant of the tracker.

    Hand ROIs are stored everywhere as [rowStart, rowEnd, colStart, colEnd].
    """
    def __init__(self):
        self.depthImage = None       # latest thresholded depth frame (numpy array)
        self.RGBImage = None         # latest raw RGB frame
        self.leftHandStatus = None   # dict filled by getSeparatedHandsParams()
        self.rightHandStatus = None  # dict filled by getSeparatedHandsParams()
        self.runs = 0                # frame counter, cycled by reinit()
        self.initial = True          # True while in the calibration phase
        self.handsTracked = False
        self.time = clock() #for FPS counting purposes
             
        #prepare gaussian type mask (currently only used by the commented-out
        #histogram weighting in getBDS)
        self.weights = createGaussianMask(100, 100)
#        
#        self.imQL = Queue(1)
#        self.posQL = Queue(1)
#        self.modelQL = Queue(1)
#        
#        self.imQR = Queue(1)
#        self.posQR = Queue(1)
#        self.modelQR = Queue(1)
#        
#        self.procL = Process(target = trackHandParallel, args = (self.imQL, self.posQL, self.modelQL))
#        self.procL.start()
#        
#        self.procR = Process(target = trackHandParallel, args = (self.imQR, self.posQR, self.modelQR))
#        self.procR.start()
         
    def cleanup(self):
        """Terminate additional processes (no-op while the multiprocessing
        tracker below stays disabled)."""
#        self.procL.terminate()
#        self.procR.terminate()      
        pass
        
    def reinit(self):     
        """Per-cycle bookkeeping: remember the previous frame, leave the
        calibration phase after params.INITIAL_FRAMES frames, and restart
        the FPS counter each time the frame counter wraps."""
        #set initial phase (calibration) and reset FPS counter every 100 cycles
        self.prevDepthImage = self.depthImage
        self.runs += 1
        if self.runs > params.INITIAL_FRAMES:
            if self.initial == True:
                self.initial = False
            self.runs = 0
            self.time = clock()
            
        self.handsTracked = False
    
    def processImage(self):
        """Build the hand templates (calibration) or track both hands.

        During the initial phase the BDS models are recomputed every frame
        from two fixed 100x100 regions where the user is told to hold the
        hands (the rectangles drawn by getDepthImageWithSelection()).
        Afterwards the ROIs are tracked with trackHandSerial().
        """
        if self.initial:
            #create first template frame of both hands
            rightHandIm = self.depthImage[200:300][:, 220:320]           
            leftHandIm = self.depthImage[200:300][:, 370:470]
            self.leftHist = getBDS(leftHandIm)     
            self.rightHist = getBDS(rightHandIm) 

            self.rightPos = [200, 300, 220, 320]
            self.leftPos = [200, 300, 370, 470]
        else:
            #hand tracking
            # NOTE(review): right-hand tracking is disabled here -- only the
            # left ROI is updated; confirm whether this is intentional.
#            self.rightPos, self.rightHist = self.trackHandSerial(self.rightPos, self.rightHist)
            self.leftPos, self.leftHist = self.trackHandSerial(self.leftPos, self.leftHist)
            
#            #right
#            self.imQR.put(self.depthImage)
#            self.posQR.put(self.leftPos)
#            self.modelQR.put(self.leftHist)
#            
#            #left
#            self.imQL.put(self.depthImage)
#            self.posQL.put(self.rightPos)
#            self.modelQL.put(self.rightHist)
#                   
#            #get data
#            self.rightPos = self.posQR.get() 
#            self.rightHist = self.modelQR.get()
#            self.leftPos = self.posQL.get() 
#            self.leftHist = self.modelQL.get()

            #self.handsTracked= True
                      
    def trackHandSerial(self, pos, model):
        """Track single hand using Bi-Dimensional-Sum and Bhattacharyya Coefficient - non multiprocessor version

        pos   -- hand ROI as [rowStart, rowEnd, colStart, colEnd]
        model -- BDS descriptor (see getBDS()) candidates are compared against
        Returns (newPos, model); the model itself is NOT updated here (the
        update line at the bottom is commented out).

        Greedy coarse-to-fine search: the candidate set is the current ROI
        shifted in 8 compass directions at radii 80, 40 and 20 px (plus the
        unshifted ROI at index 0 of each radius group).  The candidate with
        the highest Bhattacharyya coefficient is adopted repeatedly; when the
        unshifted ROI wins, all offsets are halved until they reach 1 px,
        after which the search terminates.
        """
#        
#        posOld = pos
#       
     
        maxMove = 80             
        offset1 = array([[0, -maxMove, -maxMove, 0, maxMove, maxMove, maxMove, 0, -maxMove],
                        [0, 0, maxMove, maxMove, maxMove, 0, -maxMove, -maxMove, -maxMove]],
                        dtype = int)
        maxMove = 40             
        offset2 = array([[0, -maxMove, -maxMove, 0, maxMove, maxMove, maxMove, 0, -maxMove],
                        [0, 0, maxMove, maxMove, maxMove, 0, -maxMove, -maxMove, -maxMove]],
                        dtype = int)
        maxMove = 20            
        offset3 = array([[0, -maxMove, -maxMove, 0, maxMove, maxMove, maxMove, 0, -maxMove],
                        [0, 0, maxMove, maxMove, maxMove, 0, -maxMove, -maxMove, -maxMove]],
                        dtype = int)
        # row 0 = row shifts, row 1 = column shifts; 27 candidates in total
        offset = hstack((offset1, offset2, offset3))
        
        while True:
            bhCoeff = []
            hist = []
            for i in range(offset.shape[1]):
                coord = [pos[0] + offset[0, i], pos[1] + offset[0, i],
                         pos[2] + offset[1, i], pos[3] + offset[1, i]]
                # frame is assumed to be 480x640 (hard-coded bounds);
                # out-of-frame candidates score 0 and can never be chosen
                if coord[0] < 0 or coord[2] < 0 or coord[1] > 480 or coord[3] > 640:
                    hist.append(None)
                    bhCoeff.append(0)
                    continue
                candidate = (self.depthImage[pos[0] + offset[0, i]:pos[1] + offset[0, i]]
                                  [:, pos[2] + offset[1, i]:pos[3] + offset[1, i]])
                hist.append(getBDS(candidate))
                bhCoeff.append(computeBhCoeff(model, hist[i]))
                
            # argmax takes the FIRST best candidate; index 0 is "stay put"
            mov = argmax(bhCoeff)             
            if mov == 0:
                if max(offset) > 1:
                    offset = offset // 2
                else:
                    break
            # applying offset[:, 0] (all zeros) leaves pos unchanged
            pos = [pos[0] + offset[0, mov], pos[1] + offset[0, mov],
                            pos[2] + offset[1, mov], pos[3] + offset[1, mov]]
               
#        candIm = (self.depthImage[pos[0]:pos[1]:, pos[2]:pos[3]])  
#        modelIm = (self.prevDepthImage[posOld[0]:posOld[1]:, posOld[2]:posOld[3]])  
#        
#        sio.savemat("klatka" + str(self.runs) + '.mat', {'img':self.depthImage, 'model':modelIm, 'cand':candIm,
#                                                         'bhcoeff':bhCoeff[mov] }, oned_as = 'row')     
#        model = hist[mov]
        return pos, model

    def getDepthImage(self):   
        """Returns depth image with first plane objects only as numpy array.

        Zeroes out pixels whose raw reading exceeds threshold b (derived
        from params.MAX_DISTANCE), then converts raw values to distance with
        100 / (-0.00307 * raw + 3.33) -- presumably the standard Kinect
        disparity-to-centimetres approximation; confirm units in params.
        """ 
        self.depthImage = cam.getDepthImageRaw() 
        b = 1085 - 32573 / params.MAX_DISTANCE
        self.depthImage = self.depthImage * (self.depthImage < b)
        self.depthImage = 100 / (-0.00307 * self.depthImage + 3.33)
#        lastMinRow = max(self.depthImage[479])
#        self.depthImage = self.depthImage * (self.depthImage < lastMinRow + 10)
        #weave.blitz("depthImage = 100 / ( -0.00307 * depthImage + 3.33 )") // if weave will be python3 compatible      
        return self.depthImage
      
    def getObjectMap(self):
        """Returns numpy array with labeled objects -> 8-connected
        (the full 3x3 True structuring element labels diagonal neighbours
        as the same object)."""
        [self.objectMap, self.numObjects] = label(self.depthImage, array([[ True, True, True], [ True, True, True], [ True, True, True]], dtype = bool))
        return self.objectMap, self.numObjects
    
    def getSeparatedHandsParams(self):
        """
        Processes object map and computes several parameters for both hands
        leftHandStatus[] // rightHandStatus[] with fields:
            hand_center   -- (row, col) of the blob's center of mass, as ints
            hand_position -- [rowStart, rowEnd, colStart, colEnd] bounding box
            hand_area     -- blob area in pixels
            hand_distance -- depth value sampled at hand_center
            
        """  
        self.leftHandStatus = {}
        self.rightHandStatus = {}       
        
        #separated two biggest objects (expecting hands)
        
        objectsSilhouette = self.objectMap > 0        
        # 'sum' is scipy.ndimage's labelled sum, not the builtin; it totals
        # the binary silhouette per label.  Label 0 (background) scores 0.
        objectsArea = sum(objectsSilhouette, self.objectMap, range(self.numObjects + 1))
        objectsArea = [(objectsArea[i], i) for i in range(self.numObjects + 1)]
        objectsArea.sort(key = lambda id: id[0] , reverse = True)
        
        leftHandId = objectsArea[0][1]
        rightHandId = objectsArea[1][1]
        
        self.separatedLeftHand = self.objectMap == leftHandId
        self.separatedRightHand = self.objectMap == rightHandId
        
        objectsPosition = find_objects(self.objectMap)
        
        # find_objects yields slice pairs; running findall over their repr
        # extracts the four bounding-box numbers as strings (hacky but works)
        self.leftHandStatus["hand_position"] = objectsPosition[leftHandId - 1]
        self.leftHandStatus["hand_position"] = findall('\d+', str(self.leftHandStatus["hand_position"]))
        for i in range(len(self.leftHandStatus["hand_position"])):
            self.leftHandStatus["hand_position"][i] = int(self.leftHandStatus["hand_position"][i])
            
        self.leftHandStatus["hand_area"] = objectsArea[0][0]   
        
        #compute center of mass of each hand and distinguish between left and right
        # center of mass is computed inside the bounding box, then shifted
        # back into full-image coordinates
        self.leftHandStatus["hand_center"] = require(floor(center_of_mass(self.separatedLeftHand[objectsPosition[leftHandId - 1]])), dtype = int)
        self.leftHandStatus["hand_center"][0] += self.leftHandStatus["hand_position"][0]
        self.leftHandStatus["hand_center"][1] += self.leftHandStatus["hand_position"][2]
        
        self.leftHandStatus["hand_distance"] = self.depthImage[self.leftHandStatus["hand_center"][0]][self.leftHandStatus["hand_center"][1]]     
    
        self.rightHandStatus["hand_position"] = objectsPosition[rightHandId - 1]
        self.rightHandStatus["hand_position"] = findall('\d+', str(self.rightHandStatus["hand_position"]))
        for i in range(len(self.rightHandStatus["hand_position"])):
            self.rightHandStatus["hand_position"][i] = int(self.rightHandStatus["hand_position"][i]) 
            
        #compute center of mass of each hand and distinguish between left and right
        self.rightHandStatus["hand_center"] = require(floor(center_of_mass(self.separatedRightHand[objectsPosition[rightHandId - 1]])), dtype = int)
        self.rightHandStatus["hand_center"][0] += self.rightHandStatus["hand_position"][0]
        self.rightHandStatus["hand_center"][1] += self.rightHandStatus["hand_position"][2]
     
        self.rightHandStatus["hand_area"] = objectsArea[1][0]    
        self.rightHandStatus["hand_distance"] = self.depthImage[self.rightHandStatus["hand_center"][0]][self.rightHandStatus["hand_center"][1]]                               
        #scipy.io.savemat( "nowy.mat", mdict = {'dataL': separatedLeftHand, 'dataR': separatedRightHand} )
    
        #we have taken two biggest object, here we check if left is really left and switch if necessary
        # (compares center columns; presumably the view is mirrored so the
        # user's left hand lies at the larger column index -- TODO confirm)
        if self.leftHandStatus["hand_center"][1] < self.rightHandStatus["hand_center"][1]:
            tmp = self.rightHandStatus
            self.rightHandStatus = self.leftHandStatus
            self.leftHandStatus = tmp
    
            tmp2 = array(self.separatedRightHand)
            self.separatedRightHand = self.separatedLeftHand
            self.separatedLeftHand = tmp2          
    
        return self.leftHandStatus, self.rightHandStatus, self.separatedLeftHand, self.separatedRightHand
      
    def getDepthImageWithSelection(self):
        """Returns depth image converted from array to QPixmap object,
        with either the calibration HUD (initial phase) or the tracked
        hand rectangles and an FPS counter painted on top."""    
        maxValue = max(self.depthImage) 
        minValue = min(self.depthImage)
        # NOTE(review): scales by maxValue rather than (maxValue - minValue),
        # so the full 0-255 range is only reached when minValue == 0
        depthIm = array((self.depthImage - minValue) / maxValue * 255, uint8)     
        img = depth2QImage(depthIm)
        pixmap = QtGui.QPixmap(img)

        pen = QtGui.QPen()
        draw = QtGui.QPainter(pixmap)
        
        if self.initial:
            #draw hud
            
            draw.setRenderHint(QtGui.QPainter.Antialiasing, True)
           
            pen.setColor(QtGui.QColor(92, 178, 244, 255))
            draw.setPen(pen)
            
            #draw text
            font = QtGui.QFont("Segoe UI", 10, 75)
            draw.setFont(font)
            draw.drawText(200, 320, "Place your hands and wait for rectangles to disappear")
            draw.drawText(240, 190, "Right hand")
            draw.drawText(390, 190, "Left hand")
            draw.drawText(200, 340, "Ticks left: " + str(params.INITIAL_FRAMES - self.runs))
            
            #draw rectangles matching the calibration ROIs in processImage()
            pen.setWidth(2)
            pen.setColor(QtGui.QColor(255, 255, 255, 255))
            draw.setPen(pen)
            draw.setBrush(QtGui.QBrush(QtGui.QColor(255, 233, 113, 50)))
            draw.drawRect(220, 200, 100, 100)            
            draw.drawRect(370, 200, 100, 100)
            
            #draw circles      
            pen.setWidth(0)
            pen.setColor(QtGui.QColor(255, 255, 255, 0))
            draw.setPen(pen)
            draw.setBrush(QtGui.QBrush(QtGui.QColor(255, 255, 255, 128)))
            center = QtCore.QPoint(270, 250)
            draw.drawEllipse(center, 6, 6)
            center = QtCore.QPoint(420, 250)
            draw.drawEllipse(center, 6, 6)
        else:
            #mark selected hands (ROI is [rowStart, rowEnd, colStart, colEnd],
            #drawRect wants x = column, y = row)
            pen.setWidth(2)
            pen.setColor(QtGui.QColor(255, 255, 255, 255))
            draw.setPen(pen)
            draw.setBrush(QtGui.QBrush(QtGui.QColor(255, 233, 113, 50)))    
                   
            draw.drawRect(self.leftPos[2], self.leftPos[0], 100, 100)
            draw.drawRect(self.rightPos[2], self.rightPos[0], 100, 100)
            
        #draw FPS (frames since the last reinit() wrap over elapsed time)
        font = QtGui.QFont("Segoe UI", 10, 75)
        draw.setFont(font)
        draw.setPen(QtGui.QPen(QtGui.QColor(255, 20, 20, 255)))
        draw.drawText(570, 470, '{0} {1:.1f} '.format("FPS:", self.runs / (clock() - self.time)))
        
        return pixmap
    
    def getRGBImage(self):
        """Returns colour RGB image in QImage format from array"""
        self.RGBImage = cam.getRGBImageRaw()  
        img = rgb2QImage(self.RGBImage) 
        return img
    
def createGaussianMask(width, height):
    """Create a Gaussian-type (inverted paraboloid) mask normalized to [0, 1].

    The weight at (i, j) decreases with the squared distance from the mask
    center: it peaks at 1.0 in the middle and reaches 0.0 in the farthest
    corner.  Vectorized with numpy broadcasting instead of the original
    O(width * height) Python double loop; the returned values are identical.

    width  -- number of rows of the mask
    height -- number of columns of the mask
    returns a (width, height) float numpy array
    """
    wCenter = int(width / 2)
    hCenter = int(height / 2)
    # negative squared distance from the center, computed separably per axis
    rowTerm = -(wCenter - numpy.arange(width, dtype = float)) ** 2
    colTerm = -(hCenter - numpy.arange(height, dtype = float)) ** 2
    # broadcasting adds every row term to every column term -> (width, height)
    weights = rowTerm[:, None] + colTerm[None, :]
    # shift and scale into the [0, 1] range
    weights = weights - min(weights)
    weights = weights / max(weights)
    return weights
    
def rgb2QImage(rgb):
    """Convert the 3D numpy array `rgb` into a 32-bit QImage.

    `rgb` must have three dimensions: vertical axis, horizontal axis and the
    RGB channel axis.  Qt's Format_RGB32 stores pixels as BGRA in memory, so
    the channels are reversed and an opaque alpha plane is appended.
    """
    height, width, _ = rgb.shape
    bgra = numpy.empty((height, width, 4), numpy.uint8, 'C')
    bgra[..., :3] = rgb[..., ::-1]   # RGB -> BGR channel swap in one slice
    bgra[..., 3].fill(255)           # fully opaque alpha

    image = QtGui.QImage(bgra.data, width, height, QtGui.QImage.Format_RGB32)
    # keep a reference to the pixel buffer so it outlives this call
    image.ndarray = bgra
    return image

def depth2QImage(depth):
    """Convert a 2-D depth numpy array into a greyscale 32-bit QImage.

    Qt's Format_RGB32 stores pixels as BGRA in memory, so the single depth
    plane is replicated into B, G and R, with an opaque alpha plane.
    """
    height, width = depth.shape
    bgra = numpy.empty((height, width, 4), numpy.uint8, 'C')
    bgra[..., :3] = depth[..., None]   # broadcast depth into B, G and R
    bgra[..., 3].fill(255)             # fully opaque alpha

    image = QtGui.QImage(bgra.data, width, height, QtGui.QImage.Format_RGB32)
    # keep a reference to the pixel buffer so it outlives this call
    image.ndarray = bgra
    return image

def getBDS(image):
    """Compute the Bi-Dimensional-Sum descriptor of a depth patch.

    Concatenates the column sums and the row sums of the patch and scales
    the resulting vector so its elements sum to 1.  This replaces a plain
    histogram and gives a better result for depth images.
    """
#    hist = histogram(image, bins = int(params.MAX_DISTANCE / 5), range = (0, params.MAX_DISTANCE), weights = weights)[0]
    columnSums = image.sum(axis = 0)
    rowSums = image.sum(axis = 1)
    descriptor = hstack((columnSums, rowSums))
    return descriptor / descriptor.sum()

def computeBhCoeff(model, candidate):
    """Compute the Bhattacharyya Coefficient of two discrete distributions.

    A result of 1 means the two pdfs are identical; 0 means they do not
    overlap at all.
    """
    overlap = numpy.sqrt(model * candidate)
    return overlap.sum()

def trackHandParallel(imQ, posQ, modelQ):
    """Track single hand using Bi-Dimensional-Sum and Bhattacharyya Coefficient.

    Worker-process entry point (see the disabled Process setup in
    ImageProcessor.__init__): loops forever, reading a depth frame from imQ,
    the hand's last ROI ([rowStart, rowEnd, colStart, colEnd]) from posQ and
    its BDS model from modelQ, then running the same greedy shrinking-offset
    search as ImageProcessor.trackHandSerial.  Unlike the serial version it
    also refreshes the model from the winning candidate before writing the
    results back to posQ / modelQ.  Runs until terminated externally.
    """
    maxMove = 50             
    # row 0 = row shifts, row 1 = column shifts: stay-put plus 8 compass moves
    offset = array([[0, -maxMove, -maxMove, 0, maxMove, maxMove, maxMove, 0, -maxMove],
                    [0, 0, maxMove, maxMove, maxMove, 0, -maxMove, -maxMove, -maxMove]],
                   dtype = int)
    while True:
        # blocking reads: one work item per tracked frame
        im = imQ.get()
        pos = posQ.get()
        model = modelQ.get()
        while True:
            bhCoeff = []
            hist = []
            for i in range(offset.shape[1]):
                coord = [pos[0] + offset[0, i], pos[1] + offset[0, i],
                         pos[2] + offset[1, i], pos[3] + offset[1, i]]
                # frame assumed 480x640 (hard-coded bounds); out-of-frame
                # candidates score 0 and can never win
                if coord[0] < 0 or coord[2] < 0 or coord[1] > 480 or coord[3] > 640:
                    hist.append(None)
                    bhCoeff.append(0)
                    continue
                candidate = (im[pos[0] + offset[0, i]:pos[1] + offset[0, i]]
                                  [:, pos[2] + offset[1, i]:pos[3] + offset[1, i]])
                hist.append(getBDS(candidate))
                bhCoeff.append(computeBhCoeff(model, hist[i]))
                
            # argmax takes the FIRST best candidate; index 0 is "stay put"
            mov = argmax(bhCoeff)             
            if mov == 0:
                # no move improved the match: refine the step, then stop at 1 px
                if max(offset) > 1:
                    offset = offset // 2
                else:
                    break
            # applying offset[:, 0] (all zeros) leaves pos unchanged
            pos = [pos[0] + offset[0, mov], pos[1] + offset[0, mov],
                            pos[2] + offset[1, mov], pos[3] + offset[1, mov]]
             
        # adopt the winning candidate's descriptor as the new model
        # NOTE(review): hist[0] is None when the unshifted ROI itself is out
        # of frame -- presumably impossible for a valid incoming pos; confirm
        model = hist[mov]
        
        posQ.put(pos)
        modelQ.put(model)