import cv, sys, scipy.ndimage, numpy, scipy.stats.mstats, orange, selv1
import time
'''
WARNINGS:
    1. You may get better results if you wear clothes with high contrast
    and turn off auto-gain on your webcam. 
'''
#Segmentation ideas:
#    Histogram based background segmentation. 

# Import-time setup: train a kNN classifier on the example table in ../train.
# NOTE(review): all of this runs on import (webcam page is opened below too).
train_data = orange.ExampleTable("../train")
knn = orange.kNNLearner()
knn.k = 5
#knn.distanceConstructor = orange.ExamplesDistanceConstructor_Relief()
knn.distanceConstructor = orange.ExamplesDistanceConstructor_Euclidean()
knn = knn(train_data)  # calling the learner on data yields the fitted classifier

DEBUG = True
# Governs the latency: how long a user has to stand still before we take
# their picture. If it's too high, we can mistake little jitters for motion.
# NOTE(review): VARIANCE_ALPHA appears unused in this file -- confirm.
VARIANCE_ALPHA = .85
# Frame polling period passed to cv.WaitKey, in milliseconds.
FRAME_PERIOD = 10
# How much jitter isn't counted as movement? Counted in iterations;
# totally experimental, but a rough function of FRAME_PERIOD.
#MIN_MOVEMENT_TIME_CNT = 15
MIN_MOVEMENT_TIME_CNT = 10
# When True, prompt to save each classified frame as a new training sample.
COLLECT_SAMPLES = False

selv1.openHomePage()

def mycapture(img):
    print "Capturing image, enter name"
    base_name = sys.stdin.readline().strip()
    name = "../data/" + base_name + ".png"
    if len(base_name) > 0:
        print name
        cv.SaveImage(name, img)
    else:
        print "File name too short, no print"
        
def secondarySegment(img):
    """Isolate the largest connected foreground component of a binary image.

    Labels the connected components of `img`, discards the background
    (label 0), keeps only the most populous remaining component, and
    returns it as an 8-bit single-channel binary image. If the background
    is suspiciously small (<= 1000 pixels), the input is returned
    unchanged -- segmentation is assumed to have failed.
    """
    labelArr, num_labels = scipy.ndimage.label(numpy.asarray(cv.GetMat(img)))

    # Pixel count per component label (index = label id).
    cnts = numpy.bincount(labelArr.flatten())

    # First destroy the background frequency so it can't win the arg-max.
    if cnts[0] > 1000:
        cnts[0] = -1
    else:
        return img  # If the background isn't the largest element, all bets are off

    # Label of the largest remaining component. argmax returns the first
    # maximum, matching the old manual strict-greater-than scan.
    biggest = int(numpy.argmax(cnts))

    # Zero every pixel whose label differs from `biggest`.
    # NOTE(review): scipy.stats.mstats.threshold was removed in modern SciPy;
    # numpy.where(labelArr == biggest, labelArr, 0) is the portable form.
    labelArr = scipy.stats.mstats.threshold(labelArr, biggest, biggest)

    # Back to an IplImage, then down-convert to 8-bit and binarize.
    img = cv.GetImage(cv.fromarray(labelArr))
    scaledImg = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
    cv.Convert(img, scaledImg)
    cv.Threshold(scaledImg, scaledImg, 0, 256, cv.CV_THRESH_BINARY)
    return scaledImg

def extractFeatureVec(img):  
    img = secondarySegment(img)  
    if DEBUG:
        print "Extracting feature vector"
    
    moments = cv.Moments(cv.GetMat(img))
    featureVec = []
    featureVec.append(cv.GetNormalizedCentralMoment(moments, 1, 1))
    featureVec.append(cv.GetNormalizedCentralMoment(moments, 0, 2))
    featureVec.append(cv.GetNormalizedCentralMoment(moments, 2, 0))
    featureVec.append(cv.GetNormalizedCentralMoment(moments, 1, 2))
    featureVec.append(cv.GetNormalizedCentralMoment(moments, 2, 1))
    featureVec.append(cv.GetNormalizedCentralMoment(moments, 0, 3))
    featureVec.append(cv.GetNormalizedCentralMoment(moments, 3, 0))
    featureVec.append('lickStamp')
    x = orange.Example(train_data[0].domain, featureVec)

    c = knn(x)
    print c

    #selv1.openHomePage()
    selv1.doAction(str(c))
    
    if COLLECT_SAMPLES:
        mycapture(img)

def convertColor(img):
    """Return a single-channel image holding the first YCrCb channel of img.

    NOTE: mutates the caller's img in place (converted to YCrCb).
    NOTE(review): webcam frames are typically BGR; CV_RGB2YCrCb assumes RGB
    ordering -- confirm the intended channel really is luma.
    """
    single = cv.CreateImage(cv.GetSize(img), 8, 1)
    cv.CvtColor(img, img, cv.CV_RGB2YCrCb)  # in-place colorspace conversion
    cv.SetImageCOI(img, 1)  # channel-of-interest is 1-based: first channel
    cv.Copy(img, single)    # Copy honors the COI, extracting that channel
    return single

# Main capture loop: grab a reference frame, then watch the webcam for a
# user who moves and subsequently stands still; once still for
# MIN_MOVEMENT_TIME_CNT iterations, classify the difference image.
capture = cv.CaptureFromCAM(0)
# Reference (background) frame: first capture, Gaussian-blurred, reduced to
# a single channel by convertColor.
origImg = cv.QueryFrame(capture)
cv.Smooth(origImg, origImg, cv.CV_GAUSSIAN, 7)
origImg = convertColor(origImg)
diffImg = cv.CloneImage(origImg)
# Running average of the thresholded difference, used to score motion.
movingDiffAvg = cv.CreateImage(cv.GetSize(diffImg), cv.IPL_DEPTH_32F, 1)
iterCount = 0
noMovement = 0         # baseline "still" score, sampled on iteration 0
lastNotMovingIter = 0  # iteration index when the scene last looked still
while True:
    currImg = cv.QueryFrame(capture)
    cv.Smooth(currImg,currImg, cv.CV_GAUSSIAN, 7)
    currImg = convertColor(currImg)
    
    # Foreground mask: pixels differing from the reference by more than t,
    # where t tracks the mean difference but is floored at 15 so pure
    # sensor noise never passes.
    cv.AbsDiff(currImg,origImg,diffImg)
    #cv.CalcHist(currImg)
    #if DEBUG:
    #    cv.ShowImage("Difference", diffImg)
    t = max(cv.Avg(diffImg)[0],15)
    cv.Threshold(diffImg,diffImg,t,256,cv.CV_THRESH_BINARY)
    # NOTE(review): return value discarded -- this line looks like a no-op.
    numpy.asarray(diffImg)
    
    if DEBUG:
        cv.ShowImage("Segmented difference", diffImg)
    
    cv.RunningAvg(diffImg,movingDiffAvg, .25)
    
    # Deviation of the current mask from its running average; the erode
    # then dilate (morphological opening) removes small speckle.
    # NOTE(review): a fresh 32F image is allocated every frame here.
    variance =  cv.CreateImage(cv.GetSize(diffImg), cv.IPL_DEPTH_32F, 1)
    cv.ConvertScale(diffImg, variance, 1.0, 0)
    cv.Sub(movingDiffAvg,variance,variance)
    cv.Erode(variance,variance,None,10)
    cv.Dilate(variance,variance,None,10)
    
    #if DEBUG:
    #    cv.ShowImage("Moving Average difference", variance)
    
    # Collapse the deviation image to a single scalar movement score.
    threshVar = cv.CreateImage(cv.GetSize(variance), 8, 1)
    cv.ConvertScale(variance, threshVar, 1.0, 0)
    cv.AdaptiveThreshold(threshVar,threshVar,256)
    movementAmt = cv.Sum(threshVar)[0]
    
    #I really started guessing here, and lost a lot of robustness I'm sure
    # Within +/-5 of the baseline counts as "standing still". Classify only
    # after the user has been moving for more than MIN_MOVEMENT_TIME_CNT
    # iterations and has now come to rest.
    if movementAmt < noMovement + 5 and movementAmt > noMovement - 5:
        if iterCount - lastNotMovingIter > MIN_MOVEMENT_TIME_CNT:
            extractFeatureVec(diffImg)
        #this is totally exposed to wrap-around error
        lastNotMovingIter = iterCount
        
    # Baseline is sampled once, on the very first iteration -- note this
    # happens AFTER the comparison above, so iteration 0 compares against 0.
    if iterCount == 0:
        noMovement = movementAmt
    
    # ESC (27) quits; WaitKey also services the HighGUI event loop.
    if cv.WaitKey(FRAME_PERIOD) == 27:
        break
    iterCount = (iterCount + 1)