'''
Created on Oct 12, 2012
Threading test module: matches SURF features from two cameras against a target image using a thread pool.
@author: issa
'''
import numpy as np
import cv2
import time
from multiprocessing.pool import ThreadPool
from collections import deque

from track import Tracker
from common import clock, draw_str, StatValue , anorm
from functools import partial
import video
import balancer

# FLANN matcher configuration (kd-tree index with 4 trees), kept from the
# OpenCV samples where the FLANN enum constants are missing from the cv2
# bindings (hence the hard-coded 1).
# NOTE(review): flann_params is never referenced in this module -- presumably
# consumed by track.Tracker's match_flann(); confirm before removing.
FLANN_INDEX_KDTREE = 1  # bug: flann enums are missing
flann_params = dict(algorithm = FLANN_INDEX_KDTREE,
                    trees = 4)
            

class DummyTask:
    """Synchronous stand-in for a pool AsyncResult.

    Wraps an already-computed value behind the same ready()/get()
    interface, so the result-draining loop works identically in
    threaded and non-threaded mode.
    """

    def __init__(self, data):
        # The "result" was computed up front by the caller.
        self._result = data

    def ready(self):
        """Always True: there is nothing left to wait for."""
        return True

    def get(self):
        """Return the precomputed value."""
        return self._result
    
def process_frame(img3, t0 , kp1 , source):
        # some intensive computation...
    if source[1] ==0:
        return img3 , t0 , source[0]
    
    img4 = cv2.blur(img3, (7,7))
    kp2 = surf_det.detect(img4)
    kp2, desc2 = surf_ext.compute(img4, kp2)
    t = Tracker(kp1 , desc1 , kp2 , desc2 , 0.55)
    res = t.match_flann()
    i = 0
    
    xf = 0
    yf = 0
    if res is not None and len(res) > 20 :
        for p in res:
            i += 1
            x, y = np.int32(kp2[p[1]].pt)
            xf +=x
            yf +=y
        xf /= len(res)
        yf /= len(res)
        r = int(p.size)+10
        cv2.circle(img3, (xf, yf), r, (0, 255, 0) , thickness=-1)   
        print "Source" , source[0],"matched" ,  i 

    return img3, t0 , source[0]
    
if __name__ == '__main__':
    # Create the two camera captures (device ids 1 and 2, addressed
    # below as sources 0 and 1).
    cap = []
    cap.append(video.create_capture(1))
    cap.append(video.create_capture(2))

    for c in cap:
        c.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
        c.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

    # Load balancer decides which camera to service next.
    balancerModule = balancer.Balancer(2)

    # Thread pool sized to the machine; `pending` keeps in-flight tasks
    # in submission order so results are displayed FIFO.
    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes = threadn)
    pending = deque()
    threaded_mode = True

    latency = StatValue()
    frame_interval = StatValue()
    last_frame_time = clock()

    # SURF detector and extractor are created once and shared (read-only)
    # by the worker threads via module globals.
    surf_det = cv2.FeatureDetector_create("SURF")
    surf_ext = cv2.DescriptorExtractor_create("SURF")

    # Load and blur the target image; a bigger blur kernel keeps only the
    # strongest features.
    fn1 = 'issa.png'
    im1 = cv2.imread(fn1, 0)
    im1 = cv2.blur(im1, (13,13))
    im1 = cv2.flip(im1, 2)

    # Extract target keypoints/descriptors once, up front.
    kp1 = surf_det.detect(im1)
    kp1, desc1 = surf_ext.compute(im1, kp1)

    while True:
        # Drain finished tasks in FIFO order and display each result in
        # the window belonging to its camera source.
        while len(pending) > 0 and pending[0].ready():
            res, t0  , source = pending.popleft().get()
            latency.update(clock() - t0)
            draw_str(res, (20, 20), "threaded      :  " + str(threaded_mode))
            draw_str(res, (20, 40), "latency        :  %.1f ms" % (latency.value*1000))
            draw_str(res, (20, 60), "frame interval :  %.1f ms" % (frame_interval.value*1000))
            cv2.imshow('capture %d' % source, res)

        if len(pending) < threadn:
            # BUG FIX: query the balancer once per frame.  The original
            # called getCamSource() twice (once for read, once for
            # submit), so the task could be tagged with a different
            # source than the camera the frame was actually read from.
            src = balancerModule.getCamSource()
            ret, frame = cap[src[0]].read()
            if not ret:
                # BUG FIX: a failed read used to crash on cv2.flip(None);
                # skip this iteration instead.
                continue
            t = clock()
            # Horizontal flip for a mirror-like view.
            frame = cv2.flip(frame, 2)

            frame_interval.update(t - last_frame_time)
            last_frame_time = t
            if threaded_mode:
                task = pool.apply_async(process_frame, (frame.copy(), t , kp1 , src))
            else:
                # BUG FIX: the original omitted the `source` argument
                # here, so toggling threaded mode off raised a TypeError.
                task = DummyTask(process_frame(frame, t , kp1 , src))
            # Tell the balancer a frame from this source was consumed
            # (previously only done in threaded mode).
            balancerModule.updateFeed()
            pending.append(task)

        ch = 0xFF & cv2.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break

    # BUG FIX: moved inside the __main__ guard -- a stray dedent made the
    # original run this at import time as well.
    cv2.destroyAllWindows()


            
    
