from __future__ import division
import nao_app as na
import cv2
from cv2 import cv
import numpy as np
import math


#nao includes
from naoqi import ALProxy
import Image
import vision_definitions

#needed to run continuously
import time

DEBUG_WEBCAM = 0 #0 to disable
DEBUG_NAO = 1 #0 to disable

#IP = "192.168.10.151"
#PORT = 9559


# 1) get img
# 2) resize 40x40
# 3) edge detection ( x and y )
# 4) take atan2 of the y and x gradient matrices (gives gradient orientation)
# 5) create histogram of 18 bins
def get_converted_img(path, learning):
    # 1) Load an color image in grayscale
    if path == 'nao':
        img = get_nao_image()
    else:
        img = cv2.imread(path,0)


    #blurred = cv2.blur(img, (5,5))

    if learning == 0:
        cv2.imshow("capture", img)
        cv2.waitKey(750)

    # 2) resize
    rsize = cv2.resize(img, (40, 40))

    # 3) edge detection
    #Gradient X
    ddepth = cv2.CV_16S
    scale = 1
    delta = 0

    #only do additional grayscaling if img is retrieved from nao instead of localfs
    if path == 'nao':
        gray = cv2.cvtColor(rsize,cv2.COLOR_BGR2GRAY)
        #Scharr( src_gray, grad_x, ddepth, 1, 0, scale, delta, BORDER_DEFAULT );
        grad_x = cv2.Sobel(gray,ddepth,1,0,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT)
        grad_y = cv2.Sobel(gray,ddepth,0,1,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT)
    else:
        grad_x = cv2.Sobel(rsize,ddepth,1,0,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT)
        grad_y = cv2.Sobel(rsize,ddepth,0,1,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT)

    # 4)convert to vectors using atan
    #a = 0
    #b = 0
    #orientation = {}
    #for x in grad_x:
    #    orientation[a] = {}
    #    for y in grad_x[a]:
    #        orientation[a][b] = cv2.fastAtan2(grad_y[a][b],grad_x[a][b])
    #        b = b + 1
    #    a = a + 1
    #    b = 0
    #print(orientation[1])

    # 4) and 5) convert to vectors, then create histograms (18)
    a = 0
    b = 0
    orientation = {}
    grad_bins = [0] * 18
    for x in grad_x:
        orientation[a] = {}
        for y in grad_x[a]:
            orientation[a][b] = cv2.fastAtan2(grad_y[a][b],grad_x[a][b])
            j_bin = int(orientation[a][b]/20)
            grad_bins[j_bin] = grad_bins[j_bin] + 1
            b = b + 1
        a = a + 1
        b = 0

    if DEBUG_WEBCAM or DEBUG_NAO:
        print grad_bins

    grad_binss = [0.0] * 18

    for bin in range(len(grad_bins)):
        grad_binss[bin] = grad_bins[bin]/256

    return grad_binss



def get_image():
    """Grab one frame from the module-global webcam capture.

    The success flag returned by VideoCapture.read() is discarded; a
    failed read yields None, matching the original behavior.
    """
    return camera.read()[1]


def get_nao_image():
    """Grab a single QVGA BGR frame from the NAO's camera.

    Subscribes to ALVideoDevice, fetches one remote image, unsubscribes,
    and converts the raw buffer into a numpy array via a legacy IplImage
    header. Returns the frame as a numpy array.
    """
    resolution = vision_definitions.kQVGA
    colorSpace = vision_definitions.kBGRColorSpace

    # BUG FIX: PORT is not defined in this module (the constant is
    # commented out at the top), so this line raised NameError. Use the
    # port from nao_app, matching the existing na.IP usage.
    # NOTE(review): assumes nao_app defines PORT alongside IP — confirm.
    cam = ALProxy("ALVideoDevice", na.IP, na.PORT)
    vidya = cam.subscribe("nao", resolution, colorSpace, 5)
    try:
        shot = cam.getImageRemote(vidya)
    finally:
        # always release the subscription, even if the grab fails,
        # so repeated calls don't leak video subscribers on the robot
        cam.unsubscribe(vidya)

    # shot[0] = width, shot[1] = height, shot[6] = raw pixel buffer
    size = (shot[0], shot[1])
    image = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(image, shot[6], shot[0]*3)
    image = np.asarray(image[:,:])

    return image


if __name__ == '__main__':
    # BUG FIX (all branches): get_converted_img takes (path, learning)
    # but was called with only the path, raising TypeError. Pass
    # learning=0 explicitly (0 = show the capture window while debugging).
    if DEBUG_WEBCAM:
        camera = cv2.VideoCapture(0)   #Laptop
        bad_frames = 10   # frames to discard while the exposure settles
        fpath = "test.jpg"
        #wait until the lighting issue is gone
        for i in xrange(bad_frames):
            temp = get_image()
        capture = get_image()
        cv2.imwrite(fpath, capture)
        get_converted_img(fpath, 0)
    elif DEBUG_NAO:
        # NOTE(review): this reads a static file, not the live camera —
        # the commented-out get_nao_image() suggests 'nao' may have been
        # intended as the path; preserved as-is.
        fpath = "camImage.png"
        while True:
            get_converted_img(fpath, 0)
            time.sleep(0.1)
    else:
        get_converted_img('pics/paper/frame92.jpg', 0)
