#!/usr/bin/python2

import config
import cv2
import itertools
import os
import urllib

import numpy as np
np.set_printoptions(precision=2)

import openface

from sql import *

# dlib-based face detector/aligner, loaded from the model path in config.
align = openface.AlignDlib(config.align)

# Torch embedding network from config; 96 matches the 96px aligned-face
# size passed to align.align() in get_features below.
net = openface.TorchNeuralNet(config.net, 96)

def get_features(img, bb):
    """Return the OpenFace embedding for the face in *bb* within *img*.

    img: RGB image array (as produced by img_load).
    bb:  dlib bounding box for a single face.
    Raises Exception if the face cannot be aligned.
    """
    alignedFace = align.align(96, img, bb,
                              landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    if alignedFace is None:
        # Bug fix: the original formatted an undefined name `imgPath` here,
        # which raised NameError instead of the intended Exception.
        raise Exception("Unable to align face at bounding box: {}".format(bb))

    # Forward pass through the embedding network.
    rep = net.forward(alignedFace)
    return rep

def get_faces(url):
    """Download the image at *url*, embed every detected face, and persist
    each via add_face() together with its bounding box and a scalar index.
    """
    rgb_img = img_load(url)
    boxes = align.getAllFaceBoundingBoxes(rgb_img)

    for box in boxes:
        # abs() guards against negative coordinates when a detected box
        # extends past the image edge.
        x1 = abs(box.tl_corner().x)
        y1 = abs(box.tl_corner().y)
        x2 = abs(box.br_corner().x)
        y2 = abs(box.br_corner().y)
        print(x1, x2, y1, y2)
        face = get_features(rgb_img, box)
        # Crude lookup index: sum of the first 128 embedding components.
        index = 0.0
        for component in face[:128]:
            index += component
        add_face(face, x1, y1, x2, y2, url, index)

def get_face(rgb_img):
    """Return the embedding of the single largest face in *rgb_img*."""
    largest_box = align.getLargestFaceBoundingBox(rgb_img)
    return get_features(rgb_img, largest_box)

def img_load(url):
    """Download the image at *url* to a temp file and return it as an
    RGB numpy array.

    Raises Exception when the downloaded file is not a readable image.
    """
    # Hoisted: the original rebuilt this path expression three times.
    imgPath = config.temp + "/imagefile"

    # Remove any stale download so urlretrieve writes a fresh copy.
    if os.path.isfile(imgPath):
        os.remove(imgPath)
    urllib.urlretrieve(url, imgPath)

    bgr_img = cv2.imread(imgPath)  # OpenCV loads images in BGR order
    if bgr_img is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    return rgb_img
