from __future__ import print_function
from time import gmtime, strftime 
from MicroExpNet import *
import tensorflow as tf
import numpy as np
import cv2
import sys
import os

labels = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]

# Load the Haar cascade XML files for frontal face and eye detection
face_cascade = cv2.CascadeClassifier('./haarcascades/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('./haarcascades/haarcascade_eye.xml')

def detectFaces(img):
    # Convert to grayscale since the cascade classifier works on grayscale images
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Detect the face (scaleFactor=1.3, minNeighbors=5)
    faces = face_cascade.detectMultiScale(gray_img, 1.3, 5)

    if len(faces):
        return faces[0]
    else:
        # Sentinel value indicating that no face was found
        return [-13, -13, -13, -13]
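
# Example (a minimal sketch; "face.jpg" is a hypothetical path):
#   (x, y, w, h) = detectFaces(cv2.imread("face.jpg"))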

# Detects the face, crops the image to it, and resizes the result
def segmentFace(image, imgXdim, imgYdim):
    # Detect the face
    (x, y, w, h) = detectFaces(image)

    # Crop to the face if one was detected; otherwise keep the whole image
    if x != -13:
        image = image[y:y+h, x:x+w]

    # Resize to 256x256, trim the 32-pixel top and left margins, then
    # resize to the target dimensions and convert to grayscale
    img = cv2.resize(image, (256, 256))
    img = img[32:256, 32:256]
    img = cv2.resize(img, (imgXdim, imgYdim))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img
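
# Example (a minimal sketch; "face.jpg" is a hypothetical path):
#   img = cv2.imread("face.jpg")
#   cropped = segmentFace(img, 84, 84)  # returns an 84x84 grayscale face crop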

def get_time():
    return strftime("%a, %d %b %Y %X", gmtime())

class Emotion(object):
    def __init__(self):
        # Static parameters
        self.imgXdim = 84
        self.imgYdim = 84
        self.nInput = self.imgXdim*self.imgYdim
        # Dynamic parameters
        self.modelDir = "./Models/OuluCASIA/"
        # tf Graph input
        self.x = tf.placeholder(tf.float32, shape=[None, self.nInput])

        # Construct model
        self.classifier = MicroExpNet(self.x)

        # Deploy weights and biases for the model saver
        self.weights_biases_deployer = tf.train.Saver({"wc1": self.classifier.w["wc1"],
                                                       "wc2": self.classifier.w["wc2"],
                                                       "wfc": self.classifier.w["wfc"],
                                                       "wo": self.classifier.w["out"],
                                                       "bc1": self.classifier.b["bc1"],
                                                       "bc2": self.classifier.b["bc2"],
                                                       "bfc": self.classifier.b["bfc"],
                                                       "bo": self.classifier.b["out"]})

    def emotiondetect(self, image):
        print("[" + get_time() + "] " + "Preparation is complete.")
        # Detect, crop, and resize the face, then flatten it into a row vector
        image = segmentFace(image, self.imgXdim, self.imgYdim)
        testX = np.reshape(image, (1, self.imgXdim*self.imgYdim))
        testX = testX.astype(np.float32)
        emotion = ""
        with tf.Session() as sess:
            # Initialize the variables, then overwrite them from the latest checkpoint
            sess.run(tf.global_variables_initializer())
            print("[" + get_time() + "] " + "Starting inference...")
            self.weights_biases_deployer.restore(sess, tf.train.latest_checkpoint(self.modelDir))
            print("[" + get_time() + "] Weights & biases are restored.")

            # Run the forward pass and map the highest-scoring class to its label
            predictions = sess.run([self.classifier.pred], feed_dict={self.x: testX})
            emotion = labels[np.argmax(predictions)]
            print("[" + get_time() + "] Emotion: " + emotion)
        return emotion
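
# Example usage (a minimal sketch): reads an image path from the command line,
# runs the detector, and prints the predicted label. "face.jpg" is a
# hypothetical default path, and a trained checkpoint is assumed to exist
# under ./Models/OuluCASIA/.
if __name__ == "__main__":
    imagePath = sys.argv[1] if len(sys.argv) > 1 else "face.jpg"
    frame = cv2.imread(imagePath)
    if frame is None:
        print("[" + get_time() + "] Could not read image: " + imagePath)
        sys.exit(1)
    detector = Emotion()
    print("[" + get_time() + "] Predicted: " + detector.emotiondetect(frame))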