# -*- coding:utf-8 -*-
import os
import sys
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import skimage.io
import json
import cv2
from scipy import misc
#from http_client import post_result_from_http
import detect_face
import facenet
import tensorflow as tf

caffe_root = '/home/dl/download/caffe-master/'
sys.path.insert(0, caffe_root + 'python')
import caffe

REPO_DIRNAME = os.path.dirname(os.path.abspath(__file__))
MODEL_DIRNAME = os.path.join(REPO_DIRNAME, 'models')
UPLOAD_FOLDER = '/home/dl/test/face/uploadimages'


# Obtain the flask app object. The model wrappers (app.aln, app.ext,
# app.agp) are attached to this object in start_from_terminal() before
# the server starts handling requests.
app = flask.Flask(__name__)

@app.route('/')
def hello():
    """Health-check endpoint: greet visitors at the service root."""
    greeting = 'hello, welcome to the world of face recognizer !'
    return greeting

@app.route('/extract_upload', methods=['POST'])
def extract_upload():
    """Handle an uploaded image: detect faces and return age, gender and a
    FaceNet embedding for each detected face.

    Expects a multipart POST with the file under the key 'imagefile'.
    On success returns JSON {'errorno': '0', 'msg': ..., 'data': [
    {'id', 'age', 'gender', 'feature'}, ...]}; on failure returns
    JSON with only a 'msg' key describing the error.
    """
    result = {}
    try:
        # We will save the file to disk for possible data collection.
        imagefile = flask.request.files['imagefile']
        # Import from werkzeug.utils: the top-level werkzeug.secure_filename
        # alias was removed in newer werkzeug releases; utils works on old
        # versions too.
        from werkzeug.utils import secure_filename
        filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
            secure_filename(imagefile.filename)
        filename = os.path.join(UPLOAD_FOLDER, filename_)
        imagefile.save(filename)
        logging.info('Saving to %s.', filename)
    except Exception as err:
        logging.info('Uploaded image open error: %s', err)
        result['msg'] = 'Uploaded image open error'
        return json.dumps(result)
    try:
        image = misc.imread(filename)
    except (IOError, ValueError, IndexError) as e:
        # Return immediately: the original fell through and hit a NameError
        # on the undefined `image` below.
        errorMessage = '{}: {}'.format(filename, e)
        print(errorMessage)
        result['msg'] = 'Uploaded image open error'
        return json.dumps(result)
    if image.ndim < 2:
        # A 1-D array cannot be aligned; the original fell through and
        # crashed on the channel slice below.
        print('Unable to align "%s"' % filename)
        result['msg'] = 'Unable to align image'
        return json.dumps(result)
    if image.ndim == 2:
        # Grayscale input: replicate the single channel to RGB.
        image = facenet.to_rgb(image)
    image = image[:, :, 0:3]  # drop a possible alpha channel
    scaled_faces = app.aln.get_aligned_faces(image)
    # Set the success status once, outside the loop, so clients get a
    # status even when zero faces were detected.
    result['errorno'] = '0'
    result['msg'] = 'detect success !'
    faces = []
    # enumerate instead of a hand-rolled counter; `id` shadowed the builtin.
    for face_id, face in enumerate(scaled_faces, start=1):
        age, gender = app.agp.predict_agegender(face)
        embedding = app.ext.extract_image(face)
        faces.append({
            'id': face_id,
            'age': age,
            'gender': gender,
            'feature': embedding.tolist(),
        })
    result['data'] = faces
    return json.dumps(result)



class FaceAlignmentor(object):
    """Detect faces with MTCNN and return margin-padded, resized crops."""

    default_args = {
        'mtcnn_model_dir': (
            '{}/models/mtcnn'.format(REPO_DIRNAME)),
    }
    default_args['gpu_memory_fraction'] = 0.25
    default_args['margin'] = 32
    default_args['minsize'] = 20  # minimum size of face
    default_args['threshold'] = [0.6, 0.7, 0.7]  # three steps's threshold
    default_args['factor'] = 0.709  # scale factor

    def __init__(self, mtcnn_model_dir, gpu_memory_fraction,
                 margin, minsize, threshold, factor):
        # Build the three MTCNN stage networks in a private graph so they
        # do not pollute the global default graph used by FaceExtractor.
        with tf.Graph().as_default():
            gpu_options = tf.GPUOptions(
                per_process_gpu_memory_fraction=gpu_memory_fraction)
            sess = tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options, log_device_placement=False))
            with sess.as_default():
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
                    sess, mtcnn_model_dir)
        self.margin = margin
        self.minsize = minsize
        self.threshold = threshold
        self.factor = factor

    def get_aligned_faces(self, image, image_size=160):
        """Detect faces in an RGB image and return a list of crops resized
        to (image_size, image_size).

        Side effect: writes a debug image with the detection rectangles
        drawn to UPLOAD_FOLDER/face.jpg.
        """
        bounding_boxes, _points = detect_face.detect_face(
            image, self.minsize, self.pnet, self.rnet,
            self.onet, self.threshold, self.factor)
        nrof_faces = bounding_boxes.shape[0]
        # Fix: the original did print(...).format(...), which raises
        # AttributeError on Python 3 (print returns None).
        print('detect face :{}'.format(nrof_faces))
        # OpenCV expects BGR channel ordering for drawing/writing.
        bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        scaled_faces = []
        # (height, width) — loop-invariant, hoisted out of the face loop.
        img_size = np.asarray(image.shape)[0:2]
        # Iterating an empty (0, n) array is a no-op, so the explicit
        # nrof_faces > 0 guard is unnecessary.
        for face_position in bounding_boxes:
            det = face_position[0:4]
            bb = np.zeros(4, dtype=np.int32)
            # Pad the detection by half the margin on each side, clipped
            # to the image bounds.
            bb[0] = np.maximum(det[0] - self.margin / 2, 0)
            bb[1] = np.maximum(det[1] - self.margin / 2, 0)
            bb[2] = np.minimum(det[2] + self.margin / 2, img_size[1])
            bb[3] = np.minimum(det[3] + self.margin / 2, img_size[0])
            cropped = image[bb[1]:bb[3], bb[0]:bb[2], :]
            scaled = misc.imresize(cropped, (image_size, image_size),
                                   interp='bilinear')
            scaled_faces.append(scaled)
            face_position = face_position.astype(int)
            cv2.rectangle(bgr,
                          (face_position[0], face_position[1]),
                          (face_position[2], face_position[3]),
                          (0, 255, 0), 2)
        cv2.imwrite(os.path.join(UPLOAD_FOLDER, 'face.jpg'), bgr)
        return scaled_faces


class FaceExtractor(object):
    """Compute FaceNet embeddings for aligned face crops."""

    default_args = {
        'facenet_model_dir': (
            '{}/models/facenet/20170512-110547'.format(REPO_DIRNAME)),
    }
    default_args['image_size'] = 160

    def __init__(self, facenet_model_dir, image_size):
        self.image_size = image_size
        # Fix: the original called tf.Graph().as_default() without entering
        # the context manager, which has no effect. Keep an explicit private
        # graph and load the model into it so tensor lookups do not depend
        # on whatever the global default graph happens to be.
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.sess = tf.Session()
            facenet.load_model(facenet_model_dir, self.sess)

    def prewhiten(self, x):
        """Normalize pixel values of x to zero mean / unit variance.

        The std is floored at 1/sqrt(x.size) to avoid a division blow-up
        on near-constant images.
        """
        mean = np.mean(x)
        std = np.std(x)
        std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))
        return np.multiply(np.subtract(x, mean), 1 / std_adj)

    def extract_image(self, image):
        """Return the embedding array for one aligned face crop.

        The crop is assumed to be image_size x image_size RGB — TODO
        confirm against FaceAlignmentor.get_aligned_faces output.
        """
        data = self.prewhiten(image)
        data = data.reshape(-1, self.image_size, self.image_size, 3)
        # Look the tensors up on the graph the model was loaded into,
        # not on the process-wide default graph.
        images_placeholder = self.graph.get_tensor_by_name("input:0")
        embeddings = self.graph.get_tensor_by_name("embeddings:0")
        phase_train_placeholder = self.graph.get_tensor_by_name("phase_train:0")
        feed_dict = {images_placeholder: np.array(data),
                     phase_train_placeholder: False}
        return self.sess.run(embeddings, feed_dict=feed_dict)

class AgeGenderPredictor(object):
    """Predict age bucket and gender for a face crop with Caffe nets."""

    default_args = {
        'age_gender_model_dir': (
            '{}/models/age_gender'.format(REPO_DIRNAME)),
    }

    def __init__(self, age_gender_model_dir):
        caffe.set_mode_gpu()
        mean_filename = os.path.join(age_gender_model_dir, 'mean.binaryproto')
        # Fix: the original leaked the file handle (open(...).read()).
        with open(mean_filename, "rb") as mean_file:
            proto_data = mean_file.read()
        blob = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
        mean = caffe.io.blobproto_to_array(blob)[0]

        # Age network: classifies into one of eight age ranges.
        age_net_pretrained = os.path.join(age_gender_model_dir, 'age_net.caffemodel')
        age_net_model_file = os.path.join(age_gender_model_dir, 'deploy_age.prototxt')
        self.age_list = ['(0, 2)', '(4, 6)', '(8, 12)', '(15, 20)', '(25, 32)', '(38, 43)', '(48, 53)', '(60, 100)']
        self.age_net = caffe.Classifier(age_net_model_file, age_net_pretrained,
                               mean=mean,
                               channel_swap=(2, 1, 0),  # RGB -> BGR for Caffe
                               raw_scale=255,
                               image_dims=(256, 256))

        # Gender network: binary Male/Female classifier.
        gender_net_pretrained = os.path.join(age_gender_model_dir, 'gender_net.caffemodel')
        gender_net_model_file = os.path.join(age_gender_model_dir, 'deploy_gender.prototxt')
        self.gender_list = ['Male', 'Female']
        self.gender_net = caffe.Classifier(gender_net_model_file, gender_net_pretrained,
                               mean=mean,
                               channel_swap=(2, 1, 0),
                               raw_scale=255,
                               image_dims=(256, 256))

    def predict_agegender(self, image):
        """Return (age_bucket, gender) strings for one face image."""
        # NOTE(review): relies on `import skimage.io` exposing
        # skimage.img_as_float at package level — verify on the installed
        # scikit-image version.
        im = skimage.img_as_float(image).astype(np.float32)
        prediction = self.age_net.predict([im])
        age = self.age_list[prediction[0].argmax()]

        prediction = self.gender_net.predict([im])
        gender = self.gender_list[prediction[0].argmax()]
        return age, gender

def start_tornado(app, port=5000):
    """Serve the WSGI app through Tornado's HTTP server (blocks forever)."""
    # Fix: tornado.ioloop was never imported at the top of the file and was
    # only reachable through the transitive import done by
    # tornado.httpserver — import it explicitly.
    import tornado.ioloop
    http_server = tornado.httpserver.HTTPServer(
        tornado.wsgi.WSGIContainer(app))
    http_server.listen(port)
    print("Tornado server starting on port {}".format(port))
    tornado.ioloop.IOLoop.instance().start()

def start_from_terminal(app):
    """Parse command line options, attach the model objects to the Flask
    app, and start the server (Flask dev server in debug mode, Tornado
    otherwise)."""
    parser = optparse.OptionParser()
    parser.add_option('-d', '--debug',
                      action="store_true", default=False,
                      help="enable debug mode")
    parser.add_option('-p', '--port',
                      type='int', default=5009,
                      help="which port to serve content on")
    options, _unused_args = parser.parse_args()

    # Initialize classifier + warm start by forward for allocation
    app.aln = FaceAlignmentor(**FaceAlignmentor.default_args)
    app.ext = FaceExtractor(**FaceExtractor.default_args)
    app.agp = AgeGenderPredictor(**AgeGenderPredictor.default_args)

    if options.debug:
        app.run(debug=True, host='0.0.0.0', port=options.port)
    else:
        start_tornado(app, options.port)

if __name__ == '__main__':
    # Root logger at INFO so the per-request logging.info calls show up.
    logging.getLogger().setLevel(logging.INFO)
    # Make sure the upload directory exists before any request arrives.
    upload_dir_missing = not os.path.exists(UPLOAD_FOLDER)
    if upload_dir_missing:
        os.makedirs(UPLOAD_FOLDER)
    start_from_terminal(app)