import tensorflow as tf
import cv2
import numpy as np
import mtcnn.detect_face as detect_face
import os
import sys
import argparse
import imageio

# Directory of the exported MTCNN SavedModel (serving version 1).
# NOTE(review): hard-coded absolute Windows path — presumably meant to be
# derived from --output_dir (a commented-out line in main() suggests so); verify.
model_path = r'E:\testDir\ml\mtcnn_output\export_model_for_serving\1'

def to_rgb(img):
    """Convert a single-channel (grayscale) image to a 3-channel image.

    Args:
        img: 2-D array of shape (height, width).

    Returns:
        uint8 array of shape (height, width, 3) with the gray plane
        replicated into all three channels.
    """
    # The original unpacked img.shape as (w, h), but numpy image shape is
    # (rows, cols) = (height, width); np.stack avoids the naming trap and
    # the manual empty-buffer fill while producing the same uint8 result.
    return np.stack((img, img, img), axis=-1).astype(np.uint8, copy=False)
def _signature_runner(sess, signature_def, signature_key, num_outputs):
    """Build a callable that runs one SavedModel signature.

    Looks up the 'images' input tensor and the 'result1'..'resultN' output
    tensors of *signature_key* in the loaded graph and returns a function
    mapping an image batch to the tuple of output arrays.
    """
    sig = signature_def[signature_key]
    image_tensor = sess.graph.get_tensor_by_name(sig.inputs['images'].name)
    output_tensors = tuple(
        sess.graph.get_tensor_by_name(sig.outputs['result%d' % (i + 1)].name)
        for i in range(num_outputs))
    return lambda img: sess.run(output_tensors, feed_dict={image_tensor: img})


def main(args):
    """Detect the most prominent face in args.image_path with an exported
    MTCNN SavedModel, draw its box and landmarks, and write the annotated
    image as a PNG into args.output_dir.
    """
    output_dir = args.output_dir
    with tf.compat.v1.Session() as sess:
        meta_graph_def = tf.compat.v1.saved_model.loader.load(
            sess, [tf.compat.v1.saved_model.tag_constants.SERVING], model_path)
        signature = meta_graph_def.signature_def
        # P-Net/R-Net signatures expose two outputs, O-Net exposes three
        # (the extra one carries the facial landmarks).
        pnet_fun = _signature_runner(sess, signature, 'pnet_predict', 2)
        rnet_fun = _signature_runner(sess, signature, 'rnet_predict', 2)
        onet_fun = _signature_runner(sess, signature, 'onet_predict', 3)

        minsize = 20  # minimum size of face
        threshold = [0.6, 0.7, 0.7]  # three steps's threshold
        factor = 0.709  # scale factor

        image_path = args.image_path
        filename = os.path.splitext(os.path.split(image_path)[1])[0]
        output_filename = os.path.join(output_dir, filename + '.png')
        try:
            img = imageio.imread(image_path)
        except (IOError, ValueError, IndexError) as e:
            print('{}: {}'.format(image_path, e))
            return
        if img.ndim < 2:
            # 1-D data cannot be an image. The original only printed here and
            # fell through to the channel slice below, which would crash.
            print('Unable to align "%s"' % image_path)
            return
        if img.ndim == 2:
            img = to_rgb(img)
        img = img[:, :, 0:3]  # drop any alpha channel

        bounding_boxes, key_points = detect_face.detect_face(
            img, minsize, pnet_fun, rnet_fun, onet_fun, threshold, factor)
        nrof_faces = bounding_boxes.shape[0]
        if nrof_faces == 0:
            print('Unable to align "%s"' % image_path)
            return

        det = bounding_boxes[:, 0:4]
        img_size = np.asarray(img.shape)[0:2]
        if nrof_faces > 1:
            # Keep the largest box, with extra weight on being centered.
            bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
            img_center = img_size / 2
            offsets = np.vstack(
                [(det[:, 0] + det[:, 2]) / 2 - img_center[1],
                 (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
            index = np.argmax(bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
            det = det[index, :]
        det = np.squeeze(det)
        bb = np.zeros(4, dtype=np.int32)
        print(det[0], det[1], det[2], det[3])
        bb[0] = np.maximum(det[0] - args.margin / 2, 0)
        bb[1] = np.maximum(det[1] - args.margin / 2, 0)
        bb[2] = np.minimum(det[2] + args.margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + args.margin / 2, img_size[0])
        showimage = cv2.imread(image_path)
        cv2.rectangle(showimage, (int(det[0]), int(det[1])),
                      (int(det[2]), int(det[3])), (0, 255, 0), 2)
        key_points = np.squeeze(key_points)
        print(key_points)
        # MTCNN landmarks are laid out as all x-coordinates followed by all
        # y-coordinates (5 + 5 for one face), so pair (x_i, y_i) as
        # (key_points[i], key_points[i + num_points]). The original used a
        # fixed offset of 4, which mixed x values with the wrong entries.
        num_points = int(len(key_points) / 2)
        for i in range(num_points):
            key_point = (int(key_points[i]), int(key_points[i + num_points]))
            cv2.circle(showimage, key_point, 1, (0, 255, 0), 4)
        cv2.imwrite(output_filename, showimage)

def _str2bool(value):
    """Convert a command-line string to a bool.

    argparse's ``type=bool`` treats any non-empty string — including
    ``'False'`` — as True, so an explicit converter is required.

    Raises:
        argparse.ArgumentTypeError: if *value* is not a recognized boolean.
    """
    lowered = value.lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def parse_arguments(argv):
    """Parse command-line options for the MTCNN alignment test script.

    Args:
        argv: list of argument strings (typically ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--image_path', type=str, help='Unaligned face images.', default=r'E:\testDir\ml\detect_face.jpg')
    parser.add_argument('--output_dir', type=str, help='Test MTCNN output dir',
                        default=r'E:\testDir\ml\mtcnn_output')
    parser.add_argument('--image_size', type=int, help='Image size (height, width) in pixels.', default=182)
    parser.add_argument('--margin', type=int,
                        help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
    parser.add_argument('--random_order',
                        help='Shuffles the order of images to enable alignment using multiple processes.',
                        action='store_true')
    parser.add_argument('--gpu_memory_fraction', type=float,
                        help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
    # type=_str2bool (not type=bool) so '--detect_multiple_faces False'
    # actually yields False; the CLI shape stays '--flag True/False'.
    parser.add_argument('--detect_multiple_faces', type=_str2bool,
                        help='Detect and align multiple faces per image.', default=False)

    return parser.parse_args(argv)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))