import tensorflow as tf
import numpy as np
import mtcnn.detect_face as detect_face
import os
import sys
import argparse
import imageio
import re

# Directory of the exported MTCNN SavedModel (version subdir '1') loaded in main().
# NOTE(review): hard-coded absolute Windows path — adjust for your environment.
model_path = r'E:\testDir\ml\mtcnn_output\export_model_for_serving\1'

def person_id_extract(text):
    """Extract 18-character Chinese national ID numbers from *text*.

    Returns all matched IDs concatenated into one string, each preceded by
    a single space (so a non-empty result starts with ' '); returns '' when
    no ID is present.
    """
    id_pattern = re.compile(
        r"([1-9]\d{5}(18|19|([23]\d))\d{2}((0[1-9])|(10|11|12))(([0-2][1-9])|10|20|30|31)\d{3}[0-9Xx])")
    pieces = []
    # findall yields one tuple per match because the pattern has groups;
    # group 0 of each tuple is the full ID string.
    for groups in id_pattern.findall(text):
        pieces.append(' ' + groups[0])
    return ''.join(pieces)
# Convert a single-channel (grayscale) photo into a 3-channel image
def to_rgb(img):
    """Replicate a 2-D grayscale image across three channels.

    img: 2-D ndarray; returns a (rows, cols, 3) uint8 array with the
    grayscale values copied into every channel.
    """
    rows, cols = img.shape
    rgb = np.empty((rows, cols, 3), dtype=np.uint8)
    for channel in range(3):
        rgb[:, :, channel] = img
    return rgb
def main(args):
    """Crop one face region from every image under ``args.image_dir``.

    Loads an MTCNN model exported as a TF SavedModel (from the module-level
    ``model_path``), runs the P-Net/R-Net/O-Net cascade on each image found
    by walking ``args.image_dir``, keeps a single face (the largest / most
    central one when several are detected), enlarges the face box, and
    writes the crop as ``<original name>.jpg`` into ``args.output_dir``.

    Uses ``args.image_dir``, ``args.output_dir`` and ``args.margin``.
    """
    output_dir = args.output_dir
    with tf.compat.v1.Session() as sess:
        # Load the previously exported SavedModel into this session's graph
        meta_graph_def = tf.compat.v1.saved_model.loader.load(sess, [tf.compat.v1.saved_model.tag_constants.SERVING], model_path)
        signature = meta_graph_def.signature_def
        # Resolve the named input/output tensors of each sub-network from the
        # signature defs ('pnet_predict' / 'rnet_predict' / 'onet_predict')
        pnet_image_tensor_name = signature['pnet_predict'].inputs['images'].name
        pnet_out1_tensor_name = signature['pnet_predict'].outputs['result1'].name
        pnet_out2_tensor_name = signature['pnet_predict'].outputs['result2'].name
        pnet_image = sess.graph.get_tensor_by_name(pnet_image_tensor_name)
        pnet_out1 = sess.graph.get_tensor_by_name(pnet_out1_tensor_name)
        pnet_out2 = sess.graph.get_tensor_by_name(pnet_out2_tensor_name)

        # Wrap P-Net in a callable with the interface detect_face.detect_face expects
        pnet_fun = lambda img: sess.run((pnet_out1, pnet_out2), feed_dict={pnet_image: img})

        rnet_image_tensor_name = signature['rnet_predict'].inputs['images'].name
        rnet_out1_tensor_name = signature['rnet_predict'].outputs['result1'].name
        rnet_out2_tensor_name = signature['rnet_predict'].outputs['result2'].name
        rnet_image = sess.graph.get_tensor_by_name(rnet_image_tensor_name)
        rnet_out1 = sess.graph.get_tensor_by_name(rnet_out1_tensor_name)
        rnet_out2 = sess.graph.get_tensor_by_name(rnet_out2_tensor_name)

        # Same wrapper for R-Net
        rnet_fun = lambda img: sess.run((rnet_out1, rnet_out2), feed_dict={rnet_image: img})

        onet_image_tensor_name = signature['onet_predict'].inputs['images'].name
        onet_out1_tensor_name = signature['onet_predict'].outputs['result1'].name
        onet_out2_tensor_name = signature['onet_predict'].outputs['result2'].name
        onet_out3_tensor_name = signature['onet_predict'].outputs['result3'].name
        onet_image = sess.graph.get_tensor_by_name(onet_image_tensor_name)
        onet_out1 = sess.graph.get_tensor_by_name(onet_out1_tensor_name)
        onet_out2 = sess.graph.get_tensor_by_name(onet_out2_tensor_name)
        onet_out3 = sess.graph.get_tensor_by_name(onet_out3_tensor_name)
        # O-Net has a third output (presumably facial landmarks — confirm against export)
        onet_fun = lambda img: sess.run((onet_out1, onet_out2, onet_out3),
                                        feed_dict={onet_image: img})

        minsize = 40  # minimum face size in pixels
        threshold = [0.6, 0.7, 0.7]  # detection thresholds for the three cascade stages
        factor = 0.709  # scale factor between levels of the image pyramid
        # Walk the input directory tree and crop every image file found
        for root, dirs, files in os.walk(args.image_dir):
            for file in files:
                image_path = os.path.join(root, file)
                filename = os.path.splitext(os.path.split(image_path)[1])[0]
                # Destination path of the cropped photo
                # NOTE(review): os.mkdir fails if output_dir's parent is missing;
                # os.makedirs(output_dir, exist_ok=True) would be safer — confirm.
                if not os.path.exists(output_dir):
                    os.mkdir(output_dir)
                output_filename = os.path.join(output_dir, filename + '.jpg')
                try:
                    img = imageio.imread(image_path)
                except (IOError, ValueError, IndexError) as e:
                    errorMessage = '{}: {}'.format(image_path, e)
                    print(errorMessage)
                else:
                    if img.ndim < 2:
                        print('Unable to recognition "%s"' % image_path)
                        continue
                    if img.ndim == 2:
                        # Grayscale input: replicate to 3 channels
                        img = to_rgb(img)
                    # Drop any alpha channel, keep RGB only
                    img = img[:, :, 0:3]
                    # Run the full MTCNN cascade with the three network callables
                    bounding_boxes, key_points = detect_face.detect_face(img, minsize, pnet_fun, rnet_fun, onet_fun, threshold, factor)
                    nrof_faces = bounding_boxes.shape[0]
                    if nrof_faces > 0:
                        det = bounding_boxes[:, 0:4]
                        img_size = np.asarray(img.shape)[0:2]
                        if nrof_faces > 1:
                            # Several faces: prefer the largest box, penalising
                            # distance from the image center
                            bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
                            img_center = img_size / 2
                            offsets = np.vstack(
                                [(det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
                            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
                            index = np.argmax(bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
                            det = det[index, :]
                        det = np.squeeze(det)
                        bb = np.zeros(4, dtype=np.int32)
                        # (det[0], det[1]) and (det[2], det[3]) are the top-left and
                        # bottom-right corners of the face region.
                        # Pad the face box by a fixed margin, clamped to the image bounds.
                        # NOTE(review): bb is computed but unused below — pt1/pt2 are
                        # derived from det instead; confirm whether bb was intended.
                        bb[0] = np.maximum(det[0] - args.margin / 2, 0)
                        bb[1] = np.maximum(det[1] - args.margin / 2, 0)
                        bb[2] = np.minimum(det[2] + args.margin / 2, img_size[1])
                        bb[3] = np.minimum(det[3] + args.margin / 2, img_size[0])
                        # pt1 is the crop's top-left corner, pt2 its bottom-right.
                        # Expand the face box by 1/2 of its width left and right,
                        # 1/2 of its height above, and 2/3 of its height below.
                        # NOTE(review): pt1 may go negative, which Python slicing
                        # interprets as wrap-around from the end — confirm intended.
                        pt1 = (int(det[0])-int((int(det[2])-int(det[0]))/2), int(det[1])-int((int(det[3])-int(det[1]))/2))
                        pt2 = (int(det[2])+int((int(det[2])-int(det[0]))/2), int(det[3])+int((int(det[3])-int(det[1]))*2/3))
                        cropped = img[pt1[1]:pt2[1], pt1[0]:pt2[0], :]
                        imageio.imwrite(output_filename, cropped)
                    else:
                        print('Unable to recognition "%s"' % image_path)

def _str2bool(value):
    """argparse converter that parses common boolean spellings.

    Raises argparse.ArgumentTypeError on anything unrecognised, so that
    '--detect_multiple_faces False' really yields False. Plain ``type=bool``
    treats every non-empty string (including 'False') as True.
    """
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def parse_arguments(argv):
    """Parse command-line options for the MTCNN face-cropping script.

    argv: list of argument strings (e.g. sys.argv[1:]).
    Returns an argparse.Namespace with image_dir, output_dir, image_size,
    margin, random_order, gpu_memory_fraction and detect_multiple_faces.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--image_dir', type=str, help='Unaligned face images.', default=r'E:\testDir\ZS_BYZ\20210414103818_re')
    parser.add_argument('--output_dir', type=str, help='Test MTCNN output dir',
                        default=r'E:\testDir\ZS_BYZ\20210414103818_result_re')
    parser.add_argument('--image_size', type=int, help='Image size (height, width) in pixels.', default=182)
    parser.add_argument('--margin', type=int,
                        help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
    parser.add_argument('--random_order',
                        help='Shuffles the order of images to enable alignment using multiple processes.',
                        action='store_true')
    parser.add_argument('--gpu_memory_fraction', type=float,
                        help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
    # BUG FIX: was ``type=bool`` — bool('False') is True, so the flag could
    # never be explicitly set to False from the command line.
    parser.add_argument('--detect_multiple_faces', type=_str2bool,
                        help='Detect and align multiple faces per image.', default=False)

    return parser.parse_args(argv)
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run the cropping pipeline.
    main(parse_arguments(sys.argv[1:]))