import tensorflow as tf
import time
import grpc
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import sys
import cv2
import numpy as np
import mtcnn.detect_face as detect_face
import os
import argparse
import imageio


def detect_face_fuc(img, minsize, threshold, factor, server='139.199.34.94:8500'):
    """Run the three-stage MTCNN face detector through a TF-Serving endpoint.

    Args:
        img: RGB image array, assumed shape (height, width, 3) — TODO confirm
            with callers (detect() slices to 3 channels before calling).
        minsize: minimum face size in pixels.
        threshold: [th1, th2, th3] score thresholds for the P/R/O-net stages.
        factor: scale factor used to build the image pyramid.
        server: host:port of the TensorFlow-Serving gRPC endpoint
            (default preserved for backward compatibility).

    Returns:
        (total_boxes, points): boxes as an (n, 5) array of
        [x1, y1, x2, y2, score] rows, and landmarks as a 10 x n array
        ([x1..x5, y1..y5] per column). Empty results when nothing is found.
    """
    # Set up gRPC; raise the message-size limits because the serialized
    # image-pyramid tensors can exceed the 4 MB default.
    options = [('grpc.max_send_message_length', 1000 * 1024 * 1024),
               ('grpc.max_receive_message_length', 1000 * 1024 * 1024)]
    channel = grpc.insecure_channel(server, options=options)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    def _predict(signature_name, array, output_keys):
        # One round trip to the 'mtcnn' model; returns the ndarrays for
        # output_keys, in order. Shared by all three stages.
        request = predict_pb2.PredictRequest()
        request.model_spec.name = 'mtcnn'
        request.model_spec.signature_name = signature_name
        tensor_image = tf.compat.v1.make_tensor_proto(array, dtype=tf.float32, shape=array.shape)
        request.inputs['images'].CopyFrom(tensor_image)
        response = stub.Predict.future(request, 30.0).result()  # 30 secs timeout
        # BUGFIX: tf.contrib was removed in TF2 (this file already uses
        # tf.compat.v1); tf.make_ndarray is the supported replacement for
        # tf.contrib.util.make_ndarray.
        return [tf.make_ndarray(response.outputs[key]) for key in output_keys]

    factor_count = 0
    total_boxes = np.empty((0, 9))
    points = []
    h = img.shape[0]
    w = img.shape[1]
    minl = np.amin([h, w])
    m = 12.0 / minsize
    minl = minl * m
    # Create the scale pyramid: keep shrinking by `factor` until the shorter
    # side would fall below P-net's 12-pixel input.
    scales = []
    while minl >= 12:
        scales += [m * np.power(factor, factor_count)]
        minl = minl * factor
        factor_count += 1

    # First stage: run P-net over every pyramid scale and collect proposals.
    for scale in scales:
        hs = int(np.ceil(h * scale))
        ws = int(np.ceil(w * scale))
        im_data = detect_face.imresample(img, (hs, ws))
        im_data = (im_data - 127.5) * 0.0078125  # normalize to roughly [-1, 1]
        img_x = np.expand_dims(im_data, 0)
        img_y = np.transpose(img_x, (0, 2, 1, 3))
        print("pnet 请求。")
        nd_array1, nd_array2 = _predict('pnet_predict', img_y, ['result1', 'result2'])
        out0 = np.transpose(nd_array1, (0, 2, 1, 3))
        out1 = np.transpose(nd_array2, (0, 2, 1, 3))

        boxes, _ = detect_face.generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0])

        # Inter-scale NMS.
        pick = detect_face.nms(boxes.copy(), 0.5, 'Union')
        if boxes.size > 0 and pick.size > 0:
            boxes = boxes[pick, :]
            total_boxes = np.append(total_boxes, boxes, axis=0)

    numbox = total_boxes.shape[0]
    if numbox > 0:
        pick = detect_face.nms(total_boxes.copy(), 0.7, 'Union')
        total_boxes = total_boxes[pick, :]
        # Apply P-net's bounding-box regression (columns 5-8 hold the offsets).
        regw = total_boxes[:, 2] - total_boxes[:, 0]
        regh = total_boxes[:, 3] - total_boxes[:, 1]
        qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
        qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
        qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
        qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
        total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
        total_boxes = detect_face.rerec(total_boxes.copy())
        total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = detect_face.pad(total_boxes.copy(), w, h)

    numbox = total_boxes.shape[0]
    if numbox > 0:
        # Second stage: refine the proposals with R-net on 24x24 crops.
        tempimg = np.zeros((24, 24, 3, numbox))
        for k in range(0, numbox):
            tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
            tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
            if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
                tempimg[:, :, :, k] = detect_face.imresample(tmp, (24, 24))
            else:
                # BUGFIX: np.empty() without a shape raises TypeError, and the
                # caller unpacks two values; return empty-but-consistent results.
                return np.empty((0, 5)), np.empty((10, 0))
        tempimg = (tempimg - 127.5) * 0.0078125
        tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
        print("rnet 请求。")
        nd_array1, nd_array2 = _predict('rnet_predict', tempimg1, ['result1', 'result2'])
        out0 = np.transpose(nd_array1)
        out1 = np.transpose(nd_array2)

        score = out1[1, :]
        ipass = np.where(score > threshold[1])
        total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
        mv = out0[:, ipass[0]]
        if total_boxes.shape[0] > 0:
            pick = detect_face.nms(total_boxes, 0.7, 'Union')
            total_boxes = total_boxes[pick, :]
            total_boxes = detect_face.bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
            total_boxes = detect_face.rerec(total_boxes.copy())

    numbox = total_boxes.shape[0]
    if numbox > 0:
        # Third stage: O-net on 48x48 crops yields final boxes and landmarks.
        total_boxes = np.fix(total_boxes).astype(np.int32)
        dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = detect_face.pad(total_boxes.copy(), w, h)
        tempimg = np.zeros((48, 48, 3, numbox))
        for k in range(0, numbox):
            tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
            tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]
            if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:
                tempimg[:, :, :, k] = detect_face.imresample(tmp, (48, 48))
            else:
                # BUGFIX: same as the second stage — np.empty() would raise.
                return np.empty((0, 5)), np.empty((10, 0))
        tempimg = (tempimg - 127.5) * 0.0078125
        tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
        print("onet 请求。")
        nd_array1, nd_array2, nd_array3 = _predict('onet_predict', tempimg1,
                                                   ['result1', 'result2', 'result3'])
        out0 = np.transpose(nd_array1)
        out1 = np.transpose(nd_array2)
        out2 = np.transpose(nd_array3)

        score = out2[1, :]
        points = out1
        ipass = np.where(score > threshold[2])
        points = points[:, ipass[0]]
        total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
        mv = out0[:, ipass[0]]

        # Map the box-relative landmark coordinates back into image space
        # (rows 0-4 are x, rows 5-9 are y).
        w = total_boxes[:, 2] - total_boxes[:, 0] + 1
        h = total_boxes[:, 3] - total_boxes[:, 1] + 1
        points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
        points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
        if total_boxes.shape[0] > 0:
            total_boxes = detect_face.bbreg(total_boxes.copy(), np.transpose(mv))
            pick = detect_face.nms(total_boxes.copy(), 0.7, 'Min')
            total_boxes = total_boxes[pick, :]
            points = points[:, pick]

    return total_boxes, points

def to_rgb(img):
    """Expand a 2-D grayscale image into a 3-channel uint8 RGB image
    by replicating the single plane into each channel."""
    rows, cols = img.shape
    rgb = np.empty((rows, cols, 3), dtype=np.uint8)
    for channel in range(3):
        rgb[:, :, channel] = img
    return rgb

def detect(args):
    """Detect the most prominent face in args.image_path, draw its bounding
    box and five landmarks, and write the annotated PNG into args.output_dir.

    Args:
        args: parsed argparse namespace; reads image_path, output_dir, margin.
    """
    output_dir = args.output_dir

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps's threshold
    factor = 0.709  # scale factor
    image_path = args.image_path
    filename = os.path.splitext(os.path.split(image_path)[1])[0]
    output_filename = os.path.join(output_dir, filename + '.png')
    try:
        img = imageio.imread(image_path)
    except (IOError, ValueError, IndexError) as e:
        errorMessage = '{}: {}'.format(image_path, e)
        print(errorMessage)
    else:
        if img.ndim < 2:
            # BUGFIX: previously only printed and fell through, crashing on
            # the channel slice below for 1-D data.
            print('Unable to align "%s"' % image_path)
            return
        if img.ndim == 2:
            img = to_rgb(img)
        img = img[:, :, 0:3]

        bounding_boxes, key_points = detect_face_fuc(img, minsize, threshold, factor)
        nrof_faces = bounding_boxes.shape[0]
        if nrof_faces > 0:
            det = bounding_boxes[:, 0:4]
            img_size = np.asarray(img.shape)[0:2]
            if nrof_faces > 1:
                # Keep the largest face, penalized by distance from the centre.
                bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
                img_center = img_size / 2
                offsets = np.vstack(
                    [(det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:, 3]) / 2 - img_center[0]])
                offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
                index = np.argmax(bounding_box_size - offset_dist_squared * 2.0)  # some extra weight on the centering
                det = det[index, :]
                # BUGFIX: keep only the landmarks of the selected face;
                # previously the full 10 x n matrix leaked into the draw loop.
                key_points = key_points[:, index]
            det = np.squeeze(det)
            bb = np.zeros(4, dtype=np.int32)
            print(det[0], det[1], det[2], det[3])
            bb[0] = np.maximum(det[0] - args.margin / 2, 0)
            bb[1] = np.maximum(det[1] - args.margin / 2, 0)
            bb[2] = np.minimum(det[2] + args.margin / 2, img_size[1])
            bb[3] = np.minimum(det[3] + args.margin / 2, img_size[0])
            # print(bb)
            # cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
            # scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
            # misc.imsave(output_filename, scaled)
            showimage = cv2.imread(image_path)
            cv2.rectangle(showimage, (int(det[0]), int(det[1])), (int(det[2]), int(det[3])), (0, 255, 0), 2)
            key_points = np.squeeze(key_points)
            print(key_points)
            # Landmark layout is [x1..x5, y1..y5]: the y of point i is at i + 5.
            for i in range(int(len(key_points) / 2)):
                # BUGFIX: was key_points[i + 4], which paired x_i with x_{i+4}.
                key_point = (int(key_points[i]), int(key_points[i + 5]))
                cv2.circle(showimage, key_point, 1, (0, 255, 0), 4)
            cv2.imwrite(output_filename, showimage)
        else:
            print('Unable to align "%s"' % image_path)

def parse_arguments(argv):
    """Parse command-line arguments for the MTCNN detection script.

    Args:
        argv: list of argument strings (e.g. sys.argv[1:]).

    Returns:
        argparse.Namespace with image_path, output_dir, image_size, margin,
        random_order, gpu_memory_fraction and detect_multiple_faces.
    """
    print(argv)
    parser = argparse.ArgumentParser()

    def _str2bool(value):
        # BUGFIX: argparse's type=bool treats ANY non-empty string — including
        # 'False' — as True; parse the common truthy spellings explicitly.
        return value.lower() in ('true', '1', 'yes')

    parser.add_argument('--image_path', type=str, help='Unaligned face images.', default=r'E:\testDir\ml\detect_face.jpg')
    parser.add_argument('--output_dir', type=str, help='Test MTCNN output dir',
                        default=r'E:\testDir\ml\mtcnn_output')
    parser.add_argument('--image_size', type=int, help='Image size (height, width) in pixels.', default=182)
    parser.add_argument('--margin', type=int,
                        help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
    parser.add_argument('--random_order',
                        help='Shuffles the order of images to enable alignment using multiple processes.',
                        action='store_true')
    parser.add_argument('--gpu_memory_fraction', type=float,
                        help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
    parser.add_argument('--detect_multiple_faces', type=_str2bool,
                        help='Detect and align multiple faces per image.', default=False)

    return parser.parse_args(argv)
if __name__ == '__main__':
    # Script entry point: parse CLI flags (program name stripped) and run detection.
    cli_args = parse_arguments(sys.argv[1:])
    detect(cli_args)