# Configure TensorFlow (TF1-style) to allocate GPU memory on demand
# ("allow growth") instead of grabbing all GPU memory up front.
import tensorflow as tf
import keras

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
keras.backend.tensorflow_backend.set_session(tf.Session(config=config))

import cv2
import numpy as np
import utils.utils as utils
from net.mtcnn import mtcnn


def init_camera(img_size=None):
    """Build stereo rectification/undistortion maps from fixed calibration data.

    Fix: the original silently depended on the module-level global ``imgSize``,
    which is defined *after* this function in the file; the image size is now
    an explicit, backward-compatible parameter.

    Parameters
    ----------
    img_size : sequence of two ints, optional
        Image size as (width, height). Defaults to the module-level
        ``imgSize`` to preserve the original behavior.

    Returns
    -------
    tuple
        (cameraMatL, cameraMatR, map1L, map2L, map1R, map2R, Q, R, T) where
        the map pairs feed ``cv2.remap`` for each view and Q is the
        disparity-to-depth reprojection matrix.
    """
    if img_size is None:
        img_size = imgSize
    # Intrinsic matrices and distortion coefficients from offline calibration.
    cameraMatL = np.array(
        [[736.0782263101347, 0, 667.0776992045809], [0, 735.4788953393497, 416.0432707182775], [0, 0, 1]])
    cameraMatR = np.array(
        [[739.4163033284111, 0, 690.866677866396], [0, 739.2275006227519, 365.611579247121], [0, 0, 1]])
    distCoeffL = np.array(
        [[0.1073336228536438, -0.1209288117734842, -0.001059375805799081, -0.001222676712819057, -0.05013492123452137]])
    distCoeffR = np.array(
        [[0.09773561120050704, -0.09023619607905146, -0.00122165657009673, 0.000327305117498997, -0.06235629563496246]])
    # Rotation and translation of the right camera relative to the left
    # (translation in mm; baseline is ~59.4 mm along x).
    R = np.array([[0.9999682416680484, 0.000229035368787236, -0.007966379234710925],
                  [-0.0001901982530434424, 0.9999880963784729, 0.004875543660216398],
                  [0.007967401077887829, -0.004873873629668867, 0.9999563819866875]])
    T = np.array([[-59.43248187329462], [-0.06074768384861339], [0.1742225241296646]])
    # Compute rectification transforms and the reprojection matrix Q,
    # then derive the per-view remap tables. alpha=0 crops to valid pixels.
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(cameraMatL, distCoeffL, cameraMatR, distCoeffR, img_size, R, T,
                                                      alpha=0)
    map1L, map2L = cv2.initUndistortRectifyMap(cameraMatL, distCoeffL, R1, P1, img_size, cv2.CV_32FC1)
    map1R, map2R = cv2.initUndistortRectifyMap(cameraMatR, distCoeffR, R2, P2, img_size, cv2.CV_32FC1)
    return cameraMatL, cameraMatR, map1L, map2L, map1R, map2R, Q, R, T


def file_detect(img_name='left1.png'):
    """Run MTCNN face detection on one image under 'facedata/' and display it.

    Fixes: ``cv2.imread`` returns ``None`` for a missing/unreadable file and
    the original then crashed later with an opaque error — guard explicitly;
    the unused ``init_camera()`` call is removed; the display window is now
    destroyed after the key press. The hard-coded filename became a
    backward-compatible parameter.

    Parameters
    ----------
    img_name : str
        File name inside the 'facedata/' directory (default preserves the
        original behavior).
    """
    img = cv2.imread('facedata/' + img_name)
    if img is None:
        print('读取图片失败: facedata/' + img_name)
        return

    model = mtcnn()
    # Face-score thresholds for the three cascaded MTCNN stages.
    threshould = [0.5, 0.7, 0.8]
    boxes = model.detect_face(img, threshould)

    if len(boxes) == 0:
        print('没有检测出人脸，请重新选图')
        return

    # Draw on a copy so the original image stays untouched.
    imgCopy = img.copy()
    utils.draw(imgCopy, boxes)
    cv2.namedWindow('dawn', cv2.WINDOW_NORMAL)
    cv2.imshow('dawn', imgCopy)
    cv2.waitKey(0)
    cv2.destroyWindow('dawn')


# Single-view capture size, ordered (W, H) — width first, not height.
imgSize = [1280, 720]


# Real-time face detection on a single-device stereo camera whose two views
# arrive side by side in one frame.
def double_camera_detect():
    """Grab side-by-side stereo frames, rectify both halves, detect faces in
    each with MTCNN, check coplanarity, and display the annotated pair.

    Fixes: the capture device was never released (including on the failure
    ``return``), the loop was unbreakable, and capture properties used magic
    numbers 3/4 — now uses named constants, a 'q'-to-quit key, and a
    ``try/finally`` that releases the camera and destroys the windows.
    """
    cameraMatL, cameraMatR, map1L, map2L, map1R, map2R, Q, R, T = init_camera()
    mtcnn_model = mtcnn()
    # Face-score thresholds for the three cascaded MTCNN stages.
    threshould = [0.5, 0.7, 0.8]

    cap = cv2.VideoCapture(0)
    try:
        # Width is doubled because both views share a single frame.
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, imgSize[0] * 2)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, imgSize[1])
        # Resizable display window.
        cv2.namedWindow('w1', cv2.WINDOW_NORMAL)
        while True:
            ok, frame = cap.read()
            if not ok:
                print('摄像头读取图片失败')
                return
            # Split the concatenated frame into left and right views.
            imgL = frame[0:imgSize[1], 0:imgSize[0]]
            imgR = frame[0:imgSize[1], imgSize[0]:(imgSize[0] * 2)]
            # Rectify each view so epipolar lines become horizontal.
            rL = cv2.remap(imgL, map1L, map2L, cv2.INTER_LINEAR)
            rR = cv2.remap(imgR, map1R, map2R, cv2.INTER_LINEAR)
            # Detect faces independently in both rectified views.
            boxesL = mtcnn_model.detect_face(rL, threshould)
            boxesR = mtcnn_model.detect_face(rR, threshould)
            copyL = rL.copy()
            copyR = rR.copy()
            # Coplanarity test between left/right detections; threshold in mm.
            judge = utils.judge_coplanarity(boxesL, boxesR, Q, 18)
            if len(boxesL) != 0:
                utils.draw(copyL, boxesL)
            if len(boxesR) != 0:
                utils.draw(copyR, boxesR)
            utils.draw_(copyL, judge)
            # Show both annotated views side by side in one window.
            link = np.concatenate([copyL, copyR], axis=1)
            cv2.imshow("w1", link)
            # Press 'q' to exit cleanly instead of looping forever.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                return
    finally:
        cap.release()
        cv2.destroyAllWindows()

def file_match(left_name='leftP1.png', right_name='rightP1.png'):
    """Compute and display an SGBM disparity map for a stereo image pair.

    Fixes: ``cv2.imread`` returns ``None`` for a missing file and the original
    crashed later — guard explicitly; the remap results are now taken from the
    return value instead of the dst-argument idiom. Filenames became
    backward-compatible parameters.

    Parameters
    ----------
    left_name, right_name : str
        Left/right image file names inside 'facedata/' (defaults preserve the
        original behavior).
    """
    cameraMatL, cameraMatR, map1L, map2L, map1R, map2R, Q, R, T = init_camera()
    imgL = cv2.imread('facedata/' + left_name)
    imgR = cv2.imread('facedata/' + right_name)
    if imgL is None or imgR is None:
        print('读取图片失败，请检查 facedata 目录')
        return
    # Rectify both views before stereo matching.
    rL = cv2.remap(imgL, map1L, map2L, cv2.INTER_LINEAR)
    rR = cv2.remap(imgR, map1R, map2R, cv2.INTER_LINEAR)
    # SGBM returns the raw disparity, an 8-bit display version, depth, and
    # the reprojected 3-D point cloud (per the utils helper's contract).
    disp, disp8, depth, xyz = utils.SGBM(rL, rR, imgSize, Q)
    cv2.imshow('disp_show', disp)
    cv2.waitKey(0)


if __name__ == '__main__':
    # Entry point: live stereo detection. The commented alternatives run
    # single-image detection and file-based stereo matching instead.
    # file_detect()
    double_camera_detect()
    # file_match()