import numpy as np


def get_scales(h, w):
    """Build the image-pyramid scale factors for the PNet stage.

    The image is first normalised so that a short side above 500 px is
    shrunk to 500, or a long side below 500 px is grown to 500.  From
    there the pyramid shrinks by ``rate`` per level until the short side
    would fall below 12 px (PNet's input window).

    Args:
        h: original image height in pixels.
        w: original image width in pixels.

    Returns:
        list[float]: per-level scale factors relative to the original image.
    """
    rate = 0.709  # per-level shrink factor (~1/sqrt(2), halves the area)
    scale = 1.0   # cumulative scale of the current level w.r.t. the original
    if min(h, w) > 500:
        # Short side too large: pin it to 500.
        scale = 500 / min(h, w)
        h, w = int(scale * h), int(scale * w)
    elif max(h, w) < 500:
        # Long side too small: pin it to 500.
        scale = 500 / max(h, w)
        h, w = int(scale * h), int(scale * w)
    scales = []
    short_side = min(h, w)
    while short_side >= 12:
        scales.append(scale)
        scale *= rate
        short_side *= rate
    return scales


def rect_square(rectangle):
    """Expand each box to a square, in place, keeping its centre fixed.

    Args:
        rectangle: float array of shape (N, >=4); columns 0-3 are
            x1, y1, x2, y2 (one box per row).  Mutated in place.

    Returns:
        The same array, with each box's short side grown to match its
        long side about the original centre.
    """
    width = rectangle[:, 2] - rectangle[:, 0]
    height = rectangle[:, 3] - rectangle[:, 1]
    side = np.maximum(width, height)
    # Shift each edge so the box becomes side x side around the old centre.
    rectangle[:, 0] = rectangle[:, 0] + width / 2 - side / 2
    rectangle[:, 1] = rectangle[:, 1] + height / 2 - side / 2
    rectangle[:, 2] = rectangle[:, 2] - width / 2 + side / 2
    rectangle[:, 3] = rectangle[:, 3] - height / 2 + side / 2
    return rectangle


def NMS(boxes, threshold):
    """Greedy non-maximum suppression.

    Repeatedly keeps the highest-scoring remaining box and discards any
    other box whose IoU with it exceeds ``threshold``.

    Args:
        boxes: array of shape (N, >=5); columns 0-3 are x1, y1, x2, y2
            and column 4 is the confidence score.
        threshold: IoU above which a lower-scored box is suppressed.

    Returns:
        list: the kept rows of ``boxes`` (as nested Python lists),
        ordered by descending score.
    """
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    scores = boxes[:, 4]
    # Indices sorted by descending score.
    index = scores.argsort()[::-1]
    # Areas use the inclusive "+1 pixel" convention of the MTCNN paper.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    keep = []
    while len(index) > 0:
        i = index[0]
        keep.append(i)
        # Intersection of the kept box with every remaining candidate.
        X1 = np.maximum(x1[i], x1[index[1:]])
        Y1 = np.maximum(y1[i], y1[index[1:]])
        X2 = np.minimum(x2[i], x2[index[1:]])
        Y2 = np.minimum(y2[i], y2[index[1:]])
        # Clamp at 0: non-overlapping boxes give a negative extent.
        inter = np.maximum(0.0, X2 - X1 + 1) * np.maximum(0.0, Y2 - Y1 + 1)
        iou = inter / (areas[i] + areas[index[1:]] - inter)
        # +1 shifts positions back into ``index`` (slot 0 was the kept box).
        # (Previously this expression was computed twice, once into an
        # unused local; the duplicate is removed.)
        index = index[np.where(iou <= threshold)[0] + 1]
    return boxes[keep].tolist()


# Convert PNet's dense output map into face boxes in original-image coordinates,
# using the convolutional stride mapping and the pyramid scale.
def process_pnet_12out(pnetOut, scale, threshold, originH, originW):
    """Decode PNet output for one pyramid level.

    Args:
        pnetOut: pair of network outputs; pnetOut[0][0][:, :, 1] is the
            face-probability map, pnetOut[1][0] the 4-channel box regression.
        scale: pyramid scale of this level relative to the original image.
        threshold: minimum face probability to keep a cell.
        originH, originW: original image size, used to clip boxes.

    Returns:
        Array of shape (M, 5): squared, clipped boxes [x1, y1, x2, y2, score].
    """
    stride = 2
    probs = pnetOut[0][0][:, :, 1]  # face score lives in channel 1
    offsets = pnetOut[1][0]
    # Cells passing the score threshold; note y indexes height.
    y, x = np.where(probs >= threshold)
    score = probs[y, x][:, np.newaxis]
    # Stride mapping gives each cell's receptive-field top-left on this level;
    # the 12x12 window spans 11 px in inclusive coordinates.
    top_left = np.concatenate(
        [(stride * x)[:, np.newaxis], (stride * y)[:, np.newaxis]], axis=-1)
    bottom_right = top_left + 11
    prior = np.concatenate([top_left, bottom_right], axis=-1)
    # Apply the regression (scaled by the 12 px window) and divide the
    # pyramid scale back out to land in original-image coordinates.
    ansboxes = (prior + 12 * offsets[y, x]) / scale
    # Square up the boxes, then clip them to the image.
    ansboxes = rect_square(ansboxes)
    ansboxes[:, [0, 2]] = np.clip(ansboxes[:, [0, 2]], 0, originW)
    ansboxes[:, [1, 3]] = np.clip(ansboxes[:, [1, 3]], 0, originH)
    return np.concatenate([ansboxes, score], -1)


def process_rnet_24out(claOut, offset, boxes, threshould, originH, originW):
    """Refine RNet candidates: filter by score, regress, square, clip, NMS.

    Args:
        claOut: (N, 2) classification output; face score in column 1.
        offset: (N, 4) box-regression output.
        boxes: (N, >=4) candidate boxes from the previous stage.
        threshould: minimum face score to keep a candidate.
        originH, originW: original image size, used to clip boxes.

    Returns:
        Array of kept boxes [x1, y1, x2, y2, score] after NMS at IoU 0.7.
    """
    keep = claOut[:, 1] >= threshould
    picked = boxes[keep, 0:4]
    reg = offset[keep]
    # Slicing 1:2 keeps the score 2-D for the later concatenation.
    score = claOut[keep, 1:2]
    # Regression offsets are fractions of the box width/height.
    w = (picked[:, 2] - picked[:, 0])[:, np.newaxis]
    h = (picked[:, 3] - picked[:, 1])[:, np.newaxis]
    picked[:, [0, 2]] += reg[:, [0, 2]] * w
    picked[:, [1, 3]] += reg[:, [1, 3]] * h

    # Square up, attach the score column.
    picked = rect_square(picked)
    picked = np.concatenate([picked, score], axis=-1)

    # Regression may have pushed boxes outside the image; clip back in.
    picked[:, [0, 2]] = np.clip(picked[:, [0, 2]], 0, originW)
    picked[:, [1, 3]] = np.clip(picked[:, [1, 3]], 0, originH)
    return np.array(NMS(picked, 0.7))


# Final ONet stage: score filter, landmark + box regression, clip, square, NMS.
def process_onet_48out(claOut, offset, landmarkRegress, boxes, threshould, originH, originW):
    """Decode ONet output into final boxes with five facial landmarks.

    Args:
        claOut: (N, 2) classification output; face score in column 1.
        offset: (N, 4) box-regression output.
        landmarkRegress: (N, 10) landmark regression, laid out as
            x1..x5 then y1..y5 (NOT interleaved x1 y1 ...).
        boxes: (N, >=4) candidate boxes from RNet.
        threshould: minimum face score to keep a candidate.
        originH, originW: original image size, used for clipping.

    Returns:
        Array of kept rows [x1, y1, x2, y2, 10 landmark coords, score]
        after NMS at IoU 0.7.
    """
    keep = claOut[:, 1] >= threshould
    score = claOut[keep, 1][:, np.newaxis]
    reg = offset[keep]
    lm_reg = landmarkRegress[keep]
    picked = boxes[keep, 0:4]

    # Landmarks are offsets from the (pre-regression) box top-left,
    # scaled by the box width/height; output interleaves to x, y pairs.
    landmarks = np.zeros_like(lm_reg)
    w = (picked[:, 2] - picked[:, 0])[:, np.newaxis]
    h = (picked[:, 3] - picked[:, 1])[:, np.newaxis]
    landmarks[:, 0::2] = w * lm_reg[:, 0:5] + picked[:, 0:1]
    landmarks[:, 1::2] = h * lm_reg[:, 5:10] + picked[:, 1:2]
    picked[:, [0, 2]] += reg[:, [0, 2]] * w
    picked[:, [1, 3]] += reg[:, [1, 3]] * h

    # Clip everything back inside the image after regression.
    picked[:, [0, 2]] = np.clip(picked[:, [0, 2]], 0, originW)
    picked[:, [1, 3]] = np.clip(picked[:, [1, 3]], 0, originH)
    landmarks[:, 0::2] = np.clip(landmarks[:, 0::2], 0, originW)
    landmarks[:, 1::2] = np.clip(landmarks[:, 1::2], 0, originH)

    # NOTE(review): unlike process_rnet_24out, squaring happens AFTER the
    # clip here, so squared boxes can extend past the image — confirm intended.
    picked = rect_square(picked)
    picked = np.concatenate([picked, landmarks, score], axis=-1)
    return np.array(NMS(picked, 0.7))


import cv2


# Draw the face box and its five landmark points on the image.
def draw(img, boxes, color=(165, 155, 70)):
    """Draw the highest-scoring face box and its 5 landmarks onto ``img``.

    Args:
        img: BGR image (modified in place by OpenCV).
        boxes: rows of [x1, y1, x2, y2, 10 landmark coords, score];
            column 14 is the score.
        color: BGR drawing colour.
    """
    best = np.argmax(boxes[:, 14])
    p1 = (int(boxes[best, 0]), int(boxes[best, 1]))
    p2 = (int(boxes[best, 2]), int(boxes[best, 3]))
    cv2.rectangle(img, p1, p2, color, 10, shift=0)
    # Landmarks are stored as x,y pairs in columns 4..13.
    for col in range(4, 14, 2):
        center = (int(boxes[best, col]), int(boxes[best, col + 1]))
        cv2.circle(img, center, 8, color, -1)


# Overlay the coplanarity verdict text; judge in {-1, 0, 1}, see labels below.
def draw_(img, judge):
    """Write the coplanarity result onto ``img``.

    Args:
        img: BGR image (modified in place).
        judge: 0 -> "Coplanar", 1 -> "Non coplanar",
            -1 -> "missing landmark" (no landmarks were extracted;
            index -1 wraps to the last label).
    """
    h, w, _ = img.shape
    labels = ('Coplanar', 'Non coplanar', 'missing landmark')
    # NOTE(review): putText's org is (x, y); (h/8, w/8) looks transposed
    # (only matters for non-square frames) — confirm against the callers.
    cv2.putText(img, labels[judge], (int(h / 8), int(w / 8)),
                fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=2,
                color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)


# Semi-global block matching on a rectified stereo pair.
def SGBM(rL, rR, imgSize, Q):
    """Compute disparity, a display disparity, depth and 3-D points.

    Args:
        rL, rR: rectified left/right images (rL must have a channel axis).
        imgSize: sequence whose element 0 sizes the disparity search range.
        Q: 4x4 reprojection matrix from stereo rectification.

    Returns:
        (disp, disp8, depth, xyz): float disparity map, display-scaled
        disparity, depth map (units of the calibration, presumably mm),
        and per-pixel 3-D coordinates from cv2.reprojectImageTo3D.
    """
    win = 9
    channels = rL.shape[2]
    # Search range ~ width/12, rounded up to a multiple of 16 (SGBM requirement).
    num_disp = (int(imgSize[0] / 12) + 15) & -16
    matcher = cv2.StereoSGBM_create(
        preFilterCap=63,  # clip the pre-filtered x-gradient to [-63, 63]
        blockSize=win,    # SAD cost window
        P1=8 * channels * win * win,   # smoothness penalty for ±1 disparity steps
        P2=32 * channels * win * win,  # penalty for larger disparity jumps
        minDisparity=0,
        numDisparities=num_disp,
        uniquenessRatio=10,  # winner must beat runners-up by this margin (%)
        speckleWindowSize=50,
        speckleRange=32,
        disp12MaxDiff=1,
        mode=cv2.STEREO_SGBM_MODE_SGBM
    )
    # compute() returns fixed-point disparities scaled by 16; undo that.
    disp = matcher.compute(rL, rR).astype(np.float32) / 16.0
    # Display copy, normalised to roughly [0, 255].
    disp8 = disp * 255 / num_disp
    # Depth from the reprojection matrix (no unit conversion applied).
    depth = Q[2][3] / (Q[3][2] * disp + Q[3][3])
    # Full 3-D reprojection of every pixel.
    xyz = cv2.reprojectImageTo3D(disp, Q)
    return disp, disp8, depth, xyz


# Coplanarity check: reproject the five landmarks via Q and compare the
# spread of their depths against a threshold.
def judge_coplanarity(boxesL, boxesR, Q, threshold):
    """Judge whether the five landmarks lie at (roughly) one depth.

    Args:
        boxesL, boxesR: detections from the left/right views; landmark
            x-coordinates live in columns 4, 6, 8, 10, 12.
        Q: 4x4 reprojection matrix.
        threshold: maximum allowed depth spread for "coplanar".

    Returns:
        -1 if either view has no detection, 1 if the depth spread exceeds
        ``threshold`` (non-coplanar), otherwise 0 (coplanar).
    """
    if len(boxesL) == 0 or len(boxesR) == 0:
        return -1
    row = 0
    # Left-right x difference of each landmark is its stereo disparity.
    cols = [4, 6, 8, 10, 12]
    disparity = boxesL[row, cols] - boxesR[row, cols]
    z = Q[2][3] / (Q[3][2] * disparity + Q[3][3])
    spread = np.max(z) - np.min(z)
    print(spread, '\n')
    return 1 if spread > threshold else 0
