import cv2
import numpy as np


# from adafruit_servokit import ServoKit


def find_eye(img, gray, face):
    """
    Detect eyes inside a face region and outline them on the image.

    :param img: original BGR frame (drawn on in place)
    :param gray: grayscale version of the same frame
    :param face: face bounding box as (x, y, w, h)
    :return: None
    """
    x, y, w, h = face
    # Restrict the search to the detected face area.
    roi_gray = gray[y:y + h, x:x + w]
    roi_color = img[y:y + h, x:x + w]
    # Run the eye cascade on the face-only grayscale patch and
    # outline each hit in green on the color ROI (a view into img).
    for ex, ey, ew, eh in eye_cascade.detectMultiScale(roi_gray):
        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)


def add_alpha_channel(img):
    """
    Append a fully-opaque alpha channel to a 3-channel BGR image.

    :param img: H x W x 3 image array (e.g. a decoded JPEG)
    :return: H x W x 4 image with alpha = 255 everywhere; an image that
             already has 4 channels is returned unchanged
    """
    # Already BGRA: nothing to do. (The original cv2.split-based version
    # raised ValueError here because split yielded 4 planes.)
    if img.shape[2] == 4:
        return img
    # A fully-opaque alpha plane matching the source image's dtype.
    alpha = np.full(img.shape[:2], 255, dtype=img.dtype)
    # Stack B, G, R and alpha into one 4-channel image; plain numpy
    # stacking avoids the intermediate copies of cv2.split/cv2.merge.
    return np.dstack((img, alpha))


def merge_img(base_img, mask_img, mask_range):
    """
    Alpha-blend a 4-channel (BGRA) overlay onto a base image in place.

    :param base_img: background image (3 or 4 channels; promoted to 4)
    :param mask_img: overlay image with an alpha channel (H x W x 4)
    :param mask_range: (x1, x2, y1, y2) target rectangle on the base image
    :return: the blended base image (same array when it was already BGRA)
    """
    x1, x2, y1, y2 = mask_range

    # Promote a 3-channel base image to BGRA so the channel layouts match.
    if base_img.shape[2] == 3:
        base_img = add_alpha_channel(base_img)

    # The requested rectangle may extend past the base image's borders.
    # Clip both the target rectangle and the matching overlay region so
    # the two slices always have identical shapes.
    yy1, yy2 = 0, mask_img.shape[0]
    xx1, xx2 = 0, mask_img.shape[1]
    if x1 < 0:
        xx1 = -x1
        x1 = 0
    if y1 < 0:
        yy1 = -y1
        y1 = 0
    if x2 > base_img.shape[1]:
        xx2 = mask_img.shape[1] - (x2 - base_img.shape[1])
        x2 = base_img.shape[1]
    if y2 > base_img.shape[0]:
        yy2 = mask_img.shape[0] - (y2 - base_img.shape[0])
        y2 = base_img.shape[0]

    # If the overlay lies entirely outside the base image, the clipped
    # rectangle is empty or inverted (e.g. x2 still negative), which made
    # the original version blend mismatched slices; skip blending instead.
    if x1 >= x2 or y1 >= y2:
        return base_img

    # Per-pixel opacity in [0, 1]; shape (h, w, 1) broadcasts over the
    # three color channels, replacing the original per-channel loop.
    alpha = mask_img[yy1:yy2, xx1:xx2, 3:4] / 255.0
    region = base_img[y1:y2, x1:x2, :3]
    base_img[y1:y2, x1:x2, :3] = (1 - alpha) * region + alpha * mask_img[yy1:yy2, xx1:xx2, :3]

    return base_img


def add_mask(img, face, mask):
    """
    Overlay a decoration (here: ears) directly above a detected face.

    :param img: original frame
    :param face: face bounding box as (x, y, w, h)
    :param mask: BGRA overlay image to place above the face
    :return: frame with the overlay blended in
    """
    x, y, w, h = face
    # Scale a copy of the overlay to match the face's size.
    scaled = cv2.resize(mask.copy(), (w, h))
    # Anchor the overlay so its bottom edge sits on top of the face box.
    left = x
    top = y - scaled.shape[0]
    right = left + scaled.shape[1]
    bottom = y
    # Blend the overlay onto the frame; range order is (x1, x2, y1, y2).
    return merge_img(img, scaled, (left, right, top, bottom))


def get_offset(face, image_shape):
    """
    Update the global pan/tilt servo angles so the camera tracks the face.

    :param face: face bounding box as (x, y, w, h)
    :param image_shape: frame shape as (height, width, channels)
    :return: None (mutates the module-level ``pan`` and ``tilt`` angles)
    """
    global pan
    global tilt
    (x, y, w, h) = face
    image_height, image_width, _ = image_shape
    # Error = offset of the face center from the frame center.
    x_cent = x + w / 2
    y_cent = y + h / 2
    errorPan = x_cent - image_width / 2
    errorTilt = y_cent - image_height / 2

    print(errorPan, errorTilt)
    # Dead band of 15 px suppresses jitter; /50 is the proportional gain.
    if abs(errorPan) > 15:
        pan = pan - errorPan / 50
    if abs(errorTilt) > 15:
        tilt = tilt - errorTilt / 50
    # Clamp both angles to the servo's physical 0-180 degree range.
    # (Bug fix: the tilt branches previously printed "Pan".)
    if pan > 180:
        pan = 180
        print("Pan out of Range")
    if pan < 0:
        pan = 0
        print("Pan out of Range")
    if tilt > 180:
        tilt = 180
        print("Tilt out of Range")
    if tilt < 0:
        tilt = 0
        print("Tilt out of Range")


if __name__ == "__main__":
    face_cascade = cv2.CascadeClassifier("./resource/haarcascade_frontalface_default.xml")
    eye_cascade = cv2.CascadeClassifier('./resource/haarcascade_eye.xml')
    # IMREAD_UNCHANGED keeps the PNG's alpha channel for blending.
    mask = cv2.imread(r'./resource/ear.png', cv2.IMREAD_UNCHANGED)
    # kit = ServoKit(channels=16)
    # Start both servo angles centered in the 0-180 degree range.
    pan = 90
    tilt = 90

    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # CAP_DSHOW: Windows DirectShow backend
    try:
        while True:
            # Read a frame; stop if the camera yields nothing (the original
            # crashed inside cvtColor on a None frame).
            success, img = cap.read()
            if not success:
                print("Failed to read frame from camera")
                break
            # Haar cascades operate on grayscale input.
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # NOTE: uneven lighting hurts detection; consider preprocessing.
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for face in faces:
                (x, y, w, h) = face
                # Draw the face bounding box.
                img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                # Draw the eyes.
                find_eye(img, gray, face)
                # Add the ear overlay.
                img = add_mask(img, face, mask)
                # Update pan/tilt from the face offset.
                get_offset(face, img.shape)
                # Send the drive signal.
                print(180 - pan, tilt)
                # kit.servo[0].angle = 180 - pan
                # kit.servo[1].angle = tilt

            # Show the tracking window; 'q' quits.
            cv2.imshow("Face Tracking", img)
            if cv2.waitKey(1) & 0xff == ord('q'):
                break
    finally:
        # Release the camera and close the preview window on exit
        # (the original leaked both).
        cap.release()
        cv2.destroyAllWindows()
