import cv2
import numpy as np
import os

# --- Recogniser / detector setup -------------------------------------------
recognizer = cv2.face.LBPHFaceRecognizer_create()  # LBPH face recogniser
#recognizer.read('/Users/Shared/Previously Relocated Items/Security/Work/code_mrk/shumeipai/opencv/data/kratos.yml')  # 加载训练集
recognizer.read('/home/pi/kratos/test/shumeipai/opencv/data/kratos.yml')  # load the trained model
#cascadePath = "/Users/Shared/Previously Relocated Items/Security/Work/code_mrk/shumeipai/opencv/haarcascade/haarcascade_frontalface_default.xml"
cascadePath = "/home/pi/kratos/test/shumeipai/opencv/haarcascade/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath);

# --- Servo tracking state and tuning ---------------------------------------
last_btm_degree = 100  # last commanded angle of the bottom (pan) servo, degrees
last_top_degree = 100  # last commanded angle of the top (tilt) servo, degrees
btm_kp = 5  # proportional gain (Kp) for the bottom servo
top_kp = 5  # proportional gain (Kp) for the top servo
offset_dead_block = 0.1  # dead zone for the offset; smaller offsets are ignored
font = cv2.FONT_HERSHEY_SIMPLEX  # font used for on-screen labels

# initiate id counter
# NOTE(review): `id` shadows the builtin; it is reassigned per-face in the main loop
id = 0

# names indexed by recognizer label id: example ==> Marcelo: id=1, etc
names = ['None', 'kratos', 'keivn']

# Initialize and start realtime video capture
cam = cv2.VideoCapture(0)
cam.set(3, 320)  # set video width (property 3 = CAP_PROP_FRAME_WIDTH)
cam.set(4, 240)  # set video height (property 4 = CAP_PROP_FRAME_HEIGHT)

# Minimum window size to be accepted as a face (10% of frame on each axis)
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)
def calculate_offset(img_width, img_height, face):
    """Return the offset of the face centre from the frame centre.

    Each component of the returned ``(offset_x, offset_y)`` tuple lies in
    [-1, 1]: 0.0 means the face is centred on that axis, and the extremes
    mean it sits at the frame edge.
    """
    x, y, w, h = face
    # Centre point of the face rectangle.
    center_x = x + w / 2.0
    center_y = y + h / 2.0
    # Normalise to [-1, 1] around the frame centre.
    return (
        (center_x / img_width - 0.5) * 2.0,
        (center_y / img_height - 0.5) * 2.0,
    )

def face_filter(faces):
    """Select the single face to track from the detector output.

    Returns the face with the largest area, or ``None`` when nothing was
    detected or the best candidate is too small (under 10 px on a side)
    to be worth tracking.
    """
    # len() rather than truthiness: `faces` may be a numpy array.
    if len(faces) == 0:
        return None

    biggest = max(faces, key=lambda f: f[2] * f[3])
    # Reject tiny detections, which are usually noise.
    return None if biggest[2] < 10 or biggest[3] < 10 else biggest
def btm_servo_control(offset_x):
    """Open-loop proportional control for the bottom (pan) servo.

    Maps the horizontal face offset (in [-1, 1]) to a new absolute servo
    angle relative to the previously commanded angle, clamped to [0, 180].
    """
    global offset_dead_block  # dead-zone threshold for the offset
    global btm_kp  # proportional gain for the pan servo
    global last_btm_degree  # previously commanded pan angle

    # Offsets inside the dead zone are treated as zero to avoid jitter.
    if abs(offset_x) < offset_dead_block:
        offset_x = 0

    # Proportional step added to the previous angle, then clamped to the
    # servo's mechanical range of [0, 180] degrees.
    target = last_btm_degree + offset_x * btm_kp
    return int(max(0, min(180, target)))
def top_servo_control(offset_y):
    """Open-loop proportional control for the top (tilt) servo.

    Maps the vertical face offset (in [-1, 1]) to a new absolute servo
    angle relative to the previously commanded angle, clamped to [0, 180].
    """
    global offset_dead_block  # dead-zone threshold for the offset
    global top_kp  # proportional gain for the tilt servo
    global last_top_degree  # previously commanded tilt angle

    # Offsets below the threshold are ignored (treated as centred).
    if abs(offset_y) < offset_dead_block:
        offset_y = 0

    # offset_y *= -1
    # Proportional step added to the previous angle, then clamped to the
    # servo's mechanical range of [0, 180] degrees.
    target = last_top_degree + offset_y * top_kp
    return int(max(0, min(180, target)))

# Main capture/recognise/track loop. Runs until ESC is pressed.
while True:
    ret, img = cam.read()  # grab one frame from the camera
    # img = cv2.flip(img, -1) # Flip vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # detector/recogniser want greyscale

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )

    # Draw and label every detected face.
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        id, confidence = recognizer.predict(gray[y:y + h, x:x + w])

        # LBPH `confidence` is a distance: 0 is a perfect match,
        # anything under 100 is accepted as a known face.
        if (confidence < 100):
            id = names[id]
            confidence = "  {0}%".format(round(100 - confidence))
        else:
            id = "unknown"
            confidence = "  {0}%".format(round(100 - confidence))

        cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
        cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)

    # Track only the largest acceptable face.
    # BUGFIX: the original unpacked `face` inside the per-face loop without a
    # None check (TypeError when face_filter rejects everything), and passed
    # the face's own w/h as the frame size to calculate_offset, so the offset
    # was computed against the face box instead of the image.
    face = face_filter(faces)
    if face is not None:
        (x, y, w, h) = face
        print("img h:{} w:{}".format(h, w))
        img_height, img_width = gray.shape[:2]  # actual frame dimensions
        (offset_x, offset_y) = calculate_offset(img_width, img_height, face)
        print("X轴偏移量：{} Y轴偏移量：{}".format(offset_x, offset_y))
        next_btm_degree = btm_servo_control(offset_x)
        next_top_degree = top_servo_control(offset_y)
        # NOTE(review): last_btm_degree/last_top_degree are never updated, so
        # every step is relative to the initial 100° — confirm this is intended.
        print('底部角度： {} 顶部角度：{}'.format(next_btm_degree, next_top_degree))

    cv2.imshow('camera', img)

    k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()

