import cv2

# Load the Haar cascade classifiers for face, eye, and smile detection.
# CascadeClassifier fails *silently* when the XML file is missing or
# unreadable (detectMultiScale then simply finds nothing), so validate
# each load explicitly and fail fast with a clear error.
facer = cv2.CascadeClassifier("./haarcascades/haarcascade_frontalface_default.xml")
eyer = cv2.CascadeClassifier('./haarcascades/haarcascade_eye.xml')
smiler = cv2.CascadeClassifier('./haarcascades/haarcascade_smile.xml')
for _name, _cascade in (("face", facer), ("eye", eyer), ("smile", smiler)):
    if _cascade.empty():
        raise IOError(f"Failed to load {_name} cascade XML file")
cap = cv2.VideoCapture(0)  # index 0 = default webcam
try:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # Camera disconnected or stream ended — stop instead of
            # spinning forever on failed reads (original looped on).
            break
        # Haar cascades operate on single-channel images, so detect on
        # the grayscale conversion and draw on the color frame.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = facer.detectMultiScale(gray, 1.1, 5)
        for (x, y, w, h) in faces:
            # Outline the face in red (BGR color order).
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # Restrict eye search to the face region. Detect on the gray
            # ROI (the original searched the BGR ROI); draw on the color
            # ROI, which is a numpy *view* into `frame`, so rectangles
            # appear in the frame directly — no copy-back needed.
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eyer.detectMultiScale(roi_gray, 1.1, 5)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            # NOTE(review): smile detection (`smiler`) was computed here in
            # the original but its drawing was commented out, so the result
            # was discarded; the per-frame detectMultiScale call is removed
            # to avoid wasted work. Re-add with roi_gray if needed.
        cv2.imshow('video', frame)
        # Mask to 8 bits for consistent waitKey behavior across platforms;
        # 'q' quits.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    cap.release()  # free the camera device even if an error occurs
    cv2.destroyAllWindows()
