import time
import os
import cv2
import numpy as np
import mediapipe as mp
from tensorflow.keras.models import load_model
from cvzone.SelfiSegmentationModule import SelfiSegmentation

# initialize mediapipe hand tracking (single hand, 70% detection confidence)
mpHands = mp.solutions.hands
hands = mpHands.Hands(max_num_hands=1, min_detection_confidence=0.7)
mpDraw = mp.solutions.drawing_utils

# Load the gesture recognizer model (Keras SavedModel directory)
model = load_model('./mp_hand_gesture')

# Load class names, one gesture label per line
with open('gesture.names', 'r') as f:
    classNames = f.read().split('\n')
print(classNames)

# Camera device index, shared with photo()
photo_num = 1
# Initialize the webcam at 1920x1080
cap = cv2.VideoCapture(photo_num)
wi = 1920
hi = 1080
cap.set(cv2.CAP_PROP_FRAME_WIDTH, wi)   # property id 3
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, hi)  # property id 4

# Background replacement: segment the person out and composite onto imgBG
segmentor = SelfiSegmentation()
imgBG = cv2.imread("background/background.jpg")
# The original passed cv2.IMREAD_UNCHANGED as resize()'s third positional
# argument (dst) — that flag belongs to imread, not resize, so it is dropped.
imgBG = cv2.resize(imgBG, (wi, hi))

# Segmentation threshold for removeBG: higher cuts the foreground more aggressively
threshold = 0.2


def photo():
    """Run the photo-booth capture sequence.

    Shows a 3..2..1 countdown using numbered background images, captures one
    frame, saves it with a timestamp, and repeats until ~4 seconds have
    elapsed since the function was entered; then releases the camera and
    closes all windows.

    Reads module-level globals: photo_num, wi, hi, segmentor, imgBG,
    threshold. Opens its own capture handle and releases it before returning.
    """
    cap = cv2.VideoCapture(photo_num)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, wi)   # property id 3
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, hi)  # property id 4
    start = time.time()
    while True:
        elapsed = time.time() - start
        # Grab a preview frame (mirrored, background-replaced, portrait-rotated)
        ret, img = cap.read()
        img = cv2.flip(img, 1)
        img = segmentor.removeBG(img, imgBG, threshold=threshold)
        img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        cv2.waitKey(125)  # pump the GUI event loop between frames

        # Stop shooting once ~4 seconds have passed since entry
        if elapsed >= 4:
            cap.release()
            cv2.destroyAllWindows()
            return

        # Countdown: j runs 30 -> 10. Every 10th frame swaps in a numbered
        # background (background_3.jpg, _2, _1) so each digit stays on screen
        # long enough to read; the other frames keep the plain background.
        j = 30
        while j >= 10:
            ret, img = cap.read()
            img = cv2.flip(img, 1)
            img = segmentor.removeBG(img, imgBG, threshold=threshold)
            if j % 10 == 0:
                digit = str(j // 10)
                imgBGs = cv2.imread(f"background/background_{digit}.jpg")
                imgBGs = cv2.resize(imgBGs, (wi, hi))
                # Re-segment the already-composited frame onto the numbered
                # background, matching the original pipeline's behavior.
                img = segmentor.removeBG(img, imgBGs, threshold=threshold)
                time.sleep(0.2)  # hold the digit frame a little longer
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
            cv2.imshow('take photo', img)
            cv2.waitKey(125)
            j -= 1

        # Countdown finished: capture, display for one second, and save.
        ret, img = cap.read()
        img = cv2.flip(img, 1)
        img = segmentor.removeBG(img, imgBG, threshold=threshold)
        img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        cv2.imshow('take photo', img)
        cv2.waitKey(1000)  # show the clicked frame for 1 sec
        # Timestamped filename so successive shots never collide
        stamp = str(time.time())
        cv2.imwrite(f'../output_result/photo_{stamp}.jpg', img)
        time.sleep(1)


# Main loop: run hand-gesture recognition on the live feed; when a 'cut' or
# 'okay' gesture is recognized, hand off to photo() for the countdown shot.
while True:
    # Read each frame from the webcam
    _, frame = cap.read()
    # frame.shape is (height, width, channels)
    frame_h, frame_w, _c = frame.shape

    # Mirror the frame horizontally so the preview behaves like a mirror
    frame = cv2.flip(frame, 1)
    # mediapipe expects RGB input (OpenCV delivers BGR)
    framergb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Get hand landmark prediction
    result = hands.process(framergb)

    className = ''

    # Post-process: collect landmark pixel coordinates and classify the gesture
    if result.multi_hand_landmarks:
        landmarks = []
        for handslms in result.multi_hand_landmarks:
            for lm in handslms.landmark:
                # NOTE(review): normalized x is scaled by the frame HEIGHT and
                # y by the WIDTH, which looks swapped — kept as-is because the
                # gesture model was presumably trained on coordinates produced
                # the same way. Verify against the training pipeline before
                # "fixing".
                lmx = int(lm.x * frame_h)
                lmy = int(lm.y * frame_w)
                landmarks.append([lmx, lmy])

            # Drawing landmarks on frames
            mpDraw.draw_landmarks(frame, handslms, mpHands.HAND_CONNECTIONS)

            # Predict the gesture from the landmark coordinate list
            prediction = model.predict([landmarks])
            classID = np.argmax(prediction)
            className = classNames[classID]

    # Composite the person onto the themed background for display.
    # (The original also ran removeBG on the raw frame at the top of the loop
    # and threw the result away — that dead per-frame segmentation is removed.)
    frame = segmentor.removeBG(frame, imgBG, threshold=threshold)
    # Trigger the photo-booth sequence on either trigger gesture
    if className == 'cut' or className == 'okay':
        photo()
    # Show the final output in portrait orientation
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    cv2.imshow("Output", frame)

    # Press q to exit
    if cv2.waitKey(1) == ord('q'):
        break

# release the webcam and destroy all active windows
cap.release()

cv2.destroyAllWindows()
