# Importing required packages
from keras.models import load_model
import numpy as np
# import dlib
import cv2
# from retinaface import RetinaFace
import math
from mtcnn import MTCNN
# Module-level MTCNN face detector, constructed once at import time and
# reused by process_image().
detector = MTCNN()
# Pixel offsets (x, y) around a detected face crop.
# NOTE(review): not referenced anywhere in this file — possibly consumed by
# another module; kept for backward compatibility. TODO confirm and remove.
emotion_offsets = (20, 40)

# FER2013 class index -> human-readable label plus the emoji image that gets
# overlaid on the face for that emotion.
emotions = {
    0: {"emotion": "Angry", "emoji": "emoji/pouting-face-apple.png"},
    1: {"emotion": "Disgust", "emoji": "emoji/nauseated-face-apple.png"},
    2: {"emotion": "Fear", "emoji": "emoji/fearful-face-apple.png"},
    3: {"emotion": "Happy", "emoji": "emoji/hugging-face-apple.png"},
    4: {"emotion": "Sad", "emoji": "emoji/frowning-face-apple.png"},
    # Fixed typo: was "Suprise".
    5: {"emotion": "Surprise", "emoji": "emoji/exploding-head-apple.png"},
    6: {"emotion": "Neutral", "emoji": "emoji/thinking-face-apple.png"},
}


# overlay_image_alpha blends `img_overlay` onto `img` at position (x, y) using
# per-pixel alpha compositing: out = alpha * overlay + (1 - alpha) * img.
# Both the destination window inside `img` and the source window inside
# `img_overlay` are clipped to the image bounds, so overlays partially outside
# the frame are handled; if the overlay lies entirely outside `img`, the
# function returns without modifying anything. `alpha_mask` must have the same
# height and width as `img_overlay`, with values in the range [0, 1].

def overlay_image_alpha(img, img_overlay, x, y, alpha_mask):
    """Alpha-blend `img_overlay` onto `img` in place at position (x, y).

    `alpha_mask` must match `img_overlay`'s height and width, with values in
    [0, 1]. Regions outside `img` are clipped; if the overlay falls entirely
    outside the image, nothing is modified.
    """
    h_img, w_img = img.shape[0], img.shape[1]
    h_ov, w_ov = img_overlay.shape[0], img_overlay.shape[1]

    # Destination window inside `img`, clipped to its bounds.
    top, bottom = max(0, y), min(h_img, y + h_ov)
    left, right = max(0, x), min(w_img, x + w_ov)

    # Matching source window inside `img_overlay`.
    top_o, bottom_o = max(0, -y), min(h_ov, h_img - y)
    left_o, right_o = max(0, -x), min(w_ov, w_img - x)

    # No overlap between overlay and image -> nothing to blend.
    if bottom <= top or right <= left or bottom_o <= top_o or right_o <= left_o:
        return

    dst = img[top:bottom, left:right]
    src = img_overlay[top_o:bottom_o, left_o:right_o]
    a = alpha_mask[top_o:bottom_o, left_o:right_o, np.newaxis]

    # In-place composite; assignment casts back to `img`'s dtype.
    dst[:] = a * src + (1.0 - a) * dst

# shapePoints converts a dlib 68-landmark shape object into a (68, 2) NumPy
# array of integer (x, y) coordinates, one row per landmark.

def shapePoints(shape):
    """Return the 68 facial landmarks of a dlib shape as a (68, 2) int array."""
    points = [(shape.part(idx).x, shape.part(idx).y) for idx in range(68)]
    return np.array(points, dtype="int")

# rectPoints converts a dlib rectangle object into an (x, y, width, height)
# tuple describing the same region.


def rectPoints(rect):
    """Convert a dlib-style rectangle into an (x, y, width, height) tuple."""
    left, top = rect.left(), rect.top()
    width = rect.right() - left
    height = rect.bottom() - top
    return (left, top, width, height)

# Load the pre-trained emotion classifier once at import time.
# compile=False: the model is used for inference only, so no loss/optimizer
# configuration is needed.
emotionModelPath = 'emotionModel.hdf5'  # fer2013_mini_XCEPTION.110-0.65
emotionClassifier = load_model(emotionModelPath, compile=False)
# (height, width) the model expects for its grayscale face input.
emotionTargetSize = emotionClassifier.input_shape[1:3]

def process_image(path, res_path):
    """Detect faces in the image at `path`, overlay an emotion emoji on each
    confidently-classified face (a white rectangle otherwise), and write the
    annotated image to `res_path`.
    """
    frame = cv2.imread(path)
    # MTCNN expects RGB input; OpenCV loads images as BGR.
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector.detect_faces(img)
    for face in faces:
        [x, y, w, h] = face['box']
        grayFace = grayFrame[y:y + h, x:x + w]
        try:
            grayFace = cv2.resize(grayFace, (emotionTargetSize))
        except cv2.error:
            # Skip degenerate/empty face crops that cannot be resized.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            continue

        # Normalize pixels to [-1, 1] as expected by the classifier, then add
        # batch and channel dimensions: (1, H, W, 1).
        grayFace = grayFace.astype('float32')
        grayFace = grayFace / 255.0
        grayFace = (grayFace - 0.5) * 2.0
        grayFace = np.expand_dims(grayFace, 0)
        grayFace = np.expand_dims(grayFace, -1)
        emotion_prediction = emotionClassifier.predict(grayFace)
        emotion_probability = np.max(emotion_prediction)
        # Restored the confidence threshold: the original `if True or (...)`
        # was debug residue that made the rectangle branch unreachable.
        if emotion_probability > 0.1:
            emotion_label_arg = np.argmax(emotion_prediction)
            emoji_path = emotions[emotion_label_arg]['emoji']
            # IMREAD_UNCHANGED keeps the PNG alpha plane (channel 3).
            emoji = cv2.imread(emoji_path, cv2.IMREAD_UNCHANGED)
            # Emoji art is square, so scale both sides to the face height.
            emoji = cv2.resize(emoji, (h, h))
            alpha_mask = emoji[:, :, 3] / 255.0
            img_overlay = emoji[:, :, :3]
            overlay_image_alpha(frame, img_overlay, x, y, alpha_mask)
        else:
            # Low-confidence prediction: just mark the detected face location.
            color = (255, 255, 255)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
    cv2.imwrite(res_path, frame)
