"""
@description: Detect strangers, track strangers, and detect elderly person's facial expression (smile).
@author: ZDZ, GHY
@create: 2022/07/12
"""
import numpy as np
import cv2
import imutils
import shutil
import time
import os

from elderlyCare.facial import FaceUtils
from elderlyCare.utils import file_assistant
from elderlyCare.utils.oss_utils import oss_upload
from elderlyCare.utils.http_utils import http_post
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
from PIL import Image, ImageDraw, ImageFont
from threading import Thread

# Class labels of the facial-expression classifier, index-aligned with its softmax output.
# NOTE(review): 'Digust' and 'Neural' look like misspellings of 'Disgust'/'Neutral', but
# these strings are also lookup keys into facial_expression_info.csv — confirm the CSV
# before renaming them.
Emotion_List = ['Angry', 'Digust', 'Fear', 'Happy', 'Neural', 'Sad', 'Surprise']


class StrangerEmotionDetector:
    """
    Detect strangers, track strangers, and detect elderly person's facial expression (smile).

    Frames are pulled from a webcam (or an input video), faces are located and
    recognized, a stranger that stays visible past a time limit is reported to
    the SpringBoot backend over HTTP, and a sustained "Happy" expression on an
    elderly person is reported as a smile event. Snapshots are written locally
    and uploaded to OSS when the detector is destroyed.
    """

    # constants
    FACIAL_EXPRESSION_TARGET_WIDTH = 28   # NOTE(review): unused — the model is fed 48x48 ROIs below
    FACIAL_EXPRESSION_TARGET_HEIGHT = 28
    VIDEO_WIDTH = 640
    VIDEO_HEIGHT = 480
    ANGLE = 20  # degrees the camera servo would need to turn to re-center a tracked face

    LABEL_UNKNOWN = "Unknown"
    LABEL_ELDERLY_PERSON = "elderly_person"
    LABEL_EMPLOYEE = "employee"
    LABEL_VOLUNTEER = "volunteer"

    FACIAL_NEURAL = "Neural"
    FACIAL_SMILE = "Smile"

    DIRECTION_LEFT = "left"
    DIRECTION_RIGHT = "right"

    # index of the last component when the output paths below are split on '\';
    # it names the folder inside the OSS bucket ("strangers" / "smile")
    SPLIT_NUM = 6

    # public prefix of the Aliyun OSS bucket the snapshots are uploaded to (see __del__)
    OSS_URL_PREFIX = "https://zzay.oss-cn-shenzhen.aliyuncs.com/"

    # global variables
    people_info_path = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\config\\people_info.csv"
    facial_expression_info_path = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\config\\facial_expression_info.csv"

    output_stranger_path = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\generate\\supervision\\strangers"
    output_smile_path = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\generate\\supervision\\smile"

    facial_recognition_model_path = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\generate\\models\\face_recognition_hog.pickle"
    facial_expression_model_path = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\generate\\models\\face_expression.hdf5"

    def __init__(self, url, input_video=False):
        """
        Initialize attributes.
        :param url: Url to call DAO operation on SpringBoot end
        :param input_video: Path of a video file to process; falsy means use webcam 0
        """
        self.url = url
        self.input_video = input_video
        self.camera = None

        # get current time
        current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        print("[INFO] %s strangers detection and facial expression detection program starts..." % current_time)
        print("[INFO] Start detecting strangers and facial expressions of the elderly...")

        # initialize video source: webcam 0 (with a warm-up delay) or the given file
        if not input_video:
            self.camera = cv2.VideoCapture(0)
            time.sleep(2)
        else:
            self.camera = cv2.VideoCapture(input_video)

        # control the detection of strangers
        self.strangers_timing = 0          # 0 = not timing, 1 = timing in progress
        self.strangers_start_time = 0
        self.strangers_limit_time = 2      # if >= 2 seconds, then he/she is a stranger
        self.strangers_resend_time = 60    # if >= 60 seconds, then resend message
        self.stranger_exist = False        # True once the current stranger has been reported

        # control facial expression analysis
        self.facial_expression_timing = 0  # 0 = not timing, 1 = timing in progress
        self.facial_expression_start_time = 0
        self.facial_expression_limit_time = 2  # if >= 2 seconds, then he/she is smiling

        # get map of "id->name", "id->type", "expression_id->expression_name"
        self.id_card_to_name, self.id_card_to_type = file_assistant.get_people_info(self.people_info_path)
        self.facial_expression_id_to_name = file_assistant.get_facial_expression_info(self.facial_expression_info_path)

        # ensure that the output paths exist and are empty
        if os.path.exists(self.output_stranger_path):
            shutil.rmtree(self.output_stranger_path, True)
        os.makedirs(self.output_stranger_path, exist_ok=True)
        if os.path.exists(self.output_smile_path):
            shutil.rmtree(self.output_smile_path, True)
        os.makedirs(self.output_smile_path, exist_ok=True)

        # initialize face recognition model and face expression model
        self.face_util = FaceUtils(self.facial_recognition_model_path)
        self.emotion_classifier = load_model(self.facial_expression_model_path, compile=False)
        self.emotion_target_size = self.emotion_classifier.input_shape[1:3]
        self.emotion_window = []

    def _image_url(self, output_path, image_suffix):
        """
        Build the public OSS URL under which a snapshot saved in output_path
        becomes reachable once __del__ uploads the folder.
        :param output_path: Local output folder (its last '\\'-component names the OSS folder)
        :param image_suffix: File name of the snapshot
        :return: Full OSS URL string
        """
        return self.OSS_URL_PREFIX + output_path.split('\\')[self.SPLIT_NUM] + "/" + image_suffix

    def _report_event(self, frame, output_path, image_suffix, event_type, event_location, event_desc, *extra_args):
        """
        Save frame as a snapshot in output_path and asynchronously POST the
        event (with the snapshot's future OSS URL) to the SpringBoot backend.
        :param extra_args: Extra positional args appended to the http_post call (e.g. the person id)
        """
        event_image = os.path.join(output_path, image_suffix)
        # write the snapshot first so the file exists before any upload happens
        cv2.imwrite(event_image, frame)
        thread = Thread(target=http_post,
                        args=(self.url, event_type, event_location, event_desc,
                              self._image_url(output_path, image_suffix)) + extra_args)
        thread.start()

    def _track_strangers(self, frame, face_location_list, names, one_fourth_x, three_fourth_x):
        """
        Per-frame stranger handling: time how long an unknown face has been
        visible, report it once it exceeds strangers_limit_time (and again every
        strangers_resend_time seconds), and draw tracking hints on unknown faces.
        """
        if self.LABEL_UNKNOWN not in names:
            # no strangers in this frame: reset the timer and the reported flag
            self.strangers_timing = 0
            self.stranger_exist = False
            return

        if self.strangers_timing == 0:
            # start timing
            self.strangers_timing = 1
            self.strangers_start_time = time.time()
            return

        # already started timing
        diff = time.time() - self.strangers_start_time
        current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        if diff < self.strangers_limit_time:
            # if the stranger appeared shorter than the limitation, then ignore it
            print("[INFO] %s The stranger only appeared for %.1f seconds, ignore it..."
                  % (current_time, diff))
            return

        event_desc = "Stranger detected"
        event_location = "Room"
        print("[EVENT] %s Stranger detected in the room..." % current_time)

        if not self.stranger_exist:
            # first report for this stranger
            self._report_event(frame, self.output_stranger_path,
                               "snapshot_stranger_%s.jpg" % time.strftime('%Y%m%d_%H%M%S'),
                               2, event_location, event_desc)
            self.stranger_exist = True
        elif diff > self.strangers_resend_time:
            # the stranger is still here after strangers_resend_time: report again and restart timing
            self._report_event(frame, self.output_stranger_path,
                               "snapshot_stranger_%s.jpg" % time.strftime('%Y%m%d_%H%M%S'),
                               2, event_location, event_desc)
            self.strangers_start_time = time.time()

        # track every unknown face: mark its center and, if it drifts out of the
        # middle half of the frame, suggest a servo turn
        for (left, top, right, bottom), name in zip(face_location_list, names):
            if name != self.LABEL_UNKNOWN:
                continue
            center_x = int((left + right) / 2)
            center_y = int((top + bottom) / 2)
            cv2.circle(img=frame, center=(center_x, center_y), radius=4,
                       color=(0, 255, 0), thickness=-1)
            direction = ""
            if center_x < one_fourth_x:
                direction = self.DIRECTION_RIGHT
            elif center_x > three_fourth_x:
                direction = self.DIRECTION_LEFT
            if direction:
                print("The camera needs to turn %s %d degrees" % (direction, self.ANGLE))

    def _detect_expression(self, frame, gray, face_location, name):
        """
        Classify the facial expression of an elderly person's face and report a
        smile event when 'Happy' persists beyond facial_expression_limit_time.
        :param gray: Grayscale copy of the frame the ROI is cut from
        :return: The raw expression label, or "" for non-elderly / unknown faces
        """
        if name == self.LABEL_UNKNOWN or self.id_card_to_type[name] != self.LABEL_ELDERLY_PERSON:
            # only elderly persons are analyzed; strangers are skipped
            return ""

        (left, top, right, bottom) = face_location
        roi = gray[top:bottom, left:right]
        # the expression model is fed a normalized 48x48 single-channel ROI
        roi = cv2.resize(roi, (48, 48))
        roi = roi.astype("float") / 255.0
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)

        # determine facial expression through the model (argmax over the scores)
        scores = self.emotion_classifier.predict(roi)[0].tolist()
        facial_expression_label = Emotion_List[scores.index(max(scores))]

        if facial_expression_label != 'Happy':
            # not smiling: reset the smile timer
            self.facial_expression_timing = 0
            return facial_expression_label

        if self.facial_expression_timing == 0:
            # start timing
            self.facial_expression_timing = 1
            self.facial_expression_start_time = time.time()
            return facial_expression_label

        # already started timing
        diff = time.time() - self.facial_expression_start_time
        current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        current_name = self.id_card_to_name[name]
        if diff < self.facial_expression_limit_time:
            # if the smile is too short so far, keep waiting
            print("[INFO] %s %s smiled for %.1f seconds in room, ignore it..." % (
                current_time, current_name, diff))
        else:
            # smiled long enough: save the snapshot and notify the backend.
            # BUGFIX: the URL was previously built from output_stranger_path
            # although the snapshot is saved in (and uploaded from) output_smile_path.
            event_desc = "%s is smiling" % current_name
            print("[EVENT] %s %s is smiling in the room" % (current_time, current_name))
            self._report_event(frame, self.output_smile_path,
                               "snapshot_smile_%s.jpg" % time.strftime("%Y%m%d_%H%M%S"),
                               0, "room", event_desc, name)
        return facial_expression_label

    def _draw_face_box(self, frame, face_location, name):
        """Draw a rectangle around the face, color-coded by the person's type."""
        (left, top, right, bottom) = face_location
        type_to_color = {
            self.LABEL_ELDERLY_PERSON: (0, 0, 128),
            self.LABEL_EMPLOYEE: (255, 0, 0),
            self.LABEL_VOLUNTEER: (0, 255, 0),
        }
        # NOTE(review): assumes id_card_to_type has an entry for "Unknown" — verify the CSV
        color = type_to_color.get(self.id_card_to_type[name], (0, 0, 255))
        cv2.rectangle(img=frame, pt1=(left, top), pt2=(right, bottom),
                      color=color, thickness=2)

    def _draw_label(self, frame, left, top, name, facial_expression_label):
        """
        Render "<name>: <expression>" (or just the name) above the face using a
        CJK-capable font, and return the frame converted back to OpenCV BGR.
        """
        img_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(img_pil)
        display_name = self.id_card_to_name[name]
        if facial_expression_label:
            final_label = display_name + ': ' + self.facial_expression_id_to_name[facial_expression_label]
        else:
            final_label = display_name
        draw.text((left, top - 30), final_label,
                  font=ImageFont.truetype("resources/NotoSansCJK-Black.ttc", 40),
                  fill=(255, 0, 0))
        return cv2.cvtColor(np.asarray(img_pil), cv2.COLOR_RGB2BGR)

    def get_frame(self):
        """
        Grab one frame, run stranger tracking and smile detection on it, and
        return it JPEG-encoded for streaming.
        :return: JPEG bytes, or None when the video source is exhausted
        """
        # grab the current frame
        (grabbed, frame) = self.camera.read()
        if not grabbed:
            # end of the video (or camera failure)
            return None

        if not self.input_video:
            # mirror the live camera so the stream behaves like a mirror
            frame = cv2.flip(frame, 1)

        # resize to a fixed shape; compress for faster recognition
        frame = imutils.resize(frame, width=self.VIDEO_WIDTH, height=self.VIDEO_HEIGHT)
        # grayscale copy for emotion analysis
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        face_location_list, names = self.face_util.get_face_location_and_name(frame)

        # draw vertical guide lines at 1/4 and 3/4 of the frame width
        one_fourth_x = int(self.VIDEO_WIDTH / 4)
        three_fourth_x = int(self.VIDEO_WIDTH / 4 * 3)
        cv2.line(img=frame, pt1=(one_fourth_x, 0), pt2=(one_fourth_x, self.VIDEO_HEIGHT),
                 color=(0, 255, 255), thickness=1)
        cv2.line(img=frame, pt1=(three_fourth_x, 0), pt2=(three_fourth_x, self.VIDEO_HEIGHT),
                 color=(0, 255, 255), thickness=1)

        # BUGFIX: stranger timing/reporting now runs once per frame; it used to
        # run once per detected face, duplicating reports and drawing the
        # tracking circle on known faces as well
        self._track_strangers(frame, face_location_list, names, one_fourth_x, three_fourth_x)

        # per-face: box, expression analysis, and the rendered name label
        for face_location, name in zip(face_location_list, names):
            self._draw_face_box(frame, face_location, name)
            label = self._detect_expression(frame, gray, face_location, name)
            frame = self._draw_label(frame, face_location[0], face_location[1], name, label)

        # return stream
        ret, jpeg = cv2.imencode(".jpg", frame)
        return jpeg.tobytes()

    def __del__(self):
        """
        Release the camera and close any open windows.
        Upload all generated images to OSS so the URLs posted earlier resolve.
        """
        oss_upload(self.output_stranger_path)
        oss_upload(self.output_smile_path)
        if self.camera is not None:
            # guard: __init__ may have failed before the capture was opened
            self.camera.release()
        cv2.destroyAllWindows()
