"""
@description: Detection of interaction activity.
@author: GHY, ZDZ
@create: 2022/07/13
"""
import numpy as np
import cv2
import imutils
import shutil
import time
import os

from PIL import Image, ImageDraw, ImageFont
from threading import Thread
from scipy.spatial import distance as dist
from elderlyCare.facial import FaceUtils
from elderlyCare.utils import file_assistant
from elderlyCare.utils.http_utils import http_post


class InteractionDetector:
    """
    Detect interaction activity between elderly people and volunteers.

    Each frame grabbed by :meth:`get_frame` is face-detected and recognized;
    when a volunteer's face centroid comes within ``ACTUAL_DISTANCE_LIMIT``
    centimetres of an elderly person's (distance estimated from the average
    detected face width), an interaction event is reported asynchronously to
    the SpringBoot backend and a snapshot of the annotated frame is written
    to ``OUTPUT_INTERACTION_PATH``.
    """

    # geometry the video stream is normalized to
    VIDEO_WIDTH = 640
    VIDEO_HEIGHT = 480
    ANGLE = 20  # degrees the servo should turn to re-center an off-band face

    FACE_ACTUAL_WIDTH = 20  # estimated real-world face width (cm)
    ACTUAL_DISTANCE_LIMIT = 100  # interaction distance threshold (cm)

    LABEL_ELDERLY_PERSON = "elderly_person"
    LABEL_EMPLOYEE = "employee"
    LABEL_VOLUNTEER = "volunteer"

    DIRECTION_LEFT = "left"
    DIRECTION_RIGHT = "right"

    MODEL_PATH = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\generate\\models\\face_recognition_hog.pickle"
    PEOPLE_INFO_PATH = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\config\\people_info.csv"

    OUTPUT_INTERACTION_PATH = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\generate\\supervision\\interaction"

    # index of the path component (split on '\\') used to build the OSS image url
    SPLIT_NUM = 6

    def __init__(self, url, input_video=False):
        """
        Initialize attributes, the output directory and the video source.

        :param url: Url to call DAO operation on SpringBoot end
        :param input_video: Optional path of a video file to process; when
            falsy, camera 0 is opened instead
        """
        self.url = url
        self.camera = None
        self.input_video = input_video
        self.counter = 0  # number of frames processed so far

        # directory that stores the output snapshots of detected interactions
        if os.path.exists(self.OUTPUT_INTERACTION_PATH):
            shutil.rmtree(self.OUTPUT_INTERACTION_PATH, True)
        # makedirs (instead of mkdir) so missing parent directories do not crash
        os.makedirs(self.OUTPUT_INTERACTION_PATH)

        # detection control:
        # activity_timing == 1 while an interaction is considered ongoing
        self.activity_timing = 0
        self.activity_start_time = 0
        self.activity_resend_time = 60  # resend the event at most once per this many seconds

        # maps of "id->name" and "id->type"
        self.id_card_to_name, self.id_card_to_type = file_assistant.get_people_info(self.PEOPLE_INFO_PATH)

        # initialize the video source
        if not input_video:
            self.camera = cv2.VideoCapture(0)
            time.sleep(2)  # give the camera sensor time to warm up
        else:
            self.camera = cv2.VideoCapture(input_video)

        # load the face-recognition model
        self.face_utils = FaceUtils(self.MODEL_PATH)

        print("[INFO] Start detecting interactions between the elderly and volunteers...")

    def _snapshot_and_report(self, frame, elderly_id, event_location, event_desc):
        """
        Write a snapshot of *frame* to disk and post the interaction event
        (event type 4) to the backend on a worker thread.

        :param frame: Annotated BGR frame to save
        :param elderly_id: Id card of the elderly person involved
        :param event_location: Human-readable location string
        :param event_desc: Human-readable event description
        """
        # NOTE(review): the original code named the very first snapshot
        # "snapshot_no_interaction_..." although an interaction was being
        # reported; a single consistent "interaction" prefix is used now
        image_suffix = "snapshot_interaction_%s.jpg" % (time.strftime('%Y%m%d_%H%M%S'))
        event_image = os.path.join(self.OUTPUT_INTERACTION_PATH, image_suffix)
        image_url = ("https://zzay.oss-cn-shenzhen.aliyuncs.com/"
                     + self.OUTPUT_INTERACTION_PATH.split('\\')[self.SPLIT_NUM] + "/"
                     + image_suffix)
        # post asynchronously so frame processing is not blocked by the network
        Thread(target=http_post,
               args=(self.url, 4, event_location, event_desc, image_url,
                     self.id_card_to_name[elderly_id])).start()
        cv2.imwrite(event_image, frame)

    def get_frame(self):
        """
        Grab one frame, run face recognition / interaction detection, draw
        the annotations and return the frame JPEG-encoded.

        :return: JPEG bytes of the annotated frame, or None when the stream
            has ended or the camera failed
        """
        self.counter += 1
        camera_turned = 0  # set to 1 when the servo has to move this frame

        (grabbed, frame) = self.camera.read()
        if not grabbed:
            # end of the video / camera failure
            return None

        # a live camera feed is mirrored so the preview behaves like a mirror
        if not self.input_video:
            frame = cv2.flip(frame, 1)

        # compress to a fixed width for faster recognition
        frame = imutils.resize(frame, width=self.VIDEO_WIDTH, height=self.VIDEO_HEIGHT)

        face_location_list, names = self.face_utils.get_face_location_and_name(frame)

        # vertical guide lines at 1/6 and 5/6 of the frame width; a volunteer
        # face outside this band triggers a servo adjustment
        one_sixth_image_center = (int(self.VIDEO_WIDTH / 6), int(self.VIDEO_HEIGHT / 6))
        five_sixth_image_center = (int(self.VIDEO_WIDTH / 6 * 5), int(self.VIDEO_HEIGHT / 6 * 5))
        cv2.line(img=frame,
                 pt1=(one_sixth_image_center[0], 0),
                 pt2=(one_sixth_image_center[0], self.VIDEO_HEIGHT),
                 color=(0, 255, 255), thickness=1)
        cv2.line(img=frame,
                 pt1=(five_sixth_image_center[0], 0),
                 pt2=(five_sixth_image_center[0], self.VIDEO_HEIGHT),
                 color=(0, 255, 255), thickness=1)

        people_type_list = list(set([self.id_card_to_type[i] for i in names]))
        volunteer_name_direction_dict = {}
        volunteer_centroids = []
        old_people_centroids = []
        old_people_name = []

        # handle all faces detected
        for ((left, top, right, bottom), name) in zip(face_location_list, names):
            # frame the face in a colour that encodes the person's type
            person_type = self.id_card_to_type[name]
            if person_type == self.LABEL_ELDERLY_PERSON:
                rectangle_color = (0, 0, 128)
            elif person_type == self.LABEL_EMPLOYEE:
                rectangle_color = (255, 0, 0)
            elif person_type == self.LABEL_VOLUNTEER:
                rectangle_color = (0, 255, 0)
            else:
                rectangle_color = (0, 0, 255)  # unknown type
            cv2.rectangle(img=frame,
                          pt1=(left, top), pt2=(right, bottom),
                          color=rectangle_color, thickness=2)

            # NOTE(review): when no volunteer is present this also skips the
            # name label below for every face — preserved from the original
            if self.LABEL_VOLUNTEER not in people_type_list:
                continue

            face_center = (int((right + left) / 2), int((top + bottom) / 2))
            if person_type == self.LABEL_VOLUNTEER:
                volunteer_centroids.append(face_center)
                cv2.circle(frame, (face_center[0], face_center[1]),
                           8, (255, 0, 0), -1)
                # if the face is too far left/right, the servo must move
                adjust_direction = ""
                if face_center[0] < one_sixth_image_center[0]:
                    adjust_direction = self.DIRECTION_RIGHT
                elif face_center[0] > five_sixth_image_center[0]:
                    adjust_direction = self.DIRECTION_LEFT
                volunteer_name_direction_dict[name] = adjust_direction
            elif person_type == self.LABEL_ELDERLY_PERSON:
                old_people_centroids.append(face_center)
                old_people_name.append(name)
                cv2.circle(frame, (face_center[0], face_center[1]),
                           4, (0, 255, 0), -1)

            # draw the person's display name; rendered through PIL (not
            # cv2.putText) so non-ASCII names display correctly
            img_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            draw = ImageDraw.Draw(img_pil)
            draw.text((left, top - 30), self.id_card_to_name[name],
                      font=ImageFont.truetype("resources/NotoSansCJK-Black.ttc", 40),
                      fill=(255, 0, 0))
            frame = cv2.cvtColor(np.asarray(img_pil), cv2.COLOR_RGB2BGR)

        # decide whether the camera has to turn to follow the volunteer
        if self.LABEL_VOLUNTEER in people_type_list:
            volunteer_adjust_direction_list = list(volunteer_name_direction_dict.values())
            if "" in volunteer_adjust_direction_list:
                # at least one volunteer is inside the visible band
                print("Volunteer-%d is in visible range, do not need to adjust the camera..." % self.counter)
            else:
                adjust_direction = volunteer_adjust_direction_list[0]
                camera_turned = 1
                print("camera-%d needs to turn %s %d degrees..." % (self.counter, adjust_direction, self.ANGLE))

        # measure volunteer <-> elderly distances only on a steady frame
        if camera_turned == 0 and volunteer_centroids and old_people_centroids:
            # pixels-per-cm calibration from the average detected face width
            # (hoisted: invariant for the whole frame, and the original
            # comprehension shadowed the outer loop variable)
            face_pixel_width = (sum(r - l for (l, t, r, b) in face_location_list)
                                / len(face_location_list))
            pixel_per_metric = face_pixel_width / self.FACE_ACTUAL_WIDTH

            for volunteer_center in volunteer_centroids:
                for elderly_index, elderly_center in enumerate(old_people_centroids):
                    pixel_distance = dist.euclidean(volunteer_center, elderly_center)
                    actual_distance = pixel_distance / pixel_per_metric

                    if actual_distance < self.ACTUAL_DISTANCE_LIMIT:
                        # close enough: consider an interaction to be occurring
                        cv2.line(img=frame,
                                 pt1=(int(volunteer_center[0]), int(volunteer_center[1])),
                                 pt2=(int(elderly_center[0]), int(elderly_center[1])),
                                 color=(255, 0, 255), thickness=2)
                        label = "Distance: %d cm" % actual_distance
                        cv2.putText(frame, label, (frame.shape[1] - 150, 30),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                    (0, 0, 255), 2)

                        event_desc = "Elderly person interacting with the volunteer"
                        event_location = "Desk in the room"
                        print("[EVENT] Elderly person interacting with the volunteer...")

                        if self.activity_timing == 0:
                            # first frame of a new interaction: report at once
                            self.activity_timing = 1
                            self.activity_start_time = time.time()
                            self._snapshot_and_report(frame, old_people_name[elderly_index],
                                                      event_location, event_desc)
                        elif time.time() - self.activity_start_time > self.activity_resend_time:
                            # interaction still ongoing: resend at most once
                            # per resend window (was a hard-coded 60)
                            self.activity_start_time = time.time()
                            self._snapshot_and_report(frame, old_people_name[elderly_index],
                                                      event_location, event_desc)
                    elif self.activity_timing == 1:
                        # the pair separated: a later approach is a new event
                        self.activity_timing = 0

        # return the annotated frame as a JPEG byte stream
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def __del__(self):
        """
        Release the video source and destroy all OpenCV windows.
        """
        # oss_upload(self.output_activity_path)
        # guard against __init__ having failed before the camera was opened
        camera = getattr(self, "camera", None)
        if camera is not None:
            camera.release()
        cv2.destroyAllWindows()
