"""
@description: Detection of falling.
@author: GHY, ZDZ
@create: 2022/07/13
"""
import numpy as np
import cv2
import imutils
import shutil
import torch
import os
import time

from elderlyCare.HumanFAT.Detection.Utils import ResizePadding
from elderlyCare.HumanFAT.DetectorLoader import TinyYOLOv3_onecls
from elderlyCare.HumanFAT.PoseEstimateLoader import SPPE_FastPose
from elderlyCare.HumanFAT.fn import draw_single
from elderlyCare.HumanFAT.Track.Tracker import Detection, Tracker
from elderlyCare.HumanFAT.ActionsEstLoader import TSSTG
from elderlyCare.utils.oss_utils import oss_upload
from elderlyCare.utils.http_utils import http_post
from threading import Thread


class FallDetector:
    """
    Detect falling persons in a video stream.

    Per-frame pipeline: TinyYOLOv3 person detection -> SPPE fast pose
    estimation -> Kalman-filter multi-object tracking -> TSSTG action
    classification over a 30-frame keypoint window.  When the
    "Fall Down" action persists for at least ``fall_limit_time`` seconds,
    a snapshot is written locally and the event is POSTed to the backend
    in a background thread; the event is re-sent every
    ``fall_resend_time`` seconds while the fall continues.
    """

    # Inference device. CUDA was deliberately disabled; restore the
    # commented line to re-enable GPU inference.
    # DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    DEVICE = "cpu"

    # Square input size (pixels) for the person detector.
    DETECTION_INPUT_SIZE = 256

    # Pose-estimator input as "HxW" and its backbone network.
    POSE_INPUT_SIZE = "224x160"
    POSE_BACKBONE = "resnet50"

    # Visualization toggles: raw detector boxes / skeleton overlay.
    SHOW_DETECTED = False
    SHOW_SKELETON = True

    # Action-class labels emitted by the TSSTG action model.
    LABEL_FALL = "Fall Down"
    LABEL_LYING = "Lying Down"

    # Local directory where fall snapshots are written before OSS upload.
    OUTPUT_FALL_IMAGE_PATH = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\generate\\supervision\\fall"

    # Index of the path component (split on "\\") that names the OSS folder
    # when building the public image URL.
    SPLIT_NUM = 6

    def __init__(self, url, input_video=False):
        """
        Load all models and open the video source.

        :param url: URL used to POST fall events to the SpringBoot backend.
        :param input_video: Path of a video file to process; falsy value
                            (the default) opens the local camera instead.
        """
        self.url = url
        self.camera = None
        self.input_video = input_video

        # log the start-up of the program
        current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        print("[INFO] %s Fall detection program starts..." % current_time)

        # recreate an empty directory for this run's fall snapshots
        if os.path.exists(self.OUTPUT_FALL_IMAGE_PATH):
            shutil.rmtree(self.OUTPUT_FALL_IMAGE_PATH, True)
        # makedirs also creates missing parents (os.mkdir would fail there)
        os.makedirs(self.OUTPUT_FALL_IMAGE_PATH)

        # human-detection model
        self.detection_model = TinyYOLOv3_onecls(input_size=self.DETECTION_INPUT_SIZE,
                                                 device=self.DEVICE)

        # pose-estimation model; POSE_INPUT_SIZE is "HxW"
        self.inp_pose = self.POSE_INPUT_SIZE.split("x")
        self.inp_pose = (int(self.inp_pose[0]), int(self.inp_pose[1]))
        self.pose_model = SPPE_FastPose(backbone=self.POSE_BACKBONE,
                                        input_height=self.inp_pose[0],
                                        input_width=self.inp_pose[1],
                                        device=self.DEVICE)

        # multi-object tracker (Kalman filter based)
        self.max_age = 30
        self.tracker = Tracker(max_age=self.max_age,
                               n_init=3)

        # action-estimation model and the resize helper for the detector
        self.action_model = TSSTG()
        self.resize_fn = ResizePadding(height=self.DETECTION_INPUT_SIZE,
                                       width=self.DETECTION_INPUT_SIZE)

        # fall-detection state machine
        # NOTE(review): fall_timing is never reset to 0 once a fall has
        # started, so timing continues across recoveries — confirm intent.
        self.fall_timing = 0            # 0 = not timing, 1 = timing a fall
        self.fall_start_time = 0
        self.fall_limit_time = 1        # falls shorter than this (s) are ignored
        self.fall_resend_time = 60      # re-send interval (s) for an ongoing fall
        self.fall_exist = False         # True once the first event was sent

        # open the video source: file if given, otherwise local camera 0
        if not input_video:
            self.camera = cv2.VideoCapture(0)
        else:
            self.camera = cv2.VideoCapture(input_video)

        self.fps_time = 0               # timestamp of the previous frame
        self.f = 0                      # processed-frame counter

    def preprocess(self, image):
        """
        Resize/pad a BGR frame to the detector input size and convert to RGB.

        NOTE(review): get_frame() does not call this helper; it resizes
        with imutils instead. Kept for external callers.
        """
        image = self.resize_fn(image)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        return image

    def kpt2bbox(self, kpt, ex=20):
        """
        Return a bounding box (x_min, y_min, x_max, y_max) that holds all
        keypoints, expanded by ``ex`` pixels on every side.

        :param kpt: Array of shape (N, 2) of (x, y) keypoints.
        :param ex: (int) Expansion margin in pixels.
        """
        return np.array((kpt[:, 0].min() - ex, kpt[:, 1].min() - ex,
                         kpt[:, 0].max() + ex, kpt[:, 1].max() + ex))

    def _report_fall_event(self, image, image_prefix, event_location, event_desc):
        """
        Save a snapshot of ``image`` locally and POST the fall event to the
        backend in a background thread (so detection is never blocked).

        :param image: Original (unannotated) BGR frame to save.
        :param image_prefix: Filename prefix for the snapshot.
        :param event_location: Human-readable location of the event.
        :param event_desc: Human-readable description of the event.
        """
        image_suffix = "%s_%s.jpg" % (image_prefix, time.strftime("%Y%m%d_%H%M%S"))
        event_image = os.path.join(self.OUTPUT_FALL_IMAGE_PATH, image_suffix)
        # public URL the snapshot will have after OSS upload in __del__
        image_url = "https://zzay.oss-cn-shenzhen.aliyuncs.com/" \
                    + self.OUTPUT_FALL_IMAGE_PATH.split('\\')[self.SPLIT_NUM] + "/" \
                    + image_suffix
        thread = Thread(target=http_post,
                        args=(self.url, 3, event_location, event_desc, image_url))
        thread.start()
        cv2.imwrite(event_image, image)

    def get_frame(self):
        """
        Read one frame, run the detection/pose/tracking/action pipeline on
        it and return the annotated frame.

        :return: JPEG-encoded bytes of the processed frame, or None when
                 the stream has ended (no frame could be grabbed).
        """
        # grab the current frame
        (grabbed, image) = self.camera.read()
        # BUGFIX: check `grabbed` before using the frame — at end of
        # stream read() returns (False, None) and .copy() would raise.
        if not grabbed:
            return None

        frame = image.copy()
        # mirror the live-camera feed so it behaves like a mirror
        if not self.input_video:
            frame = cv2.flip(image, 1)

        # compress for faster recognition (imutils.resize keeps the aspect
        # ratio; when width is given, height is effectively ignored)
        frame = imutils.resize(frame, width=224, height=160)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # detect human bboxes in the frame with the detection model
        detected = self.detection_model.detect(frame, need_resize=True, expand_bb=10)

        # predict each track's bbox for the current frame from previous
        # frames' information with the Kalman filter
        self.tracker.predict()

        # merge the two sources of predicted bboxes together
        for track in self.tracker.tracks:
            det = torch.tensor([track.to_tlbr().tolist() + [0.5, 1.0, 0.0]], dtype=torch.float32)
            detected = torch.cat([detected, det], dim=0) if detected is not None else det

        # list of Detection objects for tracking
        detections = []
        if detected is not None:
            # detected = non_max_suppression(detected[None, :], 0.45, 0.2)[0]
            # predict the skeleton pose of each bbox
            poses = self.pose_model.predict(frame, detected[:, 0:4], detected[:, 4])

            # build a Detection per pose: bbox from keypoints, keypoints
            # concatenated with their scores, mean score as confidence
            detections = [Detection(tlbr=self.kpt2bbox(ps["keypoints"].numpy()),
                                    keypoints=np.concatenate((ps["keypoints"].numpy(), ps["kp_score"].numpy()),
                                                             axis=1),
                                    confidence=ps["kp_score"].mean().numpy()) for ps in poses]

            # visualize the raw detections if SHOW_DETECTED is enabled
            if self.SHOW_DETECTED:
                for bb in detected[:, 0:5]:
                    frame = cv2.rectangle(frame, (bb[0], bb[1]), (bb[2], bb[3]), (0, 0, 255), 1)

        # update tracks by matching current/previous frame information,
        # or create a new track if nothing matched
        self.tracker.update(detections)

        # predict the action of each confirmed track
        for i, track in enumerate(self.tracker.tracks):
            if not track.is_confirmed():
                continue

            track_id = track.track_id
            bbox = track.to_tlbr().astype(int)
            center = track.get_center().astype(int)

            action = "pending..."
            clr = (0, 255, 0)

            # use a 30-frame time-step window for the prediction
            if len(track.keypoints_list) == 30:
                pts = np.array(track.keypoints_list, dtype=np.float32)
                out = self.action_model.predict(pts, frame.shape[:2])
                action_name = self.action_model.class_names[out[0].argmax()]
                action = "{}: {:.2f}%".format(action_name, out[0].max() * 100)
                if action_name == self.LABEL_FALL:
                    clr = (255, 0, 0)
                    if self.fall_timing == 0:
                        # start timing the fall
                        self.fall_timing = 1
                        self.fall_start_time = time.time()
                    else:
                        # timing already started: check duration
                        fall_end_time = time.time()
                        diff = fall_end_time - self.fall_start_time
                        current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
                        if diff < self.fall_limit_time:
                            # too short to count as a real fall
                            print("[INFO] %s Detected fall in corridor for only %.1f seconds, ignore it..."
                                  % (current_time, diff))
                        else:
                            # confirmed fall action
                            event_desc = "Fall action detected..."
                            event_location = "Corridor"
                            print("[EVENT] %s Detected fall in corridor for %.1f seconds" % (current_time, diff))

                            if not self.fall_exist:
                                # first report for this fall
                                # NOTE(review): historical "snapshot_not_fall"
                                # prefix kept for URL compatibility — confirm
                                # whether it should read "snapshot_fall".
                                self._report_fall_event(image, "snapshot_not_fall",
                                                        event_location, event_desc)
                                self.fall_exist = True
                            if self.fall_exist and diff > self.fall_resend_time:
                                # fall still ongoing: re-send and restart timer
                                self._report_fall_event(image, "snapshot_fall",
                                                        event_location, event_desc)
                                self.fall_start_time = time.time()

                elif action_name == self.LABEL_LYING:
                    # mark lying people with a distinct color
                    clr = (255, 200, 0)

            # draw the skeleton/bbox/labels for tracks updated this frame
            if track.time_since_update == 0:
                if self.SHOW_SKELETON:
                    frame = draw_single(frame, track.keypoints_list[-1])
                frame = cv2.rectangle(img=frame,
                                      pt1=(bbox[0], bbox[1]), pt2=(bbox[2], bbox[3]),
                                      color=(0, 255, 0), thickness=1)
                frame = cv2.putText(frame, str(track_id),
                                    (center[0], center[1]),
                                    cv2.FONT_HERSHEY_COMPLEX, 0.4,
                                    (255, 0, 0), 2)
                frame = cv2.putText(frame, action,
                                    (bbox[0] + 5, bbox[1] + 15),
                                    cv2.FONT_HERSHEY_COMPLEX, 0.4,
                                    clr, 1)

        # upscale and overlay the frame counter / FPS
        frame = cv2.resize(frame, (0, 0), fx=2., fy=2.)
        # BUGFIX: the frame counter was never incremented (always showed 0)
        self.f += 1
        now = time.time()
        # guard against a zero/near-zero dt (e.g. the very first frame)
        fps = 1.0 / max(now - self.fps_time, 1e-6)
        frame = cv2.putText(frame, "%d, FPS: %f" % (self.f, fps),
                            (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 255, 0), 1)
        # convert RGB back to BGR for JPEG encoding
        frame = frame[:, :, ::-1]
        self.fps_time = now

        # return the encoded stream frame
        ret, jpeg = cv2.imencode(".jpg", frame)
        return jpeg.tobytes()

    def __del__(self):
        """
        Upload all generated snapshots to OSS, then release the camera
        and close any OpenCV windows.
        """
        oss_upload(self.OUTPUT_FALL_IMAGE_PATH)
        # camera is None if __init__ failed before opening the capture
        if self.camera is not None:
            self.camera.release()
        cv2.destroyAllWindows()
