"""
@description: Detection of region invasion.
@author: GHY, ZDZ
@create: 2022/07/14
"""
import numpy as np
import cv2
import time
import os

from elderlyCare.utils.http_utils import http_post
from elderlyCare.utils.oss_utils import oss_upload
from threading import Thread


class RegionInvasionDetector:
    """
    Detector of region invasion.

    Reads frames from a webcam or a video file, runs a YOLOv3 person
    detector (OpenCV DNN) and treats the upper half of the frame as a
    prohibited area.  When a person remains inside that area longer than
    ``fence_limit_time`` seconds, the frame is annotated, a snapshot is
    written to disk and an event is posted to the SpringBoot backend in a
    background thread.
    """

    # warning overlay blended onto the frame when an invasion is detected
    WARN_IMAGE_PATH = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\elderlyCare\\YOLOv3\\files\\warning.png"

    # COCO class-label file the YOLO model was trained on
    LABELS_FILE_PATH = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\elderlyCare\\YOLOv3\\yolo-coco"
    LABELS_FILE_NAME = "coco.names"

    # YOLOv3 weights and network configuration
    WEIGHT_FILE_PATH = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\elderlyCare\\YOLOv3\\yolo-coco\\weights"
    WEIGHT_FILE_NAME = "yolov3.weights"

    CONFIG_FILE_PATH = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\elderlyCare\\YOLOv3\\yolo-coco\\cfg"
    CONFIG_FILE_NAME = "yolov3.cfg"

    # directory where event snapshots are written
    OUTPUT_REGION_INVASION_PATH = "D:\\Develop\\pycharmWorkspace\\Intelligent-Endowment\\generate\\supervision\\region-invasion"

    # index of the path component used when building the public OSS URL
    SPLIT_NUM = 6

    def __init__(self, url, input_video=False):
        """
        Initialize attributes and load the YOLO network.

        :param url: Url to call DAO operation on SpringBoot end
        :param input_video: Optional path of a video file to process; when
            falsy, the default webcam (device 0) is used instead
        """
        self.url = url
        self.input_video = input_video
        self.camera = None

        # input size fed to the network (blobFromImage takes (width, height))
        self.h_NN = 192
        self.w_NN = 192

        # size of the displayed/streamed frame
        self.h_show = 416 * 2
        self.w_show = 256 * 2

        self.thr_param = 0.3  # threshold when applying non-maxima suppression
        self.conf_param = 0.5  # minimum probability to filter weak detections

        # NOTE: cv2.resize takes (width, height) — the h_show/w_show naming
        # is kept as-is for compatibility with the rest of the file
        self.warn_image = cv2.imread(self.WARN_IMAGE_PATH)
        self.warn_image = cv2.resize(self.warn_image, (self.h_show // 2, self.w_show // 2))

        # load the COCO class labels our YOLO model was trained on
        # (use a context manager so the file handle is not leaked)
        self.labelsPath = os.path.sep.join([self.LABELS_FILE_PATH, self.LABELS_FILE_NAME])
        with open(self.labelsPath) as labels_file:
            self.LABELS = labels_file.read().strip().split("\n")

        # one fixed pseudo-random color per class label
        np.random.seed(42)
        self.COLORS = np.random.randint(0, 255,
                                        size=(len(self.LABELS), 3),
                                        dtype="uint8")

        # derive the paths to the YOLO weights and model configuration
        self.weightsPath = os.path.sep.join([self.WEIGHT_FILE_PATH, self.WEIGHT_FILE_NAME])
        self.configPath = os.path.sep.join([self.CONFIG_FILE_PATH, self.CONFIG_FILE_NAME])

        # load our YOLO object detector trained on COCO dataset (80 classes)
        # and determine only the *output* layer names that we need from YOLO
        print("[INFO] Loading YOLO from disk...")
        self.net = cv2.dnn.readNetFromDarknet(self.configPath, self.weightsPath)
        self.ln = self.net.getLayerNames()
        self.ln = [self.ln[i - 1] for i in self.net.getUnconnectedOutLayers()]

        # state of the "electronic fence" timer
        self.fence_timing = 0      # 1 while someone is inside the area
        self.fence_start_time = 0  # moment the person entered the area
        self.fence_limit_time = 2  # seconds of tolerance before warning
        self.id = 0                # running counter of drawn boxes

        # initialize the video stream (webcam or file)
        if not input_video:
            print("[INFO] Starting video stream...")
            self.camera = cv2.VideoCapture(0)
            time.sleep(2)  # give the webcam time to warm up
        else:
            print("[INFO] Opening video file...")
            self.camera = cv2.VideoCapture(input_video)
        # frame dimensions, filled in from the first grabbed frame
        self.W, self.H = (None, None)

        self.counter = 0      # frames processed so far
        self.skip_frame = 20  # post an event at most every N frames

    def get_frame(self):
        """
        Grab one frame, run detection and return it as an encoded JPEG.

        :return: JPEG bytes of the annotated frame, or ``None`` when the
            stream has ended or the frame could not be grabbed
        """
        time_start = time.time()
        # read the next frame from the stream
        (grabbed, frame) = self.camera.read()
        self.counter += 1
        # if the frame was not grabbed we have reached the end of the stream
        if not grabbed:
            return None

        # mirror the live webcam image so it behaves like a mirror
        if not self.input_video:
            frame = cv2.flip(frame, 1)

        # if the frame dimensions are empty, grab them once
        if self.W is None or self.H is None:
            self.H, self.W = frame.shape[:2]

        # the prohibited area is the upper half of the frame; draw its
        # boundary once per frame (previously redrawn per detection)
        cv2.line(frame, (0, self.H // 2), (self.W, self.H // 2), (0, 255, 255), 2)

        # construct a blob from the input frame and then perform a forward
        # pass of the YOLO object detector, giving us our bounding boxes
        # and associated probabilities
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (self.h_NN, self.w_NN),
                                     swapRB=True, crop=False)
        self.net.setInput(blob)
        layerOutputs = self.net.forward(self.ln)

        # detected bounding boxes, confidences and class IDs, respectively
        boxes = []
        confidences = []
        classIDs = []

        # loop over each of the layer outputs and their detections
        for output in layerOutputs:
            for detection in output:
                # extract the class ID and confidence (i.e., probability)
                # of the current object detection
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]

                # keep only confident "person" (COCO class 0) detections
                if confidence > self.conf_param and classID == 0:
                    # YOLO returns the box center (x, y) plus width/height
                    # relative to the input size — scale back to the frame
                    box = detection[0:4] * np.array([self.W, self.H, self.W, self.H])
                    (centerX, centerY, width, height) = box.astype("int")

                    if centerY > self.H // 2:
                        # below the fence line: reset the timer
                        self.fence_timing = 0
                        continue
                    if self.fence_timing == 0:
                        # person just entered the prohibited area
                        self.fence_timing = 1
                        self.fence_start_time = time.time()
                        continue
                    difference = time.time() - self.fence_start_time
                    current_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                                 time.localtime(time.time()))
                    if difference < self.fence_limit_time:
                        # inside the area, but not long enough yet — ignore
                        print(
                            '[INFO] {} - {} 有人出现{}秒，忽略'
                                .format(current_time, "Yard", difference))
                    else:
                        cv2.circle(frame, (centerX, centerY), 4, (0, 255, 0), -1)

                        # use the center (x, y)-coordinates to derive the
                        # top-left corner of the bounding box
                        x = int(centerX - (width / 2))
                        y = int(centerY - (height / 2))

                        boxes.append([x, y, int(width), int(height)])
                        confidences.append(float(confidence))
                        classIDs.append(classID)

        # apply non-maxima suppression to suppress weak, overlapping boxes
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.conf_param,
                                self.thr_param)

        # ensure at least one detection exists
        if len(idxs) > 0:
            for i in idxs.flatten():
                # extract the bounding box coordinates
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])

                # draw a bounding box rectangle and label on the frame
                color = [int(c) for c in self.COLORS[classIDs[i]]]
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                self.id += 1
                text = "{}-{}: {:.4f}".format(self.LABELS[classIDs[i]], self.id,
                                              confidences[i])
                cv2.putText(frame, text, (x, y - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

                # throttle event reporting to once every `skip_frame` frames
                if self.counter % self.skip_frame == 0:
                    current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
                    event_desc = "Region invasion"
                    event_location = "Yard"
                    image_suffix = "snapshot_invasion_%s.jpg" % (time.strftime('%Y%m%d_%H%M%S'))
                    event_image = os.path.join(self.OUTPUT_REGION_INVASION_PATH, image_suffix)
                    image_url = "https://zzay.oss-cn-shenzhen.aliyuncs.com/" \
                                + self.OUTPUT_REGION_INVASION_PATH.split('\\')[self.SPLIT_NUM] + "/" \
                                + image_suffix
                    print("[EVENT] %s Someone trespasses into the prohibited area..." % current_time)
                    cv2.imwrite(event_image, frame)
                    # post the event without blocking the video loop
                    thread = Thread(target=http_post,
                                    args=(self.url, 4, event_location, event_desc, image_url))
                    thread.start()

        # resize for display and blend the warning image in when needed
        frame = cv2.resize(frame, (self.h_show, self.w_show))
        if len(idxs) > 0:
            y0 = frame.shape[0] // 4
            x0 = frame.shape[1] // 4
            warn_h, warn_w = self.warn_image.shape[:2]
            roi = frame[y0:y0 + warn_h, x0:x0 + warn_w, :]
            # keep frame pixels where the warning image is bright (> 70),
            # overlay the warning image everywhere else
            frame[y0:y0 + warn_h, x0:x0 + warn_w, :] = np.where(self.warn_image > 70,
                                                                roi, self.warn_image)

        # overlay FPS and the running detection count; guard against a
        # zero elapsed time on low-resolution clocks
        elapsed = max(time.time() - time_start, 1e-6)
        cv2.putText(frame, "FPS : {}".format(1 / elapsed),
                    (5, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.putText(frame, "Count : {}".format(self.id),
                    (5, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        # return the annotated frame as a JPEG byte stream
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()

    def __del__(self):
        """Upload generated snapshots and release the video source."""
        # __del__ may run after a partially failed __init__, so guard
        # against attributes that were never assigned
        try:
            oss_upload(self.OUTPUT_REGION_INVASION_PATH)
        finally:
            camera = getattr(self, "camera", None)
            if camera is not None:
                camera.release()
            cv2.destroyAllWindows()
