from PIL import Image, ImageDraw, ImageFont
from datetime import datetime, timedelta
from threading import Timer, Thread
from ultralytics import YOLO
import numpy as np
import argparse
import requests
import logging
import time
import json
import wget
import cv2
import sys
import os
import gc


def algorithm_heartbeat() -> None:
    """Report liveness to the local gateway and re-arm a 5-second timer.

    Reads the CLI ``--data`` JSON payload for the docker UUID and license
    information, POSTs them to the gateway's ``normal_update_time``
    endpoint, then schedules itself again via ``threading.Timer``.

    Fix: the POST is now wrapped in try/except so a transient network
    failure (timeout, connection refused) no longer propagates out of the
    timer callback before the next timer is armed — previously that
    silently killed the heartbeat chain forever.  The rescheduled timer
    thread is also marked daemon so a pending heartbeat cannot keep the
    process alive at shutdown.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', required=True, help='json_format')
    args = parser.parse_args()
    # NOTE(review): naive quote swap assumes single-quoted pseudo-JSON and
    # breaks if any value contains an apostrophe.
    json_args = json.loads(str(args.data).replace("'", '"'))

    docker_uuid = json_args['docker_uuid']
    license_information = json_args['license_information']

    headers_ah = {
        "Content-Type": "application/json; charset=UTF-8",
        "Referer": "http://www.wisdiot.com/",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) "
                      "AppleWebKit/537.36 (HTML, like Gecko) "
                      "Chrome/67.0.3396.87 "
                      "Safari/537.36",
    }

    alarm_url = "http://172.17.0.1:5001/gateway_api/normal_update_time"

    data = {"docker_uuid": docker_uuid, "license_information": license_information}

    logging.debug("url:%s", alarm_url)
    try:
        response_ah = requests.post(alarm_url,
                                    data=json.dumps(data),
                                    headers=headers_ah,
                                    timeout=10).text
        logging.debug("response:%s", response_ah)
    except requests.RequestException:
        # Best-effort heartbeat: log and keep ticking across gateway outages.
        logging.exception("heartbeat POST to %s failed; retrying in 5s", alarm_url)

    # Re-arm unconditionally; daemon so the pending timer never blocks exit.
    t = Timer(5, algorithm_heartbeat)
    t.daemon = True
    t.start()


def inference(loop_num: int) -> None:
    """Continuously run YOLO inference on an RTSP stream and raise alarms.

    Configuration arrives as one JSON document on the CLI via ``--data``:
    stream address, model location, ROI polygons, detection thresholds,
    overlay toggles and alarm settings.  The function never returns
    normally: it loops over frames forever, drawing the configured
    overlays, POSTing an alarm (with an annotated snapshot saved to disk)
    to the local gateway whenever a watched object class is detected, and
    calls ``sys.exit(1)`` on bad ROI arguments or after too many
    consecutive capture failures.

    Args:
        loop_num: running count of consecutive capture failures; the
            caller seeds it with 0.  It is reset after each successfully
            decoded frame and the process aborts once it exceeds 100.
    """
    # --- CLI / configuration -------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', required=True, help='json_format')
    args = parser.parse_args()
    # NOTE(review): the quote swap assumes single-quoted pseudo-JSON and
    # breaks if any value contains an apostrophe.
    json_args = json.loads(str(args.data).replace("'", '"'))

    device_number = json_args['device_number']
    channel_name = json_args['channel_name']
    ip_addr = json_args['ip_addr']
    rtsp_addr = json_args['rtsp_addr']
    task_name = json_args['task_name']
    mark_position = json_args['mark_position']
    model_path = json_args['load_model_path']
    model_name = json_args['load_model_name']
    model_type = json_args['load_model_type']
    model_url = json_args['load_model_url']
    user_set_detect_device_number = json_args['set_detect_device_number']
    user_set_detect_interval = json_args['set_detect_interval']
    user_set_detect_iou = json_args['set_detect_iou']
    user_set_detect_conf = json_args['set_detect_conf']
    user_set_alarm_type = json_args['set_alarm_type']
    user_set_alarm_object = json_args['set_alarm_object']
    user_set_alarm_interval = json_args['set_alarm_interval']
    user_set_roi_region = json_args['set_roi_region']
    # All display toggles coerced to bool; JSON may carry 0/1 or true/false.
    user_set_show_version = bool(json_args['set_show_version'])
    user_set_show_info_mark = bool(json_args['set_show_info_mark'])
    user_set_show_roi_edge = bool(json_args['set_show_roi_line'])
    user_set_show_roi_label = bool(json_args['set_show_roi_label'])
    user_set_show_fill_roi = bool(json_args['set_show_fill_roi'])
    user_set_show_fill_non_roi = bool(json_args['set_show_fill_non_roi'])
    user_set_show_result_box = bool(json_args['set_show_result_box'])
    user_set_show_result_label = bool(json_args['set_show_result_label'])
    user_set_show_result_conf = bool(json_args['set_show_result_conf'])
    user_set_show_result_count = bool(json_args['set_show_result_count'])

    # --- model: download on first use, then load -----------------------------
    if os.path.exists(model_path + model_name):
        pass

    else:
        # NOTE(review): downloads into `model_path`; assumes the URL's
        # filename matches `model_name` — confirm against the caller.
        wget.download(url=model_url, out=model_path)

    inference_task = YOLO(model_path + model_name, task=model_type)

    # --- video source --------------------------------------------------------
    cap = cv2.VideoCapture(rtsp_addr)

    camera_fps = cap.get(cv2.CAP_PROP_FPS)
    camera_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    camera_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    # Convert the user interval (presumably milliseconds — TODO confirm)
    # into a frame stride.  NOTE(review): eval() on user-supplied input;
    # int()/float() would be safer.
    detect_interval = round((camera_fps / 1000) * round(eval(user_set_detect_interval)))

    # Guard against a zero stride (e.g. very small interval or fps == 0).
    if detect_interval == 0:
        detect_interval = 1

    # --- ROI polygons --------------------------------------------------------
    user_set_roi_region_name_list = []
    user_set_roi_region_coordinate_list_part = []
    user_set_roi_region_coordinate_list_all = []

    for roi_part in user_set_roi_region:
        user_set_roi_region_name = roi_part.get("set_roi_region_name")

        # Empty name falls back to a generic label.
        if len(user_set_roi_region_name) == 0:
            user_set_roi_region_name_list.append("Inference Region")

        else:
            user_set_roi_region_name_list.append(user_set_roi_region_name)

        user_set_roi_region_coordinate_step_one = roi_part.get("set_roi_region_coordinate")

        if len(user_set_roi_region_coordinate_step_one) == 0:
            # NOTE(review): this REPLACES (not appends to) the accumulated
            # region list with a single full-frame polygon, discarding any
            # regions collected from earlier entries — verify intended.
            user_set_roi_region_coordinate_list_all = [
                [
                    (0, 0),
                    (0, camera_height),
                    (camera_width, camera_height),
                    (camera_width, 0)
                ]
            ]

        else:
            # Each vertex dict's values become a rounded (x, y) tuple.
            # NOTE(review): eval() again on user-supplied coordinates.
            for user_set_roi_region_coordinate_step_two in user_set_roi_region_coordinate_step_one:
                user_set_roi_region_coordinate = tuple(round(eval(coordinate))
                                                       for coordinate in
                                                       user_set_roi_region_coordinate_step_two.values())
                user_set_roi_region_coordinate_list_part.append(user_set_roi_region_coordinate)

            user_set_roi_region_coordinate_list_all.append(user_set_roi_region_coordinate_list_part)
            user_set_roi_region_coordinate_list_part = []

    # --- static overlay text -------------------------------------------------
    product_version = ("Core Version: 1.0.1" + "\n" +
                       "Wisdiot Experience Edition" + "\n" +
                       "仅供体验，严禁商用! 如需商用请联系作者。")

    info_mark = "Device No." + device_number + "\n" + channel_name + f"({task_name})"

    # Pixel anchor of the device/channel mark; stays None for an
    # unrecognized `mark_position` value (would crash later if the info
    # mark is enabled — TODO confirm upstream validation).
    show_mark_position = None

    if mark_position == "top_left":
        show_mark_position = (int(camera_width * 0.025), int(camera_height * 0.025))

    elif mark_position == "bottom_left":
        show_mark_position = (int(camera_width * 0.025), int(camera_height - camera_height * 0.025))

    elif mark_position == "top_right":
        show_mark_position = (int(camera_width - camera_width * 0.3), int(camera_height * 0.025))

    elif mark_position == "bottom_right":
        show_mark_position = (int(camera_width - camera_width * 0.3), int(camera_height - camera_height * 0.025))

    # --- compute devices: e.g. ["0", "1"] -> "0/1", or "cpu" ----------------
    user_set_detect_device_number_list = []
    detect_device_number = None

    for set_detect_device_number in user_set_detect_device_number:
        if set_detect_device_number != "cpu":
            user_set_detect_device_number_element = int(eval(set_detect_device_number))
            user_set_detect_device_number_list.append(user_set_detect_device_number_element)

        elif set_detect_device_number == "cpu":
            user_set_detect_device_number_element = str(set_detect_device_number)
            user_set_detect_device_number_list.append(user_set_detect_device_number_element)

        # NOTE(review): rebuilt every iteration and the generator variable
        # shadows the outer `detect_device_number`; only the final value is
        # used, so this works but belongs after the loop.
        detect_device_number = ("/".join("{}".format(detect_device_number)
                                         for detect_device_number in user_set_detect_device_number_list))

    # --- threshold validation ------------------------------------------------
    # NOTE(review): when invalid, a float default is substituted here, but
    # the value is later passed through eval() again in the inference call
    # below, and eval() rejects non-strings — that path raises TypeError.
    if user_set_detect_iou == '' \
            or eval(user_set_detect_iou) >= 1 \
            or eval(user_set_detect_iou) <= 0:

        user_set_detect_iou = 0.7

    if user_set_detect_conf == '' \
            or eval(user_set_detect_conf) >= 1 \
            or eval(user_set_detect_conf) <= 0:

        user_set_detect_conf = 0.25

    # Labels require boxes; confidences require boxes + labels.
    if user_set_show_result_label:
        if not user_set_show_result_box:
            user_set_show_result_box = True

    if user_set_show_result_conf:
        # NOTE(review): precedence — this reads `(not box) or label`, which
        # also fires when label is already True; likely intended
        # `not (box and label)`.  Net effect is the same, so harmless.
        if not user_set_show_result_box or user_set_show_result_label:
            user_set_show_result_box = True
            user_set_show_result_label = True

    # Seed the alarm rate-limiter 30s in the past so the first detection
    # is not suppressed (for alarm intervals <= 30s).
    memory_time = datetime.now() - timedelta(seconds=30)

    i = 0                        # frame counter, wrapped at 30000
    points_list = []             # ROI polygons as np arrays (grows per frame — see note below)
    inference_activation = True  # cleared when ROI args are malformed
    frame_pool = []              # recently captured frames; trimmed to last 2

    while True:
        if cap.isOpened():
            # Validate ROI polygons: >= 3 vertices, each vertex 2-D.
            if len(user_set_roi_region_coordinate_list_all) != 0:
                for pts_exam in user_set_roi_region_coordinate_list_all:
                    if len(pts_exam) <= 2:
                        inference_activation = False

                    for pts_len in pts_exam:
                        if len(pts_len) != 2:
                            inference_activation = False

            else:
                logging.debug("Wrong roi region args! "
                              "The algorithm program has been terminated. "
                              "Please check your args and try again.")
                cap.release()
                sys.exit(1)

            while inference_activation:

                # Only every `detect_interval`-th frame is decoded/processed.
                if i % detect_interval == 0:
                    status, cap_frame = cap.read()

                    if status:
                        frame_pool.append(cap_frame)

                        # Bound memory: keep only the two newest frames.
                        if len(frame_pool) >= 10:
                            del frame_pool[0:-2]
                            gc.collect()

                        if len(frame_pool) != 0:
                            # A good frame resets the failure counter.
                            if loop_num != 0:
                                loop_num = 0

                            ori_frame = frame_pool.pop(-1)

                            # Binary mask of all ROI polygons (white = ROI).
                            mask_roi = np.zeros(ori_frame.shape[:2], np.uint8)

                            # NOTE(review): points_list is never cleared, so
                            # the same polygons are re-appended every frame
                            # and it grows unboundedly — confirm intended.
                            for pts_element in user_set_roi_region_coordinate_list_all:
                                points = np.array(pts_element, np.int_)
                                points_list.append(points)

                            for points_use_first in points_list:
                                cv2.fillPoly(mask_roi, [points_use_first], (255, 255, 255))

                                if user_set_show_roi_edge:
                                    cv2.polylines(ori_frame, [points_use_first], True, (0, 230, 255), 2)

                            # post_frame: original pixels inside ROI, black outside
                            # (only this masked image is fed to the model).
                            mask_non_roi = ~mask_roi
                            post_frame = cv2.bitwise_and(ori_frame, ori_frame, mask=mask_roi)

                            # Optional orange tint over everything OUTSIDE the ROI.
                            if user_set_show_fill_non_roi:
                                fill_region = ori_frame.copy()
                                point_full_list = []

                                for points_full in [
                                    [
                                        (0, 0),
                                        (0, camera_height),
                                        (camera_width, camera_height),
                                        (camera_width, 0)
                                    ]
                                ]:
                                    point_full = np.array(points_full, np.int_)
                                    point_full_list.append(point_full)

                                for point_full_plot in point_full_list:
                                    cv2.fillPoly(fill_region, [point_full_plot], (0, 130, 255))

                                fill_region_non_roi = cv2.bitwise_and(fill_region, fill_region, mask=mask_non_roi)
                                ori_frame = cv2.addWeighted(fill_region_non_roi, 0.2, ori_frame, 1, gamma=0)

                            # Optional teal tint over the ROI itself.
                            if user_set_show_fill_roi:
                                fill_region_roi = post_frame.copy()

                                for points_use_fourth in points_list:
                                    cv2.fillPoly(fill_region_roi, [points_use_fourth], (180, 170, 0))

                                ori_frame = cv2.addWeighted(fill_region_roi, 0.2, ori_frame, 1, gamma=0)

                            # Device/channel mark — drawn via PIL because cv2
                            # cannot render CJK text.
                            if user_set_show_info_mark:
                                cv2_img = cv2.cvtColor(ori_frame, cv2.COLOR_BGR2RGB)
                                pil_img = Image.fromarray(cv2_img)
                                draw = ImageDraw.Draw(pil_img)

                                text_font = ImageFont.truetype(
                                    "/usr/share/fonts/truetype/msyh.ttf",
                                    30,
                                    encoding="utf-8")

                                draw.text(
                                    (show_mark_position[0], show_mark_position[1]),
                                    info_mark,
                                    (255, 100, 0),
                                    font=text_font
                                )

                                ori_frame = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)

                            # NOTE(review): this condition is ALWAYS True, so
                            # the version watermark is drawn regardless of the
                            # user flag — likely deliberate enforcement for
                            # the non-commercial "Experience Edition".
                            if (user_set_show_version
                                    or user_set_show_version is False):
                                cv2_img = cv2.cvtColor(ori_frame, cv2.COLOR_BGR2RGB)
                                pil_img = Image.fromarray(cv2_img)
                                draw = ImageDraw.Draw(pil_img)

                                text_font = ImageFont.truetype(
                                    "/usr/share/fonts/truetype/msyh.ttf",
                                    25,
                                    encoding="utf-8")

                                draw.text(
                                    (int(camera_width - camera_width * 0.25),
                                     int(camera_height - camera_height * 0.09)),
                                    product_version,
                                    (0, 200, 255),
                                    font=text_font
                                )

                                ori_frame = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)

                            # Per-ROI name labels, anchored near each
                            # polygon's first vertex.
                            if user_set_show_roi_label:
                                pts_list_temporary = [p for p in user_set_roi_region_coordinate_list_all]
                                roi_names_list_temporary = [r for r in user_set_roi_region_name_list]

                                drawing_roi_label = True

                                while drawing_roi_label:
                                    # NOTE(review): precedence — parses as
                                    # `len(a) or (len(b) != 0)`; only correct
                                    # because both lists always share length.
                                    if len(pts_list_temporary) or len(roi_names_list_temporary) != 0:
                                        pts_roi = pts_list_temporary.pop(-1)
                                        roi_name = roi_names_list_temporary.pop(-1)

                                        cv2_img = cv2.cvtColor(ori_frame, cv2.COLOR_BGR2RGB)
                                        pil_img = Image.fromarray(cv2_img)
                                        draw = ImageDraw.Draw(pil_img)

                                        text_font = ImageFont.truetype(
                                            "/usr/share/fonts/truetype/msyh.ttf",
                                            25,
                                            encoding="utf-8")

                                        draw.text(
                                            (pts_roi[0][0] + 20, pts_roi[0][1] + 30),
                                            roi_name,
                                            (255, 255, 0),
                                            font=text_font
                                        )

                                        ori_frame = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)

                                    # Same precedence caveat as above; exits
                                    # once both lists are drained.
                                    elif len(pts_list_temporary) or len(roi_names_list_temporary) == 0:
                                        drawing_roi_label = False

                            # Run the model on the ROI-masked frame only.
                            # NOTE(review): eval() here raises TypeError when
                            # the float defaults above were substituted.
                            results = inference_task(source=post_frame,
                                                     show=False,
                                                     boxes=True,
                                                     vid_stride=detect_interval,
                                                     conf=eval(user_set_detect_conf),
                                                     iou=eval(user_set_detect_iou),
                                                     device=detect_device_number
                                                     )

                            # Render detections onto the overlay-decorated frame.
                            annotated_frame = results[0].plot(conf=user_set_show_result_conf,
                                                              labels=user_set_show_result_label,
                                                              boxes=user_set_show_result_box,
                                                              img=ori_frame,
                                                              line_width=2,
                                                              font_size=5
                                                              )

                            detect_result = results[0].boxes

                            detect_object_list = []
                            counter_position_y = int(camera_height * 0.2)

                            # Collect detected class names ("id:N name" when a
                            # tracker assigned an id to the box).
                            for d in reversed(detect_result):
                                cls, conf = d.cls.squeeze(), d.conf.squeeze()
                                c = int(cls)
                                name = f'id:{int(d.id.item())} {inference_task.names[c]}' \
                                    if d.id is not None \
                                    else inference_task.names[c]

                                detect_object_list.append(name)

                            detect_object_list_reset = list(set(detect_object_list))

                            # Counter panel header.
                            if user_set_show_result_count:

                                cv2_img = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
                                pil_img = Image.fromarray(cv2_img)
                                draw = ImageDraw.Draw(pil_img)

                                text_font = ImageFont.truetype(
                                    "/usr/share/fonts/truetype/msyh.ttf",
                                    30,
                                    encoding="utf-8")

                                draw.text(
                                    (int(camera_width - camera_width * 0.3), counter_position_y),
                                    "检测结果统计:",
                                    (170, 255, 180),
                                    font=text_font
                                )

                                annotated_frame = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)

                            # One pass per watched object class.
                            for user_set_alarm_object_part in user_set_alarm_object:
                                if user_set_alarm_object_part not in detect_object_list_reset:
                                    if user_set_show_result_count:
                                        show_counter = f"{user_set_alarm_object_part}" + ": " + "0"

                                        cv2_img = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
                                        pil_img = Image.fromarray(cv2_img)
                                        draw = ImageDraw.Draw(pil_img)

                                        text_font = ImageFont.truetype(
                                            "/usr/share/fonts/truetype/msyh.ttf",
                                            30,
                                            encoding="utf-8")

                                        # NOTE(review): this branch does not
                                        # advance counter_position_y, so
                                        # several "0" rows can overlap.
                                        draw.text(
                                            (int(camera_width - camera_width * 0.3), counter_position_y + 35),
                                            show_counter,
                                            (170, 255, 180),
                                            font=text_font
                                        )

                                        annotated_frame = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)

                                    continue

                                if user_set_alarm_object_part in detect_object_list_reset:
                                    if user_set_show_result_count:
                                        set_counter = detect_object_list.count(user_set_alarm_object_part)
                                        show_counter = f"{user_set_alarm_object_part}" + ": " + f"{set_counter}"

                                        counter_position_y += 35

                                        cv2_img = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
                                        pil_img = Image.fromarray(cv2_img)
                                        draw = ImageDraw.Draw(pil_img)

                                        text_font = ImageFont.truetype(
                                            "/usr/share/fonts/truetype/msyh.ttf",
                                            30,
                                            encoding="utf-8")

                                        draw.text(
                                            (int(camera_width - camera_width * 0.3), counter_position_y),
                                            show_counter,
                                            (170, 255, 180),
                                            font=text_font
                                        )

                                        annotated_frame = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)

                                    # Rate-limit alarms per configured interval.
                                    # NOTE(review): `.seconds` ignores the days
                                    # component of the timedelta; use
                                    # total_seconds() for intervals >= 1 day.
                                    current_time = datetime.now()
                                    time_interval = current_time - memory_time

                                    if time_interval.seconds >= round(eval(user_set_alarm_interval)):
                                        json_output = {}

                                        pic_time = str(time.strftime("%Y-%m-%d_%H-%M-%S",
                                                                     time.localtime(time.time())))
                                        json_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

                                        # Snapshot saved locally; the gateway is
                                        # sent a URL, not the image bytes.
                                        temporary_pic_path = r'/mnt/wisdiot_ai/pic_temporary_save/'
                                        temporary_pic_name = pic_time
                                        temporary_pic_format = ".jpg"
                                        temporary_pic_file = "{0}{1}{2}".format(temporary_pic_path,
                                                                                temporary_pic_name,
                                                                                temporary_pic_format)

                                        cv2.imwrite(temporary_pic_file, annotated_frame)

                                        url_path = "/upload/images/"
                                        pic_url = ip_addr + url_path + temporary_pic_name + temporary_pic_format

                                        memory_time = datetime.now()

                                        # NOTE(review): key says "base64img" but
                                        # the value is a URL — confirm receiver.
                                        json_output["device_number"] = device_number
                                        json_output["alarm_time"] = json_time
                                        json_output["alarm_type"] = user_set_alarm_type
                                        json_output["base64img"] = pic_url

                                        headers = {
                                            "Content-Type": "application/json; charset=UTF-8",
                                            "Referer": "http://www.wisdiot.com/",
                                            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) "
                                                          "AppleWebKit/537.36 (HTML, like Gecko) "
                                                          "Chrome/67.0.3396.87 "
                                                          "Safari/537.36",
                                        }

                                        alarm_url = "http://172.17.0.1:5001/gateway_api/data/analysis/data_analysis"

                                        logging.debug("url:%s" % alarm_url)
                                        # NOTE(review): unguarded POST — a
                                        # network error aborts the whole loop.
                                        response = requests.post(alarm_url,
                                                                 data=json.dumps(json_output),
                                                                 headers=headers,
                                                                 timeout=10).text
                                        logging.debug("response:%s" % response)

                                    else:
                                        pass

                            # Wrap the frame counter to avoid unbounded growth.
                            if i > 30000:
                                i = 0

                            i += 1

                            if len(frame_pool) >= 10:
                                del frame_pool[0:-2]
                                gc.collect()

                            continue

                        else:
                            continue

                    else:
                        # Decode failed: count it, drop buffered frames, back
                        # off, and reopen the stream after 5 failures.
                        loop_num += 1

                        del frame_pool[:]
                        gc.collect()

                        time.sleep(6)

                        if loop_num >= 5:
                            cap.release()
                            time.sleep(6)
                            cap = cv2.VideoCapture(rtsp_addr)
                            time.sleep(10)

                else:
                    # Skipped frame: just advance the counter in real time.
                    i += 1
                    time.sleep(1 / camera_fps)

                    if i > 30000:
                        i = 0

        else:
            # Capture not open at all: count the failure and reconnect.
            loop_num += 1

            del frame_pool[:]
            gc.collect()

            cap.release()
            time.sleep(6)
            cap = cv2.VideoCapture(rtsp_addr)
            time.sleep(10)

        # Give up for good after 100 consecutive failures.
        if loop_num > 100:
            logging.debug("The algorithm program has stopped due to uncontrollable reasons. "
                          "Please check for errors and restart manually.")
            cap.release()

            del frame_pool[:]
            gc.collect()

            sys.exit(1)


if __name__ == '__main__':
    # Two workers: the inference loop does the actual video analytics,
    # while the heartbeat periodically reports liveness to the gateway.
    # The heartbeat thread is a daemon so it never blocks process exit.
    inference_worker = Thread(target=inference, args=(0,), name="1")
    heartbeat_worker = Thread(target=algorithm_heartbeat, name="2", daemon=True)

    inference_worker.start()
    heartbeat_worker.start()
