import json
import threading
import multiprocessing
import time
import inspect
import ctypes
import imageio
from Server_Compute_test import *
from Server_Status import *
from fight import *
from keras.models import load_model
from keras.optimizers import SGD
import numpy as np
from numba import cuda
import copy, struct
from keras import backend as K
import tensorflow as tf
from func_timeout import func_timeout, FunctionTimedOut
import datetime, os, signal


def reset():
    command = os.popen(f'ps -p {os.getpid()} -o cmd').read()
    import subprocess; subprocess.Popen(command[4:].strip().split())
    os.kill(os.getpid(), signal.SIGTERM)


def scan():
    while True:
        try:
            import psutil
            mem = psutil.virtual_memory()
            total = float(mem.total) / 1024 / 1024 / 1024
            used = float(mem.used) / 1024 / 1024 / 1024

            from datetime import datetime
            now = datetime.now()
            if used / total > 0.9 or (now.hour == 0 and now.minute == 0):
                th = threading.Thread(target=reset)
                th.start()
            import time; time.sleep(3)
        except:
            break


# Send the Server's Status
def send_status(tcp_server):
    print('状态线程启动')
    while True:
        try:
            server_status = get_server_status()
            server_status = json.dumps(server_status)
            tcp_server.send(server_status.encode('utf-8'))        
            time.sleep(30)
        except Exception as e:
            print(str(e))
            print('状态线程停止')
            break


# Receive the Client's Task
def receive_task(tcp_server):
    global thread_status, thread_status_lock
    while True:
        try:
            size = struct.unpack('!I', tcp_server.recv(4))[0]
            msg_str = tcp_server.recv(size).decode('utf-8')
            # print(msg)
            msg = json.loads(msg_str)
            if msg['INS_Type'] == 'IN4_CV_TASK':
                print(msg)
                INS_Type = msg['INS_Type']
                Task_ID = msg['Task_ID']
                Stream_URL = msg['Stream_URL']
                Scenes_Names = msg['Scenes_Names']
                Scenes_Confs = msg['Scenes_Confs']
                Scenes_IOUs = msg['Scenes_IOUs']
                Monitoring_Periods = msg['Monitoring_Periods']
                Monitoring_Regions = msg['Monitoring_Regions']
                thread_status_lock.acquire()
                if Task_ID not in thread_status.keys():
                    threading_TRT_detect = threading.Thread(target=TRT_detect, args=(
                    tcp_server, INS_Type, Task_ID, Stream_URL, Scenes_Names, Scenes_Confs, Scenes_IOUs,
                    Monitoring_Periods, Monitoring_Regions,))
                    threading_TRT_detect.start()
                    thread_status[Task_ID] = 1
                    print(f'{datetime.datetime.now()}    任务{Task_ID}开启成功    {thread_status}')
                else:
                    print(f'{datetime.datetime.now()}    任务{Task_ID}开启失败    {thread_status}')
                thread_status_lock.release()
            elif msg['INS_Type'] == 'IN4_CV_CANCEL':
                print(msg)
                Task_ID = msg['Task_ID']
                thread_status_lock.acquire()
                if Task_ID in thread_status.keys():
                    thread_status[Task_ID] = 0
                thread_status_lock.release()
            elif msg['INS_Type'] == 'IN4_CV_CANCEL_ALL':
                print(msg)
                thread_status_lock.acquire()
                for Task_ID in thread_status:
                    thread_status[Task_ID] = 0
                thread_status_lock.release()
            else:
                pass
        except Exception as e:
            print(repr(e))
            print(msg_str)
            print('TCP连接中断...')
            '''
            time.sleep(3)
            tcp_server.close()
            tcp_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tcp_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            tcp_server.bind(('', 2333))
            tcp_server.connect((ip, 23333))
            '''
            while True:
                try:
                    server_tmp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    server_tmp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1     )
 
                    server_tmp.bind(('', 2333))
                    ip ='202.38.81.185'
                    server_tmp.connect((ip, 23333))
                    break
                except Exception as e:
                    server_tmp.close()
                    print(e)
                    print('连接失败， 尝试重新连接')
                    time.sleep(3)
                    continue
            tcp_server = server_tmp          
            threading_IN4_CV_STATUS = threading.Thread(target=send_status, args=(tcp_server,))
            threading_IN4_CV_STATUS.start()
  
            print('————已重连————')
    
       
def in_period(Monitoring_Periods):
    timeNow = time.strftime("%Y-%m-%d %H:%M:%S")[-8:-3].split(':')
    hourNow, minuteNow = int(timeNow[0]), int(timeNow[1])

    for Monitoring_Period in Monitoring_Periods:
        timeSet = Monitoring_Period.split('-')
        timeBegin = timeSet[0].split(':')
        hourBegin, minuteBegin = int(timeBegin[0]), int(timeBegin[1])
        timeEnd = timeSet[1].split(':')
        hourEnd, minuteEnd = int(timeEnd[0]), int(timeEnd[1])

        if minuteBegin + hourBegin * 60 <= minuteEnd + hourEnd * 60:
            if minuteBegin + hourBegin * 60 <= minuteNow + hourNow * 60 <= minuteEnd + hourEnd * 60:
                return True
        else:
            if minuteNow + hourNow * 60 >= minuteBegin + hourBegin * 60 or minuteNow + hourNow * 60 <= minuteEnd + hourEnd * 60:
                return True
                
    return False


def send_alarm(sence, tcp_server, Stream_URL, Task_ID, clock):
    try:
        server_save = {'INS_Type': 'IN4_CV_ALARM', 'Stream_URL': Stream_URL, 'Task_ID': Task_ID,
                         'Scene': sence, 'Video_Path': f'/home/icetc/saves/video/{sence}/{clock}.mp4'}
        server_save = json.dumps(server_save)
        tcp_server.send(server_save.encode('utf-8'))
    except Exception as e:
        print(str(e))


def send_alarm_end(sence, tcp_server, Task_ID, Video_Path, Stream_URL):
    try:
        server_save = {'INS_Type': 'IN4_CV_ALARM_END', 'Task_ID': Task_ID, 'Scene': sence, 'Video_Path': Video_Path, 'Stream_URL': Stream_URL}
        server_save = json.dumps(server_save)
        tcp_server.send(server_save.encode('utf-8'))
    except Exception as e:
        print(str(e))


def predict_fight(frames, model, sess, seg_graph):
    global fight_results
    flows = getOpticalFlow(frames)

    result = np.zeros((len(flows), 224, 224, 5))
    result[..., :3] = frames
    result[..., 3:] = flows

    Input = load_data(result)
    Input = np.expand_dims(Input, 0)

    with sess.as_default():
        with seg_graph.as_default():
            prediction = model.predict(Input)
    prob = round(max(prediction[0]), 2)
    key = prediction.argmax()

    if len(fight_results) == 3:
        fight_results.pop(0)

    k2v = {0: 'Fight', 1: 'noFight'}
    fight_results.append(k2v[key])


def save_label(clock_person, classes_person, locations_person, confs_person, Class):
    dict_frames = {}
    for i, (class_person, location_person, conf_person) in enumerate(zip(classes_person, locations_person, confs_person)):
        dict_frame = {}
        dict_frame['class'] = class_person
        dict_frame['location'] = location_person
        dict_frame['conf'] = conf_person
        dict_frames[str(i)] = dict_frame

    filename = f'/home/icetc/saves/label_class/{Class}/{clock_person}.json'
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(dict_frames, f, ensure_ascii=False, indent=4)
        
    # np.save(filename, np.array(classes_person))
    # loaded_arr = np.load('1.npy')


def TRT_detect(tcp_server, INS_Type, Task_ID, Stream_URL, Scenes_Names, Scenes_Confs, Scenes_IOUs, Monitoring_Periods, Monitoring_Regions):
    global thread_status, fight_results, model, sess, seg_graph, thread_status_lock

    interval = 5

    PLUGIN_LIBRARY = "libmyplugins.so"
    ctypes.CDLL(PLUGIN_LIBRARY)

    print(Scenes_Names)
    for i, Scenes_Name in enumerate(Scenes_Names):
        if Scenes_Name == 'fall':
            CONF_THRESH_fall = Scenes_Confs[i]
            IOU_THRESHOLD_fall = Scenes_IOUs[i]
            Monitoring_Period_fall = Monitoring_Periods[i]
            Monitoring_Regions_fall = Monitoring_Regions[i]

            categories_fall = []
            file = open('./classes/coco.txt', 'r')
            for category in file.readlines():
                categories_fall.append(category.strip())
            file.close()
            engine_file_path = "yolov5s.engine"
            yolov5_wrapper_coco = YoLov5TRT(engine_file_path)
        elif Scenes_Name == 'chair':
            CONF_THRESH_chair = Scenes_Confs[i]
            IOU_THRESHOLD_chair = Scenes_IOUs[i]
            Monitoring_Period_chair = Monitoring_Periods[i]
            Monitoring_Regions_chair = Monitoring_Regions[i]


            categories_chair = []
            file = open('./classes/coco.txt', 'r')
            for category in file.readlines():
                categories_chair.append(category.strip())
            engine_file_path = "yolov5s.engine"
            yolov5_wrapper_chair = YoLov5TRT(engine_file_path)

            print('chair文件创建完毕==================================')
        elif Scenes_Name == 'person' or Scenes_Name == 'climb' or Scenes_Name == 'intrude':
            CONF_THRESH_person = Scenes_Confs[i]
            IOU_THRESHOLD_person = Scenes_IOUs[i]
            Monitoring_Period_person = Monitoring_Periods[i]
            Monitoring_Regions_person = Monitoring_Regions[i]

            categories_person = []
            file = open('./classes/coco.txt', 'r')
            for category in file.readlines():
                categories_person.append(category.strip())
            file.close()
            engine_file_path = "yolov5s.engine"
            yolov5_wrapper_person = YoLov5TRT(engine_file_path)

        elif Scenes_Name == 'knife':
            CONF_THRESH_knife = Scenes_Confs[i]
            IOU_THRESHOLD_knife = Scenes_IOUs[i]
            Monitoring_Period_knife = Monitoring_Periods[i]
            Monitoring_Regions_knife = Monitoring_Regions[i]

            categories_knife = []
            file = open('./classes/knife.txt', 'r')
            for category in file.readlines():
                categories_knife.append(category.strip())
            file.close()
            engine_file_path = "knife.engine"
            yolov5_wrapper_knife = YoLov5TRT(engine_file_path)
        elif Scenes_Name == 'fire':
            CONF_THRESH_fire = Scenes_Confs[i]
            IOU_THRESHOLD_fire = Scenes_IOUs[i]
            Monitoring_Period_fire = Monitoring_Periods[i]
            Monitoring_Regions_fire = Monitoring_Regions[i]

            categories_fire = []
            file = open('./classes/fire.txt', 'r')
            for category in file.readlines():
                categories_fire.append(category.strip())
            file.close()
            engine_file_path = "fire.engine"
            yolov5_wrapper_fire = YoLov5TRT(engine_file_path)
        elif Scenes_Name == 'fight':
            Monitoring_Period_fight = Monitoring_Periods[i]
        elif Scenes_Name == 'gender' or Scenes_Name == 'man' or Scenes_Name == 'woman':
            CONF_THRESH_gender = Scenes_Confs[i]
            IOU_THRESHOLD_gender = Scenes_IOUs[i]
            Monitoring_Period_gender = Monitoring_Periods[i]
            Monitoring_Regions_gender = Monitoring_Regions[i]

            categories_gender = []
            file = open('./classes/gender.txt', 'r')
            for category in file.readlines():
                categories_gender.append(category.strip())
            file.close()
            engine_file_path = "gender.engine"
            yolov5_wrapper_gender = YoLov5TRT(engine_file_path)
        
    Frame_Num = 1
    second_before = 0
    try:
        cap = cv2.VideoCapture(Stream_URL)
        '''
        try:
            cap = func_timeout(5, cv2.VideoCapture, args=(Stream_URL,))
            # cap = cv2.VideoCapture(Stream_URL)
        except (FunctionTimedOut, Exception) as e:
            print(f'{datetime.datetime.now()}    任务{Task_ID}视频流{Stream_URL}打开超时')
            raise e
        '''
        fps = int(cap.get(5))
        if fps > 200 or fps < 5:
            fps = 30

        dirnames = ['fire', 'fall', 'knife', 'fight', 'person', 'climb', 'intrude', 'gender','chair']
        for dirname in dirnames: 
            if not os.path.exists(f'/home/icetc/saves/video/{dirname}'): 
                os.makedirs(f'/home/icetc/saves/video/{dirname}') 
            if not os.path.exists(f'/home/icetc/saves/videoOrigin/{dirname}'): 
                os.makedirs(f'/home/icetc/saves/videoOrigin/{dirname}') 
            if not os.path.exists(f'/home/icetc/saves/label_loc/{dirname}'): 
                os.makedirs(f'/home/icetc/saves/label_loc/{dirname}') 
            if not os.path.exists(f'/home/icetc/saves/label_class/{dirname}'): 
                os.makedirs(f'/home/icetc/saves/label_class/{dirname}')

        num_fire = 0
        right_fire = 0
        frames_fire = []
        save_fire = 0
        num_fire_true = 0
        Results_Locs_fire = []
        Results_Clses_fire = []
        alarm_fire = 0
        count_fire =0
        writer_fire = imageio.get_writer('test.mp4', fps=30)
        writer_fire_origin = imageio.get_writer('test.mp4', fps=30)

        num_fall = 0
        right_fall = 0
        frames_fall = []
        save_fall = 0
        num_fall_true = 0
        Results_Locs_fall = []
        Results_Clses_fall = []
        alarm_fall = 0
        count_fall = 0
        writer_fall = imageio.get_writer('test.mp4', fps=30)
        writer_fall_origin = imageio.get_writer('test.mp4', fps=30)

        num_person = 0
        right_person = 0
        frames_person = []
        frames_person_origin = []
        classes_person_old = []
        locations_person_old = []
        confs_person_old = []
        save_person = 0
        num_person_true = 0
        Results_Locs_person = []
        Results_Clses_person = []
        Results_Confs_person = []
        alarm_person = 0
        count_person = 0
        writer_person = imageio.get_writer('test.mp4', fps=30)
        writer_person_origin = imageio.get_writer('test.mp4', fps=30)
        classes_person = []
        locations_person = []
        confs_person = []

        num_chair = 0
        right_chair = 0
        frames_chair = []
        frames_chair_origin = []
        classes_chair_old = []
        locations_chair_old = []
        confs_chair_old = []
        save_chair = 0
        num_chair_true = 0
        Results_Locs_chair = []
        Results_Clses_chair = []
        Results_Confs_chair = []
        alarm_chair = 0
        count_chair = 0
        writer_chair = imageio.get_writer('test.mp4', fps=30)
        writer_chair_origin = imageio.get_writer('test.mp4', fps=30)
        classes_chair = []
        locations_chair = []
        confs_chair = []

        num_knife = 0
        right_knife = 0
        frames_knife = []
        save_knife = 0
        num_knife_true = 0
        Results_Locs_knife = []
        Results_Clses_knife = []
        alarm_knife = 0
        count_knife = 0
        writer_knife = imageio.get_writer('test.mp4', fps=30)
        writer_knife_origin = imageio.get_writer('test.mp4', fps=30)

        num_fight = 0
        right_fight = 0
        frames_fight = []
        save_fight = 0
        num_fight_true = 0
        Results_Locs_fight = []
        Results_Clses_fight = []
        count_fight = 0

        frames = []
        frames_save = []
        alarm_fight = 0
        writer_fight = imageio.get_writer('test.mp4', fps=30)
        writer_fight_origin = imageio.get_writer('test.mp4', fps=30)

        num_gender = 0
        right_gender = 0
        frames_gender = []
        save_gender = 0
        num_gender_true = 0
        Results_Locs_gender = []
        Results_Clses_gender = []
        alarm_gender = 0
        count_gender = 0
        writer_gender = imageio.get_writer('test.mp4', fps=30)
        writer_gender_origin = imageio.get_writer('test.mp4', fps=30)

        flag_fight = 0
        while True:
            if not thread_status[Task_ID]:
                break
            flag_fire = 0
            flag_fall = 0
            flag_person = 0
            flag_knife = 0
            flag_gender = 0
            flag_chair = 0
            ret, frame = cap.read()
            '''
            try:
                ret, frame = func_timeout(3, cap.read)
                # ret, frame = cap.read()
            except (FunctionTimedOut, Exception) as e:
                print(f'{datetime.datetime.now()}    任务{Task_ID}视频流{Stream_URL}获取超时')
                raise e
            '''
            frame_origin = copy.deepcopy(frame)
            if ret:
                num_fire += 1
                num_knife += 1
                num_fight += 1
                num_fall += 1
                num_person += 1
                num_gender += 1
                num_chair +=1
                start = time.time()
                Frame_Time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))

                Results_Locs = []
                Results_Clses = []
                Results_Confs = []

                if 'knife' in Scenes_Names and in_period(Monitoring_Period_knife) and num_knife % interval == 0:
                    frame, Results_Locs_knife, Results_Clses_knife, Results_Confs_knife, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair = yolov5_wrapper_knife.infer(frame, categories_knife, [0, 0, 255],
                                                                                         CONF_THRESH_knife,IOU_THRESHOLD_knife, Monitoring_Regions_knife, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender)

                    Results_Locs += Results_Locs_knife
                    Results_Clses += Results_Clses_knife
                    Results_Confs += Results_Confs_knife

                    if flag_knife:
                        if count_knife > fps * 60:
                            writer_knife.close()

                            send_alarm_end('knife', tcp_server, Task_ID, f'/home/icetc/saves/video/knife/{clock_knife}.mp4', Stream_URL)

                            save_knife = 0
                            num_knife_true = 0
                            alarm_knife = 0
                            count_knife = 0

                        num_knife_true += 1
                        frames_knife.append(frame)

                        if save_knife == 1:
                            count_knife += 1
                            writer_knife.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                        if len(frames_knife) > fps * 2:
                            for i in range(len(frames_knife) - fps * 2):
                                frames_knife.pop(0)

                        if num_knife_true >= (fps // interval) or save_knife == 1:
                            right_knife = num_knife + fps * 2
                            save_knife = 1
                            if num_knife_true == (fps // interval) and alarm_knife == 0:
                                alarm_knife = 1
                                clock_knife = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
                                send_alarm('knife', tcp_server, Stream_URL, Task_ID, clock_knife)

                                writer_knife = imageio.get_writer(f'/home/icetc/saves/video/knife/{clock_knife}.mp4', fps=fps)
                                for frame_knife in frames_knife:
                                    count_knife += 1
                                    writer_knife.append_data(imageio.core.util.Array(cv2.cvtColor(frame_knife, cv2.COLOR_RGB2BGR)))
                    else:
                        if save_knife:
                            if num_knife > right_knife:  # 当前帧超出了边界
                                writer_knife.close()

                                send_alarm_end('knife', tcp_server, Task_ID, f'/home/icetc/saves/video/knife/{clock_knife}.mp4', Stream_URL)

                                save_knife = 0
                                num_knife_true = 0
                                alarm_knife = 0
                                count_knife = 0
                            else:
                                num_knife_true = 0
                                frames_knife.append(frame)

                                count_knife += 1
                                writer_knife.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                                if len(frames_knife) > fps * 2:
                                    for i in range(len(frames_knife) - fps * 2):
                                        frames_knife.pop(0)
                        else:
                            num_knife_true = 0
                            frames_knife.append(frame)

                            if len(frames_knife) > fps * 2:
                                for i in range(len(frames_knife) - fps * 2):
                                    frames_knife.pop(0)
                elif 'knife' in Scenes_Names and in_period(Monitoring_Period_knife) and num_knife % interval != 0:
                    for i, loc in enumerate(Results_Locs_knife):
                        tl = round(0.002 * (frame.shape[0] + frame.shape[1]) / 2) + 1
                        center_x, center_y, width, height = list(map(int, loc.split(',')))
                        c1, c2 = (center_x-width//2, center_y-height//2), (center_x+width//2, center_y+height//2)
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], thickness=tl, lineType=cv2.LINE_AA)

                        tf = max(tl - 1, 1)
                        t_size = cv2.getTextSize(Results_Clses_knife[i], 0, fontScale=tl / 3, thickness=tf)[0]
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], -1, cv2.LINE_AA)  # filled
                        cv2.putText(
                            frame,
                            Results_Clses_knife[i],
                            (c1[0], c1[1] - 2),
                            0,
                            tl / 3,
                            [225, 255, 255],
                            thickness=tf,
                            lineType=cv2.LINE_AA,
                        )
                    frames_knife.append(frame)

                    if save_knife == 1:
                        count_knife += 1
                        writer_knife.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                    if len(frames_knife) > fps * 2:
                        for i in range(len(frames_knife) - fps * 2):
                            frames_knife.pop(0)

                if 'fire' in Scenes_Names and in_period(Monitoring_Period_fire) and num_fire % interval == 0:
                    frame, Results_Locs_fire, Results_Clses_fire, Results_Confs_fire, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair = yolov5_wrapper_fire.infer(frame, categories_fire, [0, 0, 255],
                                                                                         CONF_THRESH_fire, IOU_THRESHOLD_fire, Monitoring_Regions_fire, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender)

                    Results_Locs += Results_Locs_fire
                    Results_Clses += Results_Clses_fire
                    Results_Confs += Results_Confs_fire

                    if flag_fire:
                        if count_fire > fps * 60:
                            writer_fire.close()

                            send_alarm_end('fire', tcp_server, Task_ID, f'/home/icetc/saves/video/fire/{clock_fire}.mp4', Stream_URL)

                            save_fire = 0
                            num_fire_true = 0
                            alarm_fire = 0
                            count_fire = 0

                        num_fire_true += 1
                        frames_fire.append(frame)

                        if save_fire == 1:
                            count_fire += 1
                            writer_fire.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                        if len(frames_fire) > fps * 2:
                            for i in range(len(frames_fire) - fps * 2):
                                frames_fire.pop(0)

                        if num_fire_true >= (fps // interval) or save_fire == 1:
                            right_fire = num_fire + fps * 2
                            save_fire = 1
                            if num_fire_true == (fps // interval) and alarm_fire == 0:
                                alarm_fire = 1
                                clock_fire = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
                                send_alarm('fire', tcp_server, Stream_URL, Task_ID, clock_fire)

                                writer_fire = imageio.get_writer(f'/home/icetc/saves/video/fire/{clock_fire}.mp4', fps=fps)
                                for frame_fire in frames_fire:
                                    count_fire += 1
                                    writer_fire.append_data(imageio.core.util.Array(cv2.cvtColor(frame_fire, cv2.COLOR_RGB2BGR)))
                    else:
                        if save_fire:
                            if num_fire > right_fire:  # 当前帧超出了边界
                                writer_fire.close()

                                send_alarm_end('fire', tcp_server, Task_ID, f'/home/icetc/saves/video/fire/{clock_fire}.mp4', Stream_URL)

                                save_fire = 0
                                num_fire_true = 0
                                alarm_fire = 0
                                count_fire = 0
                            else:
                                num_fire_true = 0
                                frames_fire.append(frame)

                                count_fire += 1
                                writer_fire.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                                if len(frames_fire) > fps * 2:
                                    for i in range(len(frames_fire) - fps * 2):
                                        frames_fire.pop(0)
                        else:
                            num_fire_true = 0
                            frames_fire.append(frame)

                            if len(frames_fire) > fps * 2:
                                for i in range(len(frames_fire) - fps * 2):
                                    frames_fire.pop(0)
                elif 'fire' in Scenes_Names and in_period(Monitoring_Period_fire) and num_fire % interval != 0:
                    for i, loc in enumerate(Results_Locs_fire):
                        tl = round(0.002 * (frame.shape[0] + frame.shape[1]) / 2) + 1
                        center_x, center_y, width, height = list(map(int, loc.split(',')))
                        c1, c2 = (center_x-width//2, center_y-height//2), (center_x+width//2, center_y+height//2)
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], thickness=tl, lineType=cv2.LINE_AA)

                        tf = max(tl - 1, 1)
                        t_size = cv2.getTextSize(Results_Clses_fire[i], 0, fontScale=tl / 3, thickness=tf)[0]
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], -1, cv2.LINE_AA)  # filled
                        cv2.putText(
                            frame,
                            Results_Clses_fire[i],
                            (c1[0], c1[1] - 2),
                            0,
                            tl / 3,
                            [225, 255, 255],
                            thickness=tf,
                            lineType=cv2.LINE_AA,
                        )
                    frames_fire.append(frame)

                    if save_fire == 1:
                        count_fire += 1
                        writer_fire.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                    if len(frames_fire) > fps * 2:
                        for i in range(len(frames_fire) - fps * 2):
                            frames_fire.pop(0)

                if 'fall' in Scenes_Names and in_period(Monitoring_Period_fall) and num_fall % interval == 0:
                    frame, Results_Locs_fall, Results_Clses_fall, Results_Confs_fall, flag_fire, flag_knife,flag_fall, flag_fight, flag_person, flag_gender,flag_chair = yolov5_wrapper_coco.infer(frame, categories_fall, [255, 0, 0],
                                                                                         CONF_THRESH_fall,IOU_THRESHOLD_fall, Monitoring_Regions_fall, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender)
                    Results_Locs += Results_Locs_fall
                    Results_Clses += Results_Clses_fall
                    Results_Confs += Results_Confs_fall

                    if flag_fall:
                        if count_fall > fps * 60:
                            writer_fall.close()

                            send_alarm_end('fall', tcp_server, Task_ID, f'/home/icetc/saves/video/fall/{clock_fall}.mp4', Stream_URL)

                            save_fall = 0
                            num_fall_true = 0
                            alarm_fall = 0
                            count_fall = 0

                        num_fall_true += 1
                        frames_fall.append(frame)

                        if save_fall == 1:
                            count_fall += 1
                            writer_fall.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                        if len(frames_fall) > fps * 2:
                            for i in range(len(frames_fall) - fps * 2):
                                frames_fall.pop(0)

                        if num_fall_true >= (fps // interval) or save_fall == 1:
                            right_fall = num_fall + fps * 2
                            save_fall = 1
                            if num_fall_true == (fps // interval) and alarm_fall == 0:
                                alarm_fall = 1
                                clock_fall = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
                                send_alarm('fall', tcp_server, Stream_URL, Task_ID, clock_fall)

                                writer_fall = imageio.get_writer(f'/home/icetc/saves/video/fall/{clock_fall}.mp4', fps=fps)
                                for frame_fall in frames_fall:
                                    count_fall += 1
                                    writer_fall.append_data(imageio.core.util.Array(cv2.cvtColor(frame_fall, cv2.COLOR_RGB2BGR)))
                    else:
                        if save_fall:
                            if num_fall > right_fall:  # 当前帧超出了边界
                                writer_fall.close()

                                send_alarm_end('fall', tcp_server, Task_ID, f'/home/icetc/saves/video/fall/{clock_fall}.mp4', Stream_URL)

                                save_fall = 0
                                num_fall_true = 0
                                alarm_fall = 0
                                count_fall = 0
                            else:
                                num_fall_true = 0
                                frames_fall.append(frame)

                                count_fall += 1
                                writer_fall.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                                if len(frames_fall) > fps * 2:
                                    for i in range(len(frames_fall) - fps * 2):
                                        frames_fall.pop(0)
                        else:
                            num_fall_true = 0
                            frames_fall.append(frame)

                            if len(frames_fall) > fps * 2:
                                for i in range(len(frames_fall) - fps * 2):
                                    frames_fall.pop(0)
                elif 'fall' in Scenes_Names and in_period(Monitoring_Period_fall) and num_fall % interval != 0:
                    for i, loc in enumerate(Results_Locs_fall):
                        tl = round(0.002 * (frame.shape[0] + frame.shape[1]) / 2) + 1
                        center_x, center_y, width, height = list(map(int, loc.split(',')))
                        c1, c2 = (center_x-width//2, center_y-height//2), (center_x+width//2, center_y+height//2)
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], thickness=tl, lineType=cv2.LINE_AA)

                        tf = max(tl - 1, 1)
                        t_size = cv2.getTextSize(Results_Clses_fall[i], 0, fontScale=tl / 3, thickness=tf)[0]
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], -1, cv2.LINE_AA)  # filled
                        cv2.putText(
                            frame,
                            Results_Clses_fall[i],
                            (c1[0], c1[1] - 2),
                            0,
                            tl / 3,
                            [225, 255, 255],
                            thickness=tf,
                            lineType=cv2.LINE_AA,
                        )
                    frames_fall.append(frame)

                    if save_fall == 1:
                        count_fall += 1
                        writer_fall.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                    if len(frames_fall) > fps * 2:
                        for i in range(len(frames_fall) - fps * 2):
                            frames_fall.pop(0)

                if ('intrude' in Scenes_Names or 'climb' in Scenes_Names or 'person' in Scenes_Names) and in_period(Monitoring_Period_person) and num_person % interval == 0:
                    frame, Results_Locs_person, Results_Clses_person, Results_Confs_person, flag_fire,flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair = yolov5_wrapper_person.infer(frame, categories_person, [255, 0, 0],
                                                                                         CONF_THRESH_person,
                                                                                         IOU_THRESHOLD_person, Monitoring_Regions_person, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender)
                    Results_Locs += Results_Locs_person
                    Results_Clses += Results_Clses_person
                    Results_Confs += Results_Confs_person

                    print('检测结果接受===============================================================================')
                    if flag_person:
                        if count_person > fps * 60:
                            writer_person.close()
                            writer_person_origin.close()

                            if 'climb' in Scenes_Names:
                                send_alarm_end('climb', tcp_server, Task_ID, f'/home/icetc/saves/video/climb/{clock_person}.mp4', Stream_URL)
                            elif 'intrude' in Scenes_Names:
                                send_alarm_end('intrude', tcp_server, Task_ID, f'/home/icetc/saves/video/intrude/{clock_person}.mp4', Stream_URL)
                            else:
                                send_alarm_end('person', tcp_server, Task_ID, f'/home/icetc/saves/video/person/{clock_person}.mp4', Stream_URL)

                            save_person = 0
                            num_person_true = 0
                            alarm_person = 0
                            count_person = 0

                            Class = ''
                            if 'climb' in Scenes_Names:
                                Class = 'climb'
                            elif 'intrude' in Scenes_Names:
                                Class = 'intrude'
                            else:
                                Class = 'person'
                                
                            classes_person_save = copy.deepcopy(classes_person)
                            locations_person_save = copy.deepcopy(locations_person)
                            confs_person_save = copy.deepcopy(confs_person)
                            threading_save_label = threading.Thread(target=save_label, args=(clock_person, classes_person_save, locations_person_save, confs_person_save, Class))
                            threading_save_label.start()
                            classes_person = []
                            locations_person = []
                            confs_person = []

                        num_person_true += 1
                        frames_person.append(frame)
                        frames_person_origin.append(frame_origin)
                        classes_person_old.append(Results_Clses_person)
                        locations_person_old.append(Results_Locs_person)
                        confs_person_old.append(Results_Confs_person)

                        if save_person == 1:
                            count_person += 1
                            writer_person.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))
                            writer_person_origin.append_data(imageio.core.util.Array(cv2.cvtColor(frame_origin, cv2.COLOR_RGB2BGR)))
                            classes_person.append(Results_Clses_person)
                            locations_person.append(Results_Locs_person)
                            confs_person.append(Results_Confs_person)

                        if len(frames_person) > fps * 2:
                            for i in range(len(frames_person) - fps * 2):
                                frames_person.pop(0)
                                frames_person_origin.pop(0)
                                classes_person_old.pop(0)
                                locations_person_old.pop(0)
                                confs_person_old.pop(0)

                        if num_person_true >= (fps // interval) or save_person == 1:
                            right_person = num_person + fps * 2
                            save_person = 1
                            if num_person_true == (fps // interval) and alarm_person == 0:
                                alarm_person = 1
                                clock_person = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
                                
                                if 'climb' in Scenes_Names:
                                    send_alarm('climb', tcp_server, Stream_URL, Task_ID, clock_person)
                                elif 'intrude' in Scenes_Names:
                                    send_alarm('intrude', tcp_server, Stream_URL, Task_ID, clock_person)
                                else:
                                    send_alarm('person', tcp_server, Stream_URL, Task_ID, clock_person)

                                if 'climb' in Scenes_Names:
                                    writer_person = imageio.get_writer(f'/home/icetc/saves/video/climb/{clock_person}.mp4', fps=fps)
                                    writer_person_origin = imageio.get_writer(f'/home/icetc/saves/videoOrigin/climb/{clock_person}.mp4', fps=fps)
                                elif 'intrude' in Scenes_Names:
                                    writer_person = imageio.get_writer(f'/home/icetc/saves/video/intrude/{clock_person}.mp4', fps=fps)
                                    writer_person_origin = imageio.get_writer(f'/home/icetc/saves/videoOrigin/intrude/{clock_person}.mp4', fps=fps)
                                else:
                                    writer_person = imageio.get_writer(f'/home/icetc/saves/video/person/{clock_person}.mp4', fps=fps)
                                    writer_person_origin = imageio.get_writer(f'/home/icetc/saves/videoOrigin/person/{clock_person}.mp4', fps=fps)
                                
                                for frame_person, frame_person_origin in zip(frames_person, frames_person_origin):
                                    count_person += 1
                                    writer_person.append_data(imageio.core.util.Array(cv2.cvtColor(frame_person, cv2.COLOR_RGB2BGR)))
                                    writer_person_origin.append_data(imageio.core.util.Array(cv2.cvtColor(frame_person_origin, cv2.COLOR_RGB2BGR)))
                                    classes_person.extend(classes_person_old)
                                    locations_person.extend(locations_person_old)
                                    confs_person.extend(confs_person_old)
                    else:
                        if save_person:
                            if num_person > right_person:  # 当前帧超出了边界
                                writer_person.close()
                                writer_person_origin.close()

                                if 'climb' in Scenes_Names:
                                    send_alarm_end('climb', tcp_server, Task_ID, f'/home/icetc/saves/video/climb/{clock_person}.mp4', Stream_URL)
                                elif 'intrude' in Scenes_Names:
                                    send_alarm_end('intrude', tcp_server, Task_ID, f'/home/icetc/saves/video/intrude/{clock_person}.mp4', Stream_URL)
                                else:
                                    send_alarm_end('person', tcp_server, Task_ID, f'/home/icetc/saves/video/person/{clock_person}.mp4', Stream_URL)

                                save_person = 0
                                num_person_true = 0
                                alarm_person = 0
                                count_person = 0

                                Class = ''
                                if 'climb' in Scenes_Names:
                                    Class = 'climb'
                                elif 'intrude' in Scenes_Names:
                                    Class = 'intrude'
                                else:
                                    Class = 'person'
                                    
                                classes_person_save = copy.deepcopy(classes_person)
                                locations_person_save = copy.deepcopy(locations_person)
                                confs_person_save = copy.deepcopy(confs_person)
                                threading_save_label = threading.Thread(target=save_label, args=(clock_person, classes_person_save, locations_person_save, confs_person_save, Class))
                                threading_save_label.start()
                                classes_person_old = []
                                locations_person_old = []
                                confs_person_old = []
                            else:
                                num_person_true = 0
                                frames_person.append(frame)
                                frames_person_origin.append(frame_origin)
                                classes_person_old.append(Results_Clses_person)
                                locations_person_old.append(Results_Locs_person)
                                confs_person_old.append(Results_Confs_person)

                                count_person += 1
                                writer_person.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))
                                writer_person_origin.append_data(imageio.core.util.Array(cv2.cvtColor(frame_origin, cv2.COLOR_RGB2BGR)))
                                classes_person.append(Results_Clses_person)
                                locations_person.append(Results_Locs_person)
                                confs_person.append(Results_Confs_person)

                                if len(frames_person) > fps * 2:
                                    for i in range(len(frames_person) - fps * 2):
                                        frames_person.pop(0)
                                        frames_person_origin.pop(0)
                                        classes_person_old.pop(0)
                                        locations_person_old.pop(0)
                                        confs_person_old.pop(0)
                        else:
                            num_person_true = 0
                            frames_person.append(frame)
                            frames_person_origin.append(frame_origin)
                            classes_person_old.append(Results_Clses_person)
                            locations_person_old.append(Results_Locs_person)
                            confs_person_old.append(Results_Confs_person)

                            if len(frames_person) > fps * 2:
                                for i in range(len(frames_person) - fps * 2):
                                    frames_person.pop(0)
                                    frames_person_origin.pop(0)
                                    classes_person_old.pop(0)
                                    locations_person_old.pop(0)
                                    confs_person_old.pop(0)
                elif ('intrude' in Scenes_Names or 'climb' in Scenes_Names or 'person' in Scenes_Names) and in_period(Monitoring_Period_person) and num_person % interval != 0:
                    for i, loc in enumerate(Results_Locs_person):
                        tl = round(0.002 * (frame.shape[0] + frame.shape[1]) / 2) + 1
                        center_x, center_y, width, height = list(map(int, loc.split(',')))
                        c1, c2 = (center_x-width//2, center_y-height//2), (center_x+width//2, center_y+height//2)
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], thickness=tl, lineType=cv2.LINE_AA)

                        tf = max(tl - 1, 1)
                        t_size = cv2.getTextSize(Results_Clses_person[i], 0, fontScale=tl / 3, thickness=tf)[0]
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], -1, cv2.LINE_AA)  # filled
                        cv2.putText(
                            frame,
                            Results_Clses_person[i],
                            (c1[0], c1[1] - 2),
                            0,
                            tl / 3,
                            [225, 255, 255],
                            thickness=tf,
                            lineType=cv2.LINE_AA,
                        )
                    frames_person.append(frame)
                    frames_person_origin.append(frame_origin)
                    classes_person_old.append(Results_Clses_person)
                    locations_person_old.append(Results_Locs_person)
                    confs_person_old.append(Results_Confs_person)

                    if save_person == 1:
                        count_person += 1
                        writer_person.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))
                        writer_person_origin.append_data(imageio.core.util.Array(cv2.cvtColor(frame_origin, cv2.COLOR_RGB2BGR)))
                        classes_person.append(Results_Clses_person)
                        locations_person.append(Results_Locs_person)
                        confs_person.append(Results_Confs_person)

                    if len(frames_person) > fps * 2:
                        for i in range(len(frames_person) - fps * 2):
                            frames_person.pop(0)
                            frames_person_origin.pop(0)
                            classes_person_old.pop(0)
                            locations_person_old.pop(0)
                            confs_person_old.pop(0)


                if ('chair' in Scenes_Names) and in_period(Monitoring_Period_chair) and num_chair % interval == 0:
                    frame, Results_Locs_chair, Results_Clses_chair, Results_Confs_chair, flag_fire, flag_knife, flag_fall, flag_fight, flag_chair,flag_gender,flag_chair = yolov5_wrapper_chair.infer(frame, categories_chair, [255, 0, 0],
                                                                                         CONF_THRESH_chair,
                                                                                         IOU_THRESHOLD_chair, Monitoring_Regions_chair, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender,flag_chair)
                    Results_Locs += Results_Locs_chair
                    Results_Clses += Results_Clses_chair
                    Results_Confs += Results_Confs_chair


                    print('Results_Locs_chair', Results_Locs_chair,
                          'Results_Clses_chair',Results_Clses_chair,
                          'Results_Confs_chair', Results_Confs_chair,
                          flag_fire, flag_knife, 
                          flag_fall, flag_fight, 
                          flag_chair, flag_gender,flag_chair)


                    if flag_chair:
                        print('flag_chair=1检测到chair了================================================================')
                        if count_chair > fps * 60:
                            writer_chair.close()
                            writer_chair_origin.close()
                            send_alarm_end('chair', tcp_server, Task_ID, f'/home/icetc/saves/video/chair/{clock_chair}.mp4', Stream_URL)

                            save_chair = 0
                            num_chair_true = 0
                            alarm_chair = 0
                            count_chair = 0

                            Class = 'chair'
                                
                            # classes_chair_save = copy.deepcopy(classes_chair)
                            # locations_chair_save = copy.deepcopy(locations_chair)
                            # confs_chair_save = copy.deepcopy(confs_chair)
                            # threading_save_label = threading.Thread(target=save_label, args=(clock_chair, classes_chair_save, locations_chair_save, confs_chair_save, Class))
                            # threading_save_label.start()
                            classes_chair = []
                            locations_chair = []
                            confs_chair = []

                        num_chair_true += 1
                        frames_chair.append(frame)
                        frames_chair_origin.append(frame_origin)
                        classes_chair_old.append(Results_Clses_chair)
                        locations_chair_old.append(Results_Locs_chair)
                        confs_chair_old.append(Results_Confs_chair)

                        if save_chair == 1:
                            count_chair += 1
                            writer_chair.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))
                            writer_chair_origin.append_data(imageio.core.util.Array(cv2.cvtColor(frame_origin, cv2.COLOR_RGB2BGR)))
                            classes_chair.append(Results_Clses_chair)
                            locations_chair.append(Results_Locs_chair)
                            confs_chair.append(Results_Confs_chair)

                        if len(frames_chair) > fps * 2:
                            for i in range(len(frames_chair) - fps * 2):
                                frames_chair.pop(0)
                                frames_chair_origin.pop(0)
                                classes_chair_old.pop(0)
                                locations_chair_old.pop(0)
                                confs_chair_old.pop(0)

                        if num_chair_true >= (fps // interval) or save_chair == 1:
                            right_chair = num_chair + fps * 2
                            save_chair = 1
                            if num_chair_true == (fps // interval) and alarm_chair == 0:
                                alarm_chair = 1
                                clock_chair = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
                                
                                send_alarm('chair', tcp_server, Stream_URL, Task_ID, clock_chair)

                                
                                writer_chair = imageio.get_writer(f'/home/icetc/saves/video/chair/{clock_chair}.mp4', fps=fps)
                                writer_chair_origin = imageio.get_writer(f'/home/icetc/saves/videoOrigin/chair/{clock_chair}.mp4', fps=fps)
                                
                                for frame_chair, frame_chair_origin in zip(frames_chair, frames_chair_origin):
                                    count_chair += 1
                                    writer_chair.append_data(imageio.core.util.Array(cv2.cvtColor(frame_chair, cv2.COLOR_RGB2BGR)))
                                    writer_chair_origin.append_data(imageio.core.util.Array(cv2.cvtColor(frame_chair_origin, cv2.COLOR_RGB2BGR)))
                                    classes_chair.extend(classes_chair_old)
                                    locations_chair.extend(locations_chair_old)
                                    confs_chair.extend(confs_chair_old)
                    else:
                        if save_chair:
                            if num_chair > right_chair:  # 当前帧超出了边界
                                writer_chair.close()
                                writer_chair_origin.close()

                                send_alarm_end('chair', tcp_server, Task_ID, f'/home/icetc/saves/video/chair/{clock_chair}.mp4', Stream_URL)

                                save_chair = 0
                                num_chair_true = 0
                                alarm_chair = 0
                                count_chair = 0

                                Class = 'chair'
                                    
                                # classes_chair_save = copy.deepcopy(classes_chair)
                                # locations_chair_save = copy.deepcopy(locations_chair)
                                # confs_chair_save = copy.deepcopy(confs_chair)
                                # threading_save_label = threading.Thread(target=save_label, args=(clock_chair, classes_chair_save, locations_chair_save, confs_chair_save, Class))
                                # threading_save_label.start()
                                classes_chair_old = []
                                locations_chair_old = []
                                confs_chair_old = []
                            else:
                                num_chair_true = 0
                                frames_chair.append(frame)
                                frames_chair_origin.append(frame_origin)
                                classes_chair_old.append(Results_Clses_chair)
                                locations_chair_old.append(Results_Locs_chair)
                                confs_chair_old.append(Results_Confs_chair)

                                count_chair += 1
                                writer_chair.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))
                                writer_chair_origin.append_data(imageio.core.util.Array(cv2.cvtColor(frame_origin, cv2.COLOR_RGB2BGR)))
                                classes_chair.append(Results_Clses_chair)
                                locations_chair.append(Results_Locs_chair)
                                confs_chair.append(Results_Confs_chair)

                                if len(frames_chair) > fps * 2:
                                    for i in range(len(frames_chair) - fps * 2):
                                        frames_chair.pop(0)
                                        frames_chair_origin.pop(0)
                                        classes_chair_old.pop(0)
                                        locations_chair_old.pop(0)
                                        confs_chair_old.pop(0)
                        else:
                            num_chair_true = 0
                            frames_chair.append(frame)
                            frames_chair_origin.append(frame_origin)
                            classes_chair_old.append(Results_Clses_chair)
                            locations_chair_old.append(Results_Locs_chair)
                            confs_chair_old.append(Results_Confs_chair)

                            if len(frames_chair) > fps * 2:
                                for i in range(len(frames_chair) - fps * 2):
                                    frames_chair.pop(0)
                                    frames_chair_origin.pop(0)
                                    classes_chair_old.pop(0)
                                    locations_chair_old.pop(0)
                                    confs_chair_old.pop(0)
                elif ('chair' in Scenes_Names) and in_period(Monitoring_Period_chair) and num_chair % interval != 0:
                    for i, loc in enumerate(Results_Locs_chair):
                        tl = round(0.002 * (frame.shape[0] + frame.shape[1]) / 2) + 1
                        center_x, center_y, width, height = list(map(int, loc.split(',')))
                        c1, c2 = (center_x-width//2, center_y-height//2), (center_x+width//2, center_y+height//2)
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], thickness=tl, lineType=cv2.LINE_AA)

                        tf = max(tl - 1, 1)
                        t_size = cv2.getTextSize(Results_Clses_chair[i], 0, fontScale=tl / 3, thickness=tf)[0]
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], -1, cv2.LINE_AA)  # filled
                        cv2.putText(
                            frame,
                            Results_Clses_chair[i],
                            (c1[0], c1[1] - 2),
                            0,
                            tl / 3,
                            [225, 255, 255],
                            thickness=tf,
                            lineType=cv2.LINE_AA,
                        )
                    frames_chair.append(frame)
                    frames_chair_origin.append(frame_origin)
                    classes_chair_old.append(Results_Clses_chair)
                    locations_chair_old.append(Results_Locs_chair)
                    confs_chair_old.append(Results_Confs_chair)

                    if save_chair == 1:
                        count_chair += 1
                        writer_chair.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))
                        writer_chair_origin.append_data(imageio.core.util.Array(cv2.cvtColor(frame_origin, cv2.COLOR_RGB2BGR)))
                        classes_chair.append(Results_Clses_chair)
                        locations_chair.append(Results_Locs_chair)
                        confs_chair.append(Results_Confs_chair)

                    if len(frames_chair) > fps * 2:
                        for i in range(len(frames_chair) - fps * 2):
                            frames_chair.pop(0)
                            frames_chair_origin.pop(0)
                            classes_chair_old.pop(0)
                            locations_chair_old.pop(0)
                            confs_chair_old.pop(0)



                if ('gender' in Scenes_Names or 'man' in Scenes_Names or 'woman' in Scenes_Names) and in_period(Monitoring_Period_gender) and num_gender % interval == 0:
                    frame, Results_Locs_gender, Results_Clses_gender, Results_Confs_gender, flag_fire, flag_knife,flag_fall, flag_fight, flag_person, flag_gender,flag_chair = yolov5_wrapper_gender.infer(frame, categories_gender, [255, 0, 0],
                                                                                         CONF_THRESH_gender,
                                                                                         IOU_THRESHOLD_gender, Monitoring_Regions_gender, flag_fire, flag_knife, flag_fall, flag_fight, flag_person, flag_gender)
                    
                    if 'man' in Scenes_Names and 'man' not in Results_Clses_gender:
                        flag_gender = 0
                    if 'woman' in Scenes_Names and 'woman' not in Results_Clses_gender:
                        flag_gender = 0

                    Results_Locs += Results_Locs_gender
                    Results_Clses += Results_Clses_gender
                    Results_Confs += Results_Confs_gender

                    if flag_gender:
                        if count_gender > fps * 60:
                            writer_gender.close()

                            name = 'gender'
                            if 'man' in Scenes_Names:
                                name = 'man'
                            if 'woman' in Scenes_Names:
                                name = 'woman'
                            send_alarm_end(name, tcp_server, Task_ID, f'/home/icetc/saves/video/{name}/{clock_gender}.mp4', Stream_URL)

                            save_gender = 0
                            num_gender_true = 0
                            alarm_gender = 0
                            count_gender = 0

                        num_gender_true += 1
                        frames_gender.append(frame)

                        if save_gender == 1:
                            count_gender += 1
                            writer_gender.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                        if len(frames_gender) > fps * 2:
                            for i in range(len(frames_gender) - fps * 2):
                                frames_gender.pop(0)

                        if num_gender_true >= (fps // interval) or save_gender == 1:
                            right_gender = num_gender + fps * 2
                            save_gender = 1
                            if num_gender_true == (fps // interval) and alarm_gender == 0:
                                alarm_gender = 1
                                clock_gender = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
                                
                                name = 'gender'
                                if 'man' in Scenes_Names:
                                    name = 'man'
                                if 'woman' in Scenes_Names:
                                    name = 'woman'
                                send_alarm(name, tcp_server, Stream_URL, Task_ID, clock_gender)

                                writer_gender = imageio.get_writer(f'/home/icetc/saves/video/{name}/{clock_gender}.mp4', fps=fps)
                                for frame_gender in frames_gender:
                                    count_gender += 1
                                    writer_gender.append_data(imageio.core.util.Array(cv2.cvtColor(frame_gender, cv2.COLOR_RGB2BGR)))
                    else:
                        if save_gender:
                            if num_gender > right_gender:  # 当前帧超出了边界
                                writer_gender.close()

                                name = 'gender'
                                if 'man' in Scenes_Names:
                                    name = 'man'
                                if 'woman' in Scenes_Names:
                                    name = 'woman'
                                send_alarm_end(name, tcp_server, Task_ID, f'/home/icetc/saves/video/{name}/{clock_gender}.mp4', Stream_URL)

                                save_gender = 0
                                num_gender_true = 0
                                alarm_gender = 0
                                count_gender = 0
                            else:
                                num_gender_true = 0
                                frames_gender.append(frame)

                                count_gender += 1
                                writer_gender.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                                if len(frames_gender) > fps * 2:
                                    for i in range(len(frames_gender) - fps * 2):
                                        frames_gender.pop(0)
                        else:
                            num_gender_true = 0
                            frames_gender.append(frame)

                            if len(frames_gender) > fps * 2:
                                for i in range(len(frames_gender) - fps * 2):
                                    frames_gender.pop(0)
                elif ('gender' in Scenes_Names or 'man' in Scenes_Names or 'woman' in Scenes_Names) and in_period(Monitoring_Period_gender) and num_gender % interval != 0:
                    for i, loc in enumerate(Results_Locs_gender):
                        tl = round(0.002 * (frame.shape[0] + frame.shape[1]) / 2) + 1
                        center_x, center_y, width, height = list(map(int, loc.split(',')))
                        c1, c2 = (center_x-width//2, center_y-height//2), (center_x+width//2, center_y+height//2)
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], thickness=tl, lineType=cv2.LINE_AA)

                        tf = max(tl - 1, 1)
                        t_size = cv2.getTextSize(Results_Clses_gender[i], 0, fontScale=tl / 3, thickness=tf)[0]
                        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
                        cv2.rectangle(frame, c1, c2, [255, 0, 0], -1, cv2.LINE_AA)  # filled
                        cv2.putText(
                            frame,
                            Results_Clses_gender[i],
                            (c1[0], c1[1] - 2),
                            0,
                            tl / 3,
                            [225, 255, 255],
                            thickness=tf,
                            lineType=cv2.LINE_AA,
                        )
                    frames_gender.append(frame)

                    if save_gender == 1:
                        count_gender += 1
                        writer_gender.append_data(imageio.core.util.Array(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)))

                    if len(frames_gender) > fps * 2:
                        for i in range(len(frames_gender) - fps * 2):
                            frames_gender.pop(0)

                if 'fight' in Scenes_Names and in_period(Monitoring_Period_fight):
                    frame = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_AREA)
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frame = np.reshape(frame, (224, 224, 3))
                    frames.append(frame)
                    
                    if len(frames_fight) > 6 * fps:
                        frames_fight.pop(0)
                    frames_fight.append(frame_origin)

                    if len(frames) == fps * 3:
                        frames_copy = copy.deepcopy(frames)
                        # predict_fight(frames_copy, model, sess, seg_graph)
                        threading_predict_fight = threading.Thread(target=predict_fight, args=(frames_copy, model, sess, seg_graph))
                        threading_predict_fight.start()

                        frames = []

                    Count = fight_results.count('Fight') 
                    if flag_fight:  # start fight
                        if Count >= 2:
                            writer_fight.append_data(imageio.core.util.Array(cv2.cvtColor(frame_origin, cv2.COLOR_RGB2BGR)))
                        else:
                            writer_fight.close()
                            send_alarm_end('fight', tcp_server, Task_ID, f'/home/icetc/saves/video/fight/{clock_fight}.mp4', Stream_URL)
                            alarm_fight = 0
                            flag_fight = 0
                            frames_fight = []
                    else:
                        if Count >= 2:  # first intro
                            if not alarm_fight:  # start alarm
                                clock_fight = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
                                send_alarm('fight', tcp_server, Stream_URL, Task_ID, clock_fight)
                                writer_fight = imageio.get_writer(f'/home/icetc/saves/video/fight/{clock_fight}.mp4', fps=fps)

                            alarm_fight = 1
                            flag_fight = 1
                            for frame_fight in frames_fight:
                                writer_fight.append_data(imageio.core.util.Array(cv2.cvtColor(frame_fight, cv2.COLOR_RGB2BGR)))
                        else:
                            pass


                if int(Frame_Time[-2:]) == second_before:
                    Frame_Num += 1
                else:
                    Frame_Num = 1
                second_before = int(Frame_Time[-2:])

                # cv2.imshow('', frame)

                cv2.waitKey(1)


                end = time.time()
                '''
                print('\n------------FPS: {}--------------'.format(round(1/(end-start), 1)))
                print('INS_Type:', 'IN4_CV_RST')
                print('Task_ID:', Task_ID)
                print('Results_Locs:', Results_Locs)
                print('Results_Clses:', Results_Clses)
                print('Results_Confs:', Results_Confs)
                print('Stream_URL:', Stream_URL)
                print('Frame_Num:', Frame_Num)
                print('Frame_Time:', Frame_Time)
                '''

                try:
                    detect_results = {'INS_Type': 'IN4_CV_RST', 'Task_ID': Task_ID,
                                      'Results_Locs': Results_Locs, 'Results_Clses': Results_Clses,
                                      'Results_Confs': Results_Confs, 'Stream_URL': Stream_URL,
                                      'Frame_Num': Frame_Num, 'Frame_Time': Frame_Time}
                    detect_results = json.dumps(detect_results)
                    tcp_server.send(detect_results.encode('utf-8'))
                except Exception as e:
                    print(2)
                    print(str(e))
                    break
    except (FunctionTimedOut,) as e:
        print('funtimout==================')
        print(str(e))
    finally:
        thread_status_lock.acquire()
        if Task_ID in thread_status.keys():
            del thread_status[Task_ID]
        thread_status_lock.release()
        if 'fall' in Scenes_Names:
            yolov5_wrapper_coco.destroy()
        if 'chair' in Scenes_Names:
            yolov5_wrapper_chair.destroy()
        if 'person' in Scenes_Names or 'intrude' in Scenes_Names or 'climb' in Scenes_Names:
            yolov5_wrapper_person.destroy()
        if 'knife' in Scenes_Names:
            yolov5_wrapper_knife.destroy()
        if 'fire' in Scenes_Names:
            yolov5_wrapper_fire.destroy()
        if 'fight' in Scenes_Names:
            pass
        if 'gender' in Scenes_Names or 'man' in Scenes_Names or 'woman' in Scenes_Names:
            yolov5_wrapper_gender.destroy()


if __name__ == '__main__':
    thread_status = {}
    fight_results = []
    socket_lock = threading.Lock()
    thread_status_lock = threading.Lock()

    seg_graph = tf.Graph()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    config = tf.ConfigProto(gpu_options=gpu_options)
    config.gpu_options.allow_growth = True
    sess = tf.Session(graph=seg_graph,config=config)
    K.set_session(sess)
    with sess.as_default():
        with seg_graph.as_default():
            sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
            model = load_model('fight.h5')
            model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    # # Connect to the Client
    # server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # server.bind(('', 2333))
    # ip = '202.38.81.222'
    # server.connect((ip, 23333))

    # Connect to the Client
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    while True:
        try:
            server_tmp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_tmp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            server_tmp.bind(('', 2333))
            ip ='202.38.81.185'
            #ip = '192.168.8.226'
            server_tmp.connect((ip, 23333))
            break
        except Exception as e:
            server_tmp.close()
            print(e)
            print('连接失败， 尝试重新连接')
            time.sleep(3)
            continue
    print('连接成功！')
    server = server_tmp
    threading_IN4_CV_STATUS = threading.Thread(target=send_status, args=(server,))
    threading_IN4_CV_STATUS.start()
    threading_IN4_CV_TASK = threading.Thread(target=receive_task, args=(server,))
    threading_IN4_CV_TASK.start()
    threading_IN4_CV_SCAN = threading.Thread(target=scan)
    threading_IN4_CV_SCAN.start()

