import os
import sys
import time
import json
import queue
import psutil
import shutil
import threading
from loguru import logger
from filelock import FileLock
from datetime import datetime, timedelta
from multiprocessing import Process, Value, Lock
from kafka import KafkaConsumer, KafkaProducer
from kafka.errors import NoBrokersAvailable, KafkaError
from process_event import ProcessEvent
from util import format_conversion, fill_missing_odds


# Base directory of this source file.
# NOTE(review): the name `dir` shadows the `dir` builtin; kept unchanged for
# backward compatibility with any external users of this module attribute.
dir = os.path.dirname(os.path.abspath(__file__))
log_dir = os.path.join(dir, "Log")
# exist_ok=True already tolerates an existing directory, so no pre-check needed.
os.makedirs(log_dir, exist_ok=True)
# Custom logging: drop loguru's default handler, then log INFO+ to stdout and
# to a daily-rotated file.
logger.remove()
logger.add(sink=sys.stdout,
           format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {function}:{line} - {message}",
           level="INFO")
logger.add(
    sink=os.path.join(log_dir, "event_{time:YYYY-MM-DD}.log"),  # dated log file name
    format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {function}:{line} - {message}",
    rotation="00:00",  # rotate at midnight every day
    retention="360 days",  # keep the most recent 360 days of logs
    encoding="utf-8",  # avoid mojibake for Chinese log text
    level="INFO",  # file log level
)

# Persistent daily event-counter state (used by Event.generate_id), written
# atomically via a temp file and guarded across processes by a file lock.
COUNTER_FILE = os.path.join(dir, 'Config', "counter_state.json")
TEMP_COUNTER_FILE = os.path.join(dir, 'Config', "counter_state.tmp")
LOCK_FILE = COUNTER_FILE + ".lock"
file_lock = FileLock(LOCK_FILE)

# ====== global shared state ======
GLOBAL_ID = Value('i', 1)  # shared int counter, initial value 1
lock = Lock()  # guards reads/writes of GLOBAL_ID

# Single-character abbreviations of Chinese province-level regions, used to
# validate the first character of a licence plate.
REGION_PREFIX = {
    '京', '津', '冀', '晋', '蒙', '辽', '吉', '黑',
    '沪', '苏', '浙', '皖', '闽', '赣', '鲁', '豫',
    '鄂', '湘', '粤', '桂', '琼', '渝', '川', '贵',
    '云', '藏', '陕', '甘', '青', '宁', '新'
}


class Event:
    """Traffic-event detection service for one global-code task.

    Consumes raw per-frame vehicle data from Kafka, fans the frames out to
    one worker thread per road section (``ProcessEvent``), collects the
    congestion/blockage events those workers report, stamps each event with
    a unique ID, and publishes events plus a heartbeat back to Kafka.
    """

    def __init__(self, config_info, task_flag):
        self.config_data = config_info["ConfigData"]
        self.gantry_info = self.config_data["gantry_info"]
        # Whether checkpoint (kakou) data is used: False for trajectory data,
        # True for gantry/checkpoint data.
        self.kakou_flag = self.config_data["kakou_flag"]
        # Whether licence-plate data is used: True for plate data,
        # False for global-ID data.
        self.plate_flag = self.config_data["plate_flag"]
        # Global code for this task.
        self.task_flag = task_flag
        # Queue of raw frames from all gantries.
        self.q = queue.Queue(maxsize=1000)
        # Per-section frame queues.
        self.q_dict = {}
        # Per-section run-flag queues, used to tell worker threads to exit.
        self.flag_dict = {}
        # Per-section queues of reported blockage information.
        self.block_dict = {}
        # Per-gantry monitoring report queues (currently unused).
        # self.kafka_data_dict = {}
        # Maps each gantry ID to the road-section ID(s) it belongs to.
        self.id_dict = {}
        # Maps each downstream gantry ID to its road-section ID.
        self.id_dict2 = {}
        # Per-section event counters:
        # {road_section: {"congestion": counter, "block": counter}}
        self.counters = {}
        # Event-type code mapping.
        self.event_types = {
            "congestion": 1,  # congestion type code
            "block": 2  # blockage type code
        }
        # Currently active events: {road_section: event_data}.
        self.active_events = {}
        # Thread management: set True to make all threads exit.
        self.thread_flag = False
        # Event-log save path (unused in this chunk).
        self.save_event_path = None
        # Start time of the current reporting window.
        self.start_time = None
        # Kafka producer handle (lazily connected in add_message).
        self.kafka_producer = None
        # Maps local event_id -> globally unique daily serial ID.
        self.id_list = {}
        # 0 = Kafka connected, 1 = disconnected.
        self.kafka_status = 1
        self.current_time_str = None
        # Trajectory data cache: {gantry_sn: {vehicle_id: cached message}}.
        self.trajectory_cache = {}
        # Recently reported event-ID strings (deduplication window).
        self.event_id_list = []

    def subscribe_data_kafka(self):
        """Consume raw vehicle frames from Kafka and push them onto self.q.

        Reconnects automatically when the broker is unavailable or the
        connection drops; exits when ``self.thread_flag`` is set.
        """
        topic = self.config_data["kafka_cars_topic"]
        kafka_consumer = None
        while True:
            time.sleep(0.002)
            # Connect the Kafka consumer; on failure, retry every 10 s.
            if kafka_consumer is None:
                try:
                    kafka_consumer = KafkaConsumer(
                        topic,
                        bootstrap_servers=self.config_data["kafka_host"],
                        auto_offset_reset='latest',
                        enable_auto_commit=False
                    )
                    self.kafka_status = 0
                    logger.info(f"原始数据监听启动, topic:{topic}")
                except NoBrokersAvailable as e:
                    logger.error(f"kafka连接失败, topic:{topic}")
                    kafka_consumer = None
                    time.sleep(10)
                    continue
            while True:
                time.sleep(0.0002)
                if self.kafka_status == 1:
                    # The producer side flagged a broken connection; rebuild.
                    logger.error("kafka连接断开")
                    kafka_consumer = None
                    time.sleep(10)
                    break
                try:
                    records = kafka_consumer.poll(timeout_ms=1000)
                    if records:
                        for tp, messages in records.items():
                            for message in messages:
                                if message:
                                    # Parse a checkpoint / re-identification frame.
                                    message_value = message.value.decode('utf-8')
                                    vehicle_info = json.loads(message_value)
                                    # timestamp = vehicle_info.get('timestamp', "")
                                    # sn = vehicle_info.get('siteCode', "")
                                    # ip = vehicle_info.get('ip', "")
                                    # plateNumber = vehicle_info.get('plateNumber', "")
                                    # vehicleType = vehicle_info.get('vehicleType', 0)
                                    # laneNo = vehicle_info.get('laneNo', 0)
                                    # Skip frames missing any required field.
                                    if 'detectTimeStamp' not in vehicle_info.keys():
                                        continue
                                    sendTime = int(vehicle_info['detectTimeStamp'])
                                    if "siteCode" not in vehicle_info.keys():
                                        continue
                                    sn = vehicle_info['siteCode']
                                    if "e1FrameParticipant" not in vehicle_info.keys():
                                        continue
                                    e1FrameParticipant = vehicle_info['e1FrameParticipant']
                                    participantNum = vehicle_info["participantNum"]
                                    e1_data = {
                                        "device": sn,
                                        "time": sendTime,
                                        "participantNum": participantNum,
                                        'e1FrameParticipant': e1FrameParticipant
                                    }
                                    # vehicle_data = {
                                    #     "device": sn,
                                    #     "time": sendTime,
                                    #     "id": plateNumber,
                                    #     "type": vehicleType,
                                    #     'lane': laneNo
                                    # }
                                    self.q.put(e1_data, block=False)
                except queue.Full:
                    # Queue full: drop the oldest entry.
                    # NOTE(review): the frame whose put() raised Full is lost
                    # too — it is not retried after the head is dropped.
                    self.q.get(block=False)  # manually discard the head entry
                    logger.warning("队列已满，丢弃队头数据")
                except KafkaError as e:
                    # Kafka failure: force a reconnect.
                    logger.error(f"kafka连接断开, topic:{topic}")
                    kafka_consumer = None
                    time.sleep(10)
                    break
                except Exception as e:
                    logger.opt(exception=e).error(f"解析数据失败, topic:{topic}")
                    pass
                # Exit the thread on request.
                if self.thread_flag:
                    logger.info("退出接收上行数据线程")
                    break
            # Exit the thread on request.
            if self.thread_flag:
                logger.info("退出接收上行数据线程")
                break

    def init(self):
        """Normalise gantry config, build lookup tables and start the
        consumer thread plus one ProcessEvent worker thread per section."""
        # {section_id: [upstream gantry IDs, downstream gantry IDs]}
        info_dict = {}
        # Initialise the reporting-window start time.
        self.start_time = datetime.now()
        # Initialise per-section structures.
        for info in self.gantry_info.keys():
            self.q_dict[info] = queue.Queue(maxsize=1000)
            self.flag_dict[info] = queue.Queue(maxsize=10)
            self.block_dict[info] = queue.Queue(maxsize=1000)
            # Normalise up/down gantry IDs to list form.
            gantry_up = self.gantry_info[info]['up_gantry_id']
            gantry_down = self.gantry_info[info]['down_gantry_id']
            self.gantry_info[info]["up_gantry_id"] = gantry_up if isinstance(gantry_up, list) else [gantry_up]
            self.gantry_info[info]["down_gantry_id"] = gantry_down if isinstance(gantry_down, list) else [gantry_down]
            # Map each downstream gantry ID to its section ID.
            for down_gantry_id in self.gantry_info[info]["down_gantry_id"]:
                self.id_dict2[down_gantry_id] = info
            info_dict[info] = [self.gantry_info[info]["up_gantry_id"], self.gantry_info[info]["down_gantry_id"]]
        # Map each gantry ID to its section ID(s).
        self.id_dict = format_conversion(info_dict)
        # NOTE(review): trajectory_cache is only initialised when kakou_flag
        # is True, yet deal_kafka_message reads trajectory_cache[sn] in the
        # NON-kakou (trajectory) branch — looks inverted; confirm intent.
        if self.kakou_flag:
            for sn in self.id_dict.keys():
                self.trajectory_cache[sn] = {}

        logger.info("初始化完成！！！")
        try:
            # Start the real-time Kafka listener thread.
            threading.Thread(target=self.subscribe_data_kafka).start()
        except Exception as e:
            logger.opt(exception=e).error("实时数据监听线程启动失败")
        # Start one worker thread per road section.
        for info in self.gantry_info.keys():
            try:
                # NOTE(review): this aliases (does not copy) self.config_data,
                # so each iteration overwrites the shared "gantry_info" entry;
                # presumably ProcessEvent copies what it needs — confirm.
                config_data = self.config_data
                config_data["gantry_info"] = self.gantry_info[info]
                config_data["gantry_info"]["info"] = info
                config_data["gantry_info"]["time_move"] = self.config_data["all_time_move"]
                config_data["gantry_info"]["time_interval"] = self.config_data["all_time_interval"]
                p = threading.Thread(target=self.process_main, args=(config_data, info,))
                p.start()
            except Exception as e:
                logger.opt(exception=e).error("创建process_main线程失败")
        logger.info("多线程启动完成")

    def generate_id(self, road_section_str, event_type, event_time, event_source):
        """Return a per-section event ID and register a global daily serial.

        A new ID is allocated when there is no active event of this type in
        the section, or when the gap since the last event exceeds four
        ``all_time_move`` intervals. The global serial (GLOBAL_ID) is
        persisted to COUNTER_FILE so restarts continue the daily sequence.

        Args:
            road_section_str: road-section ID (numeric string).
            event_type: "congestion" or "block".
            event_time: datetime of the event.
            event_source: textual description of the event source.

        Returns:
            int event ID, or 0 if event_type is invalid.
        """
        # Whether this call started a new event episode.
        is_new_event = False
        # Build the date parts used by the global daily serial.
        tt = datetime.now()
        current_day = tt.strftime("%Y%m%d")
        year = current_day[:4]
        month = current_day[4:6]
        day = current_day[6:]
        if self.id_list:
            # If the date has rolled over, clear id_list and reset GLOBAL_ID.
            last_event_id = list(self.id_list.keys())[0]
            last_id = self.id_list[last_event_id]
            last_day = str(last_id)[:8]
            if last_day != current_day:
                logger.info(f"日期改变，清空id_list, {last_day}, and {current_day}")
                self.id_list = {}
                with lock:
                    GLOBAL_ID.value = 1
        else:
            # id_list empty: possibly a mid-day restart — restore the last
            # GLOBAL_ID from file if the saved date matches, else start at 1.
            with file_lock:
                try:
                    with open(COUNTER_FILE, 'r', encoding="utf-8") as f:
                        data = json.load(f)
                        date_saved = data.get("date")
                        counter = data.get("counter", 0)
                        with lock:
                            if date_saved == current_day:
                                GLOBAL_ID.value = counter
                            else:
                                GLOBAL_ID.value = 1
                except (IOError, json.JSONDecodeError) as e:
                    logger.warning(f"[警告] 读取计数器失败: {e}")
        # Ensure road_section is an integer.
        road_section = int(road_section_str)
        # Validate the event type.
        if event_type not in self.event_types:
            logger.error(f"无效的事件类型: {event_type}。请使用'congestion'或'block'")
            return 0
        # Initialise counters for this section if absent.
        if road_section not in self.counters:
            self.counters[road_section] = {"congestion": 0, "block": 0}
            self.active_events[road_section] = {}
        # Increment the counter if this is a new event episode.
        if (self.counters[road_section][event_type] > 0) and (event_type in self.active_events[road_section].keys()):
            last_event_source = self.active_events[road_section][event_type]["event_source"]
            event_duration = event_time - self.active_events[road_section][event_type]["event_time"]
            min_duration = timedelta(minutes=float(self.config_data['all_time_move']) * 4)
            # if last_event_source != event_source or event_duration > min_duration:
            # If the gap since the last event exceeds 4x the move interval,
            # bump the counter and restart the active-event window.
            if event_duration > min_duration:
                self.counters[road_section][event_type] += 1
                self.active_events[road_section][event_type]["start_time"] = event_time.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
                is_new_event = True
        else:
            self.counters[road_section][event_type] += 1
            self.active_events[road_section][event_type] = {}
            self.active_events[road_section][event_type]["start_time"] = event_time.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
            is_new_event = True
        # Wrap the counter. NOTE(review): the cap here is 999 though the
        # original comment said 99, and (x % 999) + 1 never yields 1 —
        # confirm the intended range.
        if self.counters[road_section][event_type] > 999:
            self.counters[road_section][event_type] = (self.counters[road_section][event_type] % 999) + 1
        current_counter = self.counters[road_section][event_type]
        # ID = section x 10000 + type code x 1000 + serial.
        # NOTE(review): original comment said section x 1000; code uses 10000.
        event_id = (road_section * 10000) + (self.event_types[event_type] * 1000) + current_counter
        # # 检查是否超过int最大值(虽然Python int理论上无限大，但为了兼容其他系统)
        # if event_id > 2147483647:
        #     event_id = event_id % 2147483647
        #     logger.error("event_id超出int最大值，将使用除法取余后的值")
        # Record this event episode for the section.
        self.active_events[road_section][event_type]["event_id"] = event_id
        self.active_events[road_section][event_type]["event_time"] = event_time
        self.active_events[road_section][event_type]["event_source"] = event_source
        # Allocate a new global daily serial for unseen event IDs.
        # NOTE(review): the serial wraps at 1000 (% 1000 below), so IDs can
        # repeat within a day after 1000 allocations — confirm acceptable.
        if event_id not in self.id_list.keys():
            with lock:
                current_counter = GLOBAL_ID.value
                GLOBAL_ID.value += 1
                current_counter = (current_counter + 1) % 1000
                # logger.info(f"生成新的event_id: {event_id}, {current_counter}, {self.id_list}")
                new_id = int(year) * 10000000 + int(month) * 100000 + int(day) * 1000 + current_counter
                self.id_list[event_id] = new_id
                # Persist the new GLOBAL_ID state to disk.
                with file_lock:
                    try:
                        with open(TEMP_COUNTER_FILE, 'w', encoding="utf-8") as f:
                            data = {"date": tt.strftime("%Y%m%d"), "counter": current_counter}
                            json.dump(data, f, ensure_ascii=False)
                        # Atomic replace via rename.
                        shutil.move(TEMP_COUNTER_FILE, COUNTER_FILE)
                    except IOError as e:
                        logger.warning(f"[警告] 写入计数器失败: {e}")
        return event_id

    def process_main(self, config_data, info):
        """Thread entry point: run one ProcessEvent worker for section *info*."""
        # Field-name mapping handed to ProcessEvent.
        define = {
            "TIME": "time",
            "TYPE": "type",
            "GANTRY": "device",
            "PLATE": "id",
            "LANE": "lane"
        }
        # Create the ProcessEvent instance with its configuration.
        process_event = ProcessEvent(config_data, define)
        # Perform the worker's own initialisation.
        process_event.init()
        # Run the worker loop on this section's queues.
        process_event.run(self.q_dict[info], self.block_dict[info], self.flag_dict[info])

    def deal_kafka_message(self, message_data):
        """Dispatch one raw frame to the queue(s) of its road section(s).

        Checkpoint (kakou) frames are forwarded directly; trajectory frames
        are deduplicated per vehicle via self.trajectory_cache so each
        vehicle is forwarded at most once per pass through a gantry.
        """
        for vehicle in message_data["e1FrameParticipant"]:
            sn = vehicle["baseStationSource"]
            if not isinstance(sn, str):
                sn = str(int(sn))
            if sn in self.id_dict:
                info_list = self.id_dict[sn]
                for info in info_list:
                    if self.kakou_flag:
                        if "picLicense" not in vehicle.keys():
                            continue
                        message = {
                            "device": sn,
                            "time": message_data["time"],
                            "id": vehicle["picLicense"].strip(),
                            "type": int(vehicle["originalType"]),
                            'lane': vehicle.get("laneNum", 0)
                        }
                        self.q_dict[info].put(message, block=False)
                    else:
                        """轨迹数据转断面数据"""
                        if self.plate_flag:
                            # Drop empty/placeholder plates and plates whose
                            # first character is not a Chinese region prefix.
                            if "picLicense" not in vehicle.keys():
                                continue
                            plate = vehicle["picLicense"].strip()
                            if plate == "" or plate == "车牌" or (plate[0] not in REGION_PREFIX):
                                continue
                            # NOTE(review): plate is never inserted into
                            # trajectory_cache[sn], so this test is always
                            # true and every frame is queued — confirm.
                            if plate not in self.trajectory_cache[sn]:
                                message = {
                                    "device": sn,
                                    "time": message_data["time"],
                                    "id": plate,
                                    "type": int(vehicle["originalType"]),
                                    'lane': vehicle.get("laneNum", 0)
                                }
                                self.q_dict[info].put(message, block=False)
                        else:
                            current_time = message_data["time"]
                            if "id" not in vehicle.keys():
                                continue
                            vehicle_id = str(int(vehicle["id"]))
                            if vehicle_id not in self.trajectory_cache[sn]:
                                # First sighting: cache it, don't queue yet.
                                message = {
                                    "device": sn,
                                    "time": message_data["time"],
                                    "id": vehicle_id,
                                    "type": int(vehicle["originalType"]),
                                    'lane': vehicle.get("laneNum", 0)
                                }
                                self.trajectory_cache[sn][vehicle_id] = message
                                self.trajectory_cache[sn][vehicle_id]["time_list"] = [message_data["time"]]
                                # Whether this vehicle was already queued.
                                self.trajectory_cache[sn][vehicle_id]["put_flag"] = 0
                            else:
                                self.trajectory_cache[sn][vehicle_id]["time_list"].append(message_data["time"])
                                self.trajectory_cache[sn][vehicle_id]["new_time"] = message_data["time"]
                                # Queue once after 10 sightings.
                                if len(self.trajectory_cache[sn][vehicle_id]["time_list"]) >= 10 and self.trajectory_cache[sn][vehicle_id]["put_flag"] == 0:
                                    msg = {
                                        "device": sn,
                                        "time": message_data["time"],
                                        "id": vehicle_id,
                                        "type": int(vehicle["originalType"]),
                                        'lane': vehicle.get("laneNum", 0)
                                    }
                                    self.q_dict[info].put(msg, block=False)
                                    self.trajectory_cache[sn][vehicle_id]["put_flag"] = 1
                            # Evict vehicles not seen for 60 s.
                            # NOTE(review): entries seen only once never get a
                            # "new_time" key, so this lookup raises KeyError —
                            # confirm / set "new_time" at first sighting.
                            del_list = []
                            for vid in self.trajectory_cache[sn].keys():
                                if self.trajectory_cache[sn][vid]["new_time"] < (current_time - 60 * 1000):
                                    del_list.append(vid)
                            for vid in del_list:
                                del self.trajectory_cache[sn][vid]

    def add_message(self):
        """Main reporting loop.

        Maintains the Kafka producer and a 1 s heartbeat, distributes queued
        frames to section workers, converts reported blockage/congestion
        data into uniquely-identified events, and publishes section events
        to Kafka every ``all_time_move`` minutes.
        """
        # Events of the current window: {section_id: {secLevel, eventId}}.
        event_dict = {}
        # Timestamp of the last heartbeat.
        last_time = time.time()
        while True:
            try:
                time.sleep(0.1)
                # Local current time.
                now_time = datetime.now()
                # Connect the Kafka producer; on failure retry every 10 s.
                if self.kafka_producer is None:
                    # logger.info(f"add_message {self.kafka_producer}")
                    try:
                        self.kafka_producer = KafkaProducer(
                            bootstrap_servers=self.config_data["kafka_host"],
                            request_timeout_ms=10000,
                            value_serializer=lambda x: json.dumps(x).encode('utf-8')
                        )
                        self.kafka_status = 0
                        logger.info(f"kafka_producer连接成功")
                    except NoBrokersAvailable as e:
                        self.kafka_status = 1
                        logger.error("kafka连接失败")
                        self.kafka_producer = None
                        time.sleep(10)
                        continue
                # Report a heartbeat roughly once per second.
                if time.time() - last_time >= 1:
                    m = {
                        "timestamp": str(int(time.time())),
                        "status": 0,
                        "type": 1
                    }
                    try:
                        future = self.kafka_producer.send(self.config_data["kafka_status"], value=m)
                        record_metadata = future.get(timeout=10)  # normally returns quickly
                    except KafkaError as e:
                        self.kafka_status = 1
                        logger.error("kafka连接失败，导致算法心跳上报失败")
                        self.kafka_producer = None
                        time.sleep(10)
                    except Exception as e:
                        logger.error("算法心跳上报失败")
                    # Refresh the heartbeat timestamp.
                    last_time = time.time()
                # Distribute queued gantry frames to per-section queues.
                while not self.q.empty():
                    try:
                        message = self.q.get(block=False)

                        self.deal_kafka_message(message)

                        # gantry_id = message["device"]
                        # if gantry_id in self.id_dict:
                        #     info_list = self.id_dict[gantry_id]
                        #     for info in info_list:
                        #         self.q_dict[info].put(message, block=False)

                    except Exception as e:
                        logger.opt(exception=e).error("门架数据分发失败")
                # Turn worker reports into uniquely-identified events.
                for info in self.gantry_info.keys():
                    # Drain this section's report queue, assign a unique event
                    # ID per event and record it in event_dict.
                    while not self.block_dict[info].empty():
                        event_data = self.block_dict[info].get(block=False)
                        try:
                            secLevel = 0
                            if event_data['content'] == "重度阻塞":
                                # Log section ID, event info and device states.
                                logger.info(f"重度阻塞: {info}, {str(event_data)}")
                                # Allocate a unique event ID.
                                # NOTE(review): severe blockage is registered
                                # under the "congestion" counter, not "block"
                                # — confirm intended.
                                event_id2 = self.generate_id(info, "congestion", event_data['time'], event_data['content'])
                                if event_data['current_flow'] == "中流量":
                                    secLevel = 2
                                elif event_data['current_flow'] == "高流量":
                                    secLevel = 4
                                event_dict[info] = {
                                    "secLevel": secLevel,
                                    "eventId": f"{self.id_list[event_id2]}_{secLevel}"
                                }
                            elif event_data['con'] == "中度拥堵" or event_data['con'] == "严重拥堵":
                                # No blockage: consider congestion next.
                                logger.info(f"拥堵： {info}, {str(event_data)}")
                                # Allocate a unique event ID.
                                event_id = self.generate_id(info, "congestion", event_data['time'], event_data['con'])
                                if event_data['current_flow'] == "中流量":
                                    secLevel = 2
                                elif event_data['current_flow'] == "高流量":
                                    secLevel = 4
                                event_dict[info] = {
                                    "secLevel": secLevel,
                                    "eventId": f"{self.id_list[event_id]}_{secLevel}"
                                }
                            elif event_data['current_flow'] != "低流量":
                                # Medium/high flow without congestion.
                                logger.info(f"中高流量: {info}, {str(event_data)}")
                                # Allocate a unique event ID.
                                event_id = self.generate_id(info, "congestion", event_data['time'], event_data['current_flow'])
                                if event_data['current_flow'] == "中流量":
                                    secLevel = 1
                                elif event_data['current_flow'] == "高流量":
                                    secLevel = 3
                                event_dict[info] = {
                                    "secLevel": secLevel,
                                    "eventId": f"{self.id_list[event_id]}_{secLevel}"
                                }
                            # Sections without events default to empty info.
                            if info not in event_dict.keys():
                                event_dict[info] = {}
                        except Exception as e:
                            logger.error(f"事故信息处理失败. 区间：{info},数据：{str(event_data)}")
                            logger.opt(exception=e).error("事故信息处理失败")
                # Publish incident information to the master Kafka.
                if self.gantry_info:
                    try:
                        # Compute/publish once per time_move (3/5 min) window.
                        if (now_time - self.start_time) >= timedelta(minutes=float(self.config_data['all_time_move'])):
                            # Full incident dict for all sections.
                            event_data_dict = {}
                            # Walk every road section.
                            for info in self.gantry_info.keys():
                                gantry_up = self.gantry_info[info]['up_gantry_id']
                                gantry_down = self.gantry_info[info]['down_gantry_id']
                                # Default (clear) event template.
                                event_data = {
                                    "timestamp": int(time.time()),
                                    "globalCode": self.task_flag,
                                    "upCode": gantry_up[0],
                                    "startOrgCode": gantry_down[0],
                                    "secLevel": 0
                                }
                                # Merge info from event_dict, suppressing
                                # eventIds already reported recently.
                                try:
                                    if info in event_dict.keys():
                                        if "secLevel" in event_dict[info].keys():
                                            event_data["secLevel"] = event_dict[info]["secLevel"]
                                        if "eventId" in event_dict[info].keys():
                                            if event_dict[info]["eventId"] not in self.event_id_list:
                                                self.event_id_list.append(event_dict[info]["eventId"])
                                            else:
                                                event_data["secLevel"] = 0
                                    # Bound the dedup window to 100 entries.
                                    while len(self.event_id_list) > 100:
                                        self.event_id_list.pop(0)
                                except KeyError as e:
                                    logger.opt(exception=e).error("event_data字段缺失错误")
                                except ValueError as e:
                                    logger.opt(exception=e).error("event_data数据格式错误")
                                except Exception as e:
                                    logger.opt(exception=e).error("event_data未知错误")
                                # Store into the full incident dict.
                                event_data_dict[info] = event_data
                            # Publish events to Kafka.
                            for event_info in list(event_data_dict.keys()):
                                try:
                                    # Skip clear (secLevel 0) sections.
                                    if event_data_dict[event_info]["secLevel"] == 0:
                                        continue
                                    # Send the event and log it.
                                    time_str = now_time.strftime("%Y-%m-%d %H:%M:%S")
                                    self.kafka_producer.send(self.config_data["kafka_event_topic"], value=event_data_dict[event_info], key=time_str.encode("utf-8"))
                                    logger.info(f"event_data:{str(event_data_dict[event_info])}")
                                except Exception as e:
                                    logger.opt(exception=e).error("event_data发送到kafka失败")

                            event_dict = {}
                            self.start_time = now_time
                    except Exception as e:
                        logger.opt(exception=e).error("上报事故信息到主控kafka失败")
            except BaseException as e:
                # NOTE(review): catches BaseException (incl. KeyboardInterrupt)
                # and tells workers to stop, but does not break out of this
                # loop itself — confirm intended.
                logger.opt(exception=e).error("异常退出")
                self.thread_flag = True
                for info in self.gantry_info.keys():
                    self.flag_dict[info].put(self.thread_flag, block=False)

def monitor_process_memory(pids, interval=5):
    """Print the RSS memory usage of each PID every ``interval`` seconds.

    Runs forever; PIDs whose process has exited are reported and skipped.
    """
    while True:
        for pid in pids:
            try:
                rss = psutil.Process(pid).memory_info().rss
            except psutil.NoSuchProcess:
                print(f"Process with PID {pid} no longer exists.")
            else:
                print(f"Process (PID: {pid}) Memory Usage: RSS={rss / 1024 / 1024:.2f} MB")
        time.sleep(interval)


def process_task(config_info, task_id):
    """Run a single Event worker for *task_id*: build, initialise, loop."""
    worker = Event(config_info, task_id)
    worker.init()
    worker.add_message()


def event_main(config_info):
    """Spawn one worker thread per road section and wait for them all.

    On Ctrl+C, waits up to 5 seconds for each live thread before
    returning; any other error is logged.
    """
    thread_list = []
    try:
        gantry_info = config_info["ConfigData"]["gantry_info"]
        thread_list = [
            threading.Thread(target=process_task, args=(config_info, info,))
            for info in gantry_info.keys()
        ]

        # Stagger thread start-up slightly.
        for worker in thread_list:
            worker.start()
            time.sleep(0.5)

        for worker in thread_list:
            worker.join()

    except KeyboardInterrupt:
        print("检测到 Ctrl+C，正在终止子进程...")
        for worker in thread_list:
            if worker.is_alive():
                worker.join(timeout=5)  # wait at most 5 seconds each
        print("子进程已终止")
    except Exception as e:
        logger.opt(exception=e).error("未知错误")