import os
import sys
import time
import json
import queue
import psutil
import shutil
import threading
from loguru import logger
from filelock import FileLock
from datetime import datetime, timedelta
from multiprocessing import Process, Value, Lock
from kafka import KafkaConsumer, KafkaProducer, TopicPartition
from kafka.errors import NoBrokersAvailable, KafkaError
from process_event import ProcessEvent
from util import format_conversion, fill_missing_odds


# Resolve paths relative to this file and make sure the log directory exists.
dir = os.path.dirname(os.path.abspath(__file__))  # NOTE(review): shadows the `dir` builtin; renaming would touch every module-level user, so it is kept
log_dir = os.path.join(dir, "Log", "logs")
if not os.path.exists(log_dir):
    os.makedirs(log_dir, exist_ok=True)
# Custom log format: drop loguru's default handler, then write to a dated file.
logger.remove()  # remove the default stdout handler
logger.add(
    sink=os.path.join(log_dir, "event_{time:YYYY-MM-DD}.log"),  # log file name carries the date
    format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level} | {function}:{line} - {message}",
    rotation="00:00",  # rotate at midnight every day
    retention="360 days",  # keep the most recent 360 days of logs
    encoding="utf-8",  # avoid mojibake for Chinese text
    level="INFO",  # file log level
)

# Persistent event-counter state shared across processes; a file lock guards
# the read-modify-write cycle on the counter file.
COUNTER_FILE = os.path.join(dir, 'Config', "counter_state.json")
TEMP_COUNTER_FILE = os.path.join(dir, 'Config', "counter_state.tmp")  # written first, then atomically moved over COUNTER_FILE
LOCK_FILE = COUNTER_FILE + ".lock"
file_lock = FileLock(LOCK_FILE)

# ====== Global shared (multiprocessing) variables ======
GLOBAL_ID = Value('i', 1)  # shared int counter, initial value 1
GLOBAL_UP_FALG = Value('b', False)  # shared bool, initial False -- NOTE(review): "FALG" typo kept, other modules may reference this name
GLOBAL_DOWN_FALG = Value('b', False)  # shared bool, initial False
lock = Lock()

class Event:
    def __init__(self, device_info, config_info, task_flag):
        """Set up per-direction state: queues, lookup tables, event counters and
        data-source-switching bookkeeping for one task (up/down/kakou)."""
        self.device_info = device_info
        self.task_flag = task_flag
        self.config_data = config_info["ConfigData"]
        # Gantry layout for this direction only.
        self.gantry_info = self.config_data["gantry_info"][self.task_flag]
        # Directory where per-day event JSON files are written.
        self.save_json_path = os.path.join(dir, "Log", "json")
        self.config_data["save_json_path"] = self.save_json_path

        # --- per-gantry / per-section runtime state ---
        self.gantry_status = {}               # device health: {gantry_id: [heartbeat, clock, delay, precision]}
        self.q = queue.Queue(maxsize=1000)    # raw vehicle records from all gantries
        self.q_dict = {}                      # per-section vehicle data queues
        self.flag_dict = {}                   # per-section queues used to signal worker shutdown
        self.block_dict = {}                  # per-section blockage-report queues
        self.id_dict = {}                     # gantry id -> section id
        self.id_dict2 = {}                    # downstream gantry id -> section id

        # --- event-id bookkeeping ---
        self.counters = {}                    # {road_section: {"congestion": n, "block": n}}
        self.event_types = {
            "congestion": 1,                  # congestion type code
            "block": 2                        # blockage type code
        }
        self.active_events = {}               # currently active events: {road_section: {type: data}}
        self.id_list = {}                     # event_id -> day-scoped global id

        # --- misc runtime flags ---
        self.thread_flag = False              # set True to ask worker threads to exit
        self.save_event_path = None           # current event-log file path
        self.start_time = None                # set in init()
        self.kafka_producer = None            # lazily connected producer
        self.kafka_status = 1                 # 0 = connected, 1 = disconnected
        self.current_time_str = None          # day string of the currently open log file
        # False -> re-identification stream; True -> gantry checkpoint (kakou) stream.
        self.new_flag = False
        # Downstream devices of fully closed sections, used for switch decisions.
        self.valuable_devices = []
        # Confirmation window before a data-source switch (config value in hours).
        self.switch_timeout = 60 * 60 * self.config_data["switch_timeout"]
        # Minimum interval between two switches (config value in hours).
        self.min_switch_interval = 60 * 60 * self.config_data["min_switch_interval"]
        self.last_switch_time = None          # wall-clock time of the last switch
        self.start_switch_time = None         # when the current abnormal streak started

    def subscribe_data_kafka(self):
        """Continuously consume raw vehicle records from Kafka and push them onto self.q.

        Legacy entry point (init() now uses subscribe_kafka + deal_data); kept for
        compatibility. Reconnects automatically on broker loss and exits once
        self.thread_flag is set.
        """
        # Pick this direction's topic; consumption starts from the latest offset.
        if self.task_flag == "up":
            topic = self.config_data["kafka_up_cars_topic"]
        elif self.task_flag == "down":
            topic = self.config_data["kafka_down_cars_topic"]
        else:
            topic = self.config_data["kafka_kakou_cars_topic"]
        kafka_consumer = None
        while True:
            time.sleep(0.002)
            # Consumer (re)connection; retries every 10 seconds on failure.
            if kafka_consumer is None:
                try:
                    kafka_consumer = KafkaConsumer(
                        topic,
                        bootstrap_servers=self.config_data["kafka_host"],
                        auto_offset_reset='latest',
                        enable_auto_commit=False
                    )
                    self.kafka_status = 0
                    logger.info(f"原始数据监听启动, topic:{topic}")
                except NoBrokersAvailable:
                    logger.error(f"kafka连接失败, topic:{topic}")
                    kafka_consumer = None
                    time.sleep(10)
                    continue
            while True:
                time.sleep(0.0002)
                if self.kafka_status == 1:
                    logger.error("kafka连接断开")
                    kafka_consumer = None
                    time.sleep(10)
                    break
                try:
                    records = kafka_consumer.poll(timeout_ms=1000)
                    if records:
                        for tp, messages in records.items():
                            for message in messages:
                                if not message:
                                    continue
                                # Parse one checkpoint / re-identification record.
                                vehicle_info = json.loads(message.value.decode('utf-8'))
                                if self.task_flag == "kakou":
                                    if not self.new_flag:
                                        continue
                                    plateNumber = vehicle_info.get('plateNumber', "").strip()
                                    # Skip empty, placeholder ("车牌") and synthetic ("默…") plates.
                                    if not plateNumber or plateNumber == "车牌" or "默" in plateNumber:
                                        continue
                                    vehicle_data = {
                                        "device": vehicle_info.get('sn', ""),
                                        "time": vehicle_info.get('sendTime', 0),
                                        "id": plateNumber,
                                        "type": vehicle_info.get('vehicleType', 0),
                                        'lane': vehicle_info.get('laneNo', 0)
                                    }
                                    self.q.put(vehicle_data, block=False)
                                else:
                                    if self.new_flag:
                                        continue
                                    globalTime = vehicle_info.get('globalTime', 0)
                                    receiveTime = vehicle_info.get('receiveTime', 0)
                                    sendTime = vehicle_info.get('sendTime', 0)
                                    for target in vehicle_info.get("targetList", []):
                                        target_id = target.get("id", 0)
                                        station = str(target.get("station", 0))
                                        # BUGFIX: the original tested receiveTime twice;
                                        # sendTime must be checked too (mirrors deal_data).
                                        if receiveTime == 0 or sendTime == 0 or target_id == 0 or station == "0":
                                            continue
                                        vehicle_data = {
                                            "device": station,
                                            "time": globalTime,
                                            "id": target_id,
                                            "type": target.get("carType", 0),
                                            'lane': target.get("lane", 0)
                                        }
                                        self.q.put(vehicle_data, block=False)
                except queue.Full:
                    # Queue overflow: drop the oldest record to make room, then keep
                    # the newest one (the original code lost the new record as well).
                    try:
                        self.q.get(block=False)
                        self.q.put(vehicle_data, block=False)
                    except (queue.Empty, queue.Full):
                        pass
                    logger.warning("队列已满，丢弃队头数据")
                except KafkaError:
                    logger.error(f"kafka连接断开, topic:{topic}")
                    kafka_consumer = None
                    time.sleep(10)
                    break
                except Exception as e:
                    logger.opt(exception=e).error(f"解析数据失败, topic:{topic}")
                # Shutdown requested: leave the poll loop.
                if self.thread_flag:
                    logger.info("退出接收上行数据线程")
                    break
            # Shutdown requested: leave the reconnect loop too.
            if self.thread_flag:
                logger.info("退出接收上行数据线程")
                break

    def subscribe_device_data_kafka(self):
        """Continuously consume device-status messages from Kafka and refresh
        self.gantry_status for known gantries.

        Legacy entry point (init() now uses subscribe_kafka + deal_device_status).
        Reconnects automatically and exits once self.thread_flag is set.
        """
        # Consumption starts from the latest offset of the device-status topic.
        topic = self.config_data["kafka_device_status"]
        kafka_consumer = None
        while True:
            time.sleep(0.02)
            # Consumer (re)connection; retries every 10 seconds on failure.
            if kafka_consumer is None:
                try:
                    kafka_consumer = KafkaConsumer(
                        topic,
                        bootstrap_servers=self.config_data["kafka_host"],
                        auto_offset_reset='latest',
                        enable_auto_commit=False
                    )
                    self.kafka_status = 0
                    logger.info(f"设备状态数据监听启动, topic:{topic}")
                except NoBrokersAvailable as e:
                    logger.error(f"kafka连接失败, topic:{topic}")
                    kafka_consumer = None
                    time.sleep(10)
                    continue
            while True:
                time.sleep(0.0002)
                # kafka_status may be flipped to 1 elsewhere (producer failure in
                # add_message); treat it as a broken connection and rebuild.
                if self.kafka_status == 1:
                    logger.error("kafka连接断开")
                    kafka_consumer = None
                    time.sleep(10)
                    break
                try:
                    records = kafka_consumer.poll(timeout_ms=1000)
                    if records:
                        for tp, messages in records.items():
                            for message in messages:
                                if message:
                                    # Parse the device-status payload.
                                    message_value = message.value.decode('utf-8')
                                    data = json.loads(message_value)
                                    data_info = data["devices"]
                                    for i in range(len(data_info)):
                                        gantry = data_info[i].get("deviceId", None)
                                        if (gantry is not None) and (gantry in self.gantry_status.keys()):
                                            heartbeat_pre = data_info[i].get("heartbeat", 1)
                                            clock = data_info[i].get("clock", 1)
                                            delay_status = data_info[i].get("delay", 1)
                                            precision_status = data_info[i].get("precision", 1)
                                            # Bucket the raw heartbeat value:
                                            # >90 -> 0 (good), >80 -> 1 (degraded), else 2 (bad).
                                            if heartbeat_pre > 90:
                                                heartbeat = 0
                                            elif heartbeat_pre > 80:
                                                heartbeat = 1
                                            else:
                                                heartbeat = 2
                                            # Device-status record: [heartbeat, clock, delay, precision].
                                            self.gantry_status[gantry] = [heartbeat, clock, delay_status, precision_status]
                except KafkaError as e:
                    logger.error(f"kafka连接断开, topic:{topic}")
                    kafka_consumer = None
                    time.sleep(10)
                    break
                except Exception as e:
                    logger.opt(exception=e).error(f"解析数据失败, topic:{topic}")
                    pass
                # Shutdown requested: leave the poll loop.
                if self.thread_flag:
                    logger.info("退出接收设备状态线程")
                    break
            # Shutdown requested: leave the reconnect loop too.
            if self.thread_flag:
                logger.info("退出接收设备状态线程")
                break

    def deal_data(self, vehicle_info):
        try:
            if self.task_flag == "kakou":
                if self.new_flag:
                    timestamp = vehicle_info.get('timestamp', "")
                    sn = vehicle_info.get('sn', "")
                    ip = vehicle_info.get('ip', "")
                    plateNumber = vehicle_info.get('plateNumber', "").strip()
                    vehicleType = vehicle_info.get('vehicleType', 0)
                    laneNo = vehicle_info.get('laneNo', 0)
                    sendTime = vehicle_info.get('sendTime', 0)
                    if plateNumber:
                        if plateNumber is None or plateNumber == "车牌" or plateNumber == "" or "默" in plateNumber:
                            pass
                        else:
                            vehicle_data = {
                                "device": sn,
                                "time": sendTime,
                                "id": plateNumber,
                                "type": vehicleType,
                                'lane': laneNo
                            }
                            self.q.put(vehicle_data, block=False)
            else:
                if not self.new_flag:
                    orgCode = vehicle_info.get('orgCode', "")
                    globalTime = vehicle_info.get('globalTime', 0)
                    if "targetList" in vehicle_info:
                        for target in vehicle_info["targetList"]:
                            receiveTime = vehicle_info.get('receiveTime', 0)
                            sendTime = vehicle_info.get('sendTime', 0)
                            id = target.get("id", 0)
                            carType = target.get("carType", 0)
                            station = str(target.get("station", 0))
                            lane = target.get("lane", 0)
                            if receiveTime == 0 or sendTime == 0 or id == 0 or station == "0":
                                continue
                            vehicle_data = {
                                "device": station,
                                "time": globalTime,
                                "id": id,
                                "type": carType,
                                'lane': lane
                            }
                            self.q.put(vehicle_data, block=False)
        except queue.Full:
            # 队列已满的异常处理
            self.q.get(block=False)  # 手动丢弃队头数据（1）
            logger.warning("队列已满，丢弃队头数据")

    def deal_device_status(self, status_info):
        try:
            data_info = status_info["devices"]
            for i in range(len(data_info)):
                gantry = data_info[i].get("deviceId", None)
                if (gantry is not None) and (gantry in self.gantry_status.keys()):
                    heartbeat_pre = data_info[i].get("heartbeat", 1)
                    clock = data_info[i].get("clock", 1)
                    delay_status = data_info[i].get("delay", 1)
                    precision_status = data_info[i].get("precision", 1)
                    if heartbeat_pre > 90:
                        heartbeat = 0
                    elif heartbeat_pre > 80:
                        heartbeat = 1
                    else:
                        heartbeat = 2
                    # 设备状态字典
                    self.gantry_status[gantry] = [heartbeat, clock, delay_status, precision_status]
        except Exception as e:
            logger.warning(f"设备数据解析失败，数据{status_info}")

    def subscribe_kafka(self, topic, callbacks, partition=None):
        """
        Kafka consume loop with automatic reconnection; every decoded JSON
        message is handed to `callbacks`.

        :param topic: topic name to consume (from the latest offset)
        :param callbacks: callable invoked with each parsed message dict
        :param partition: optional partition number; when given, the consumer is
            manually assigned to that single partition instead of subscribing
        """
        kafka_consumer = None
        while True:
            time.sleep(0.002)
            # Consumer (re)connection; retries every 10 seconds on failure.
            if kafka_consumer is None:
                try:
                    if partition is None:
                        kafka_consumer = KafkaConsumer(
                            topic,
                            bootstrap_servers=self.config_data["kafka_host"],
                            auto_offset_reset='latest',
                            enable_auto_commit=False
                        )
                        self.kafka_status = 0
                        logger.info(f"原始数据监听启动, topic:{topic}")
                    else:
                        # BUGFIX: passing the topic to the constructor subscribes,
                        # and kafka-python forbids mixing subscribe() with manual
                        # assign() (IllegalStateError). Create the consumer without
                        # a topic, then bind the requested partition manually.
                        kafka_consumer = KafkaConsumer(
                            bootstrap_servers=self.config_data["kafka_host"],
                            auto_offset_reset='latest',
                            enable_auto_commit=False
                        )
                        self.kafka_status = 0
                        kafka_consumer.assign([TopicPartition(topic, partition)])
                        logger.info(f"原始数据监听启动, topic:{topic}, partition:{partition}")
                except NoBrokersAvailable:
                    logger.error(f"kafka连接失败, topic:{topic}")
                    kafka_consumer = None
                    time.sleep(10)
                    continue
            while True:
                time.sleep(0.0002)
                # kafka_status may be flipped elsewhere on producer failure;
                # treat it as a broken connection and rebuild the consumer.
                if self.kafka_status == 1:
                    logger.error("kafka连接断开")
                    kafka_consumer = None
                    time.sleep(10)
                    break
                try:
                    records = kafka_consumer.poll(timeout_ms=1000)
                    if records:
                        for tp, messages in records.items():
                            for message in messages:
                                if message:
                                    # Decode and dispatch to the handler
                                    # (deal_data / deal_device_status).
                                    callbacks(json.loads(message.value.decode('utf-8')))
                except KafkaError:
                    logger.error(f"kafka连接断开, topic:{topic}")
                    kafka_consumer = None
                    time.sleep(10)
                    break
                except Exception as e:
                    logger.opt(exception=e).error(f"解析数据失败, topic:{topic}")
                # Shutdown requested: leave the poll loop...
                if self.thread_flag:
                    logger.info("退出接收设备状态线程")
                    break
            # ...and the reconnect loop.
            if self.thread_flag:
                logger.info("退出接收设备状态线程")
                break

    def init(self):
        """Build per-section lookup tables, start the Kafka listener threads and
        one worker thread per road section."""
        # {section id: [[upstream gantry ids], [downstream gantry ids]]}
        info_dict = {}
        # Make sure the JSON output folder exists.
        if not os.path.exists(self.save_json_path):
            os.makedirs(self.save_json_path, exist_ok=True)
            logger.info(f"创建文件夹{self.save_json_path}")
        self.start_time = datetime.now()
        # Initialise queues and lookup tables for every section.
        for info in self.gantry_info.keys():
            self.q_dict[info] = queue.Queue(maxsize=1000)
            self.flag_dict[info] = queue.Queue(maxsize=10)
            self.block_dict[info] = queue.Queue(maxsize=1000)
            # Normalise single gantry ids to lists.
            gantry_up = self.gantry_info[info]['up_gantry_id']
            gantry_down = self.gantry_info[info]['down_gantry_id']
            self.gantry_info[info]["up_gantry_id"] = gantry_up if isinstance(gantry_up, list) else [gantry_up]
            self.gantry_info[info]["down_gantry_id"] = gantry_down if isinstance(gantry_down, list) else [gantry_down]
            # Map every downstream gantry id back to its section.
            for down_gantry_id in self.gantry_info[info]["down_gantry_id"]:
                self.id_dict2[down_gantry_id] = info
            info_dict[info] = [self.gantry_info[info]["up_gantry_id"], self.gantry_info[info]["down_gantry_id"]]
            # Downstream devices of non-fork (fully closed) sections drive the
            # data-source switch decision in global_flag().
            if self.gantry_info[info]["is_fork"] == 0:
                for down_gantry_id in self.gantry_info[info]["down_gantry_id"]:
                    self.valuable_devices.append(down_gantry_id)
        # gantry id -> section id
        self.id_dict = format_conversion(info_dict)
        for gantry in self.id_dict.keys():
            self.gantry_status[gantry] = [1, 1, 1, 1]
        logger.info("初始化完成！！！")
        try:
            # Start the raw-data and the device-status Kafka listeners.
            if self.task_flag == "up":
                data_topic = self.config_data["kafka_up_cars_topic"]
            elif self.task_flag == "down":
                data_topic = self.config_data["kafka_down_cars_topic"]
            else:
                data_topic = self.config_data["kafka_kakou_cars_topic"]
            device_topic = self.config_data["kafka_device_status"]
            threading.Thread(target=self.subscribe_kafka, args=(data_topic, self.deal_data,)).start()
            threading.Thread(target=self.subscribe_kafka, args=(device_topic, self.deal_device_status,)).start()
        except Exception as e:
            logger.opt(exception=e).error("实时数据监听线程启动失败")
        # One worker thread per section.
        for info in self.gantry_info.keys():
            try:
                # BUGFIX: the original passed the *same* self.config_data dict to
                # every thread and overwrote its "gantry_info" key on each loop
                # iteration, so all workers ended up seeing the last section's
                # gantry_info. Give each thread its own shallow copy instead.
                config_data = dict(self.config_data)
                config_data["gantry_info"] = self.gantry_info[info]
                config_data["gantry_info"]["info"] = info
                config_data["gantry_info"]["time_move"] = self.config_data["all_time_move"]
                config_data["gantry_info"]["time_interval"] = self.config_data["all_time_interval"]
                p = threading.Thread(target=self.process_main, args=(config_data, info,))
                p.start()
            except Exception as e:
                logger.opt(exception=e).error("创建process_main线程失败")
        logger.info("多线程启动完成")

    def save_json(self, time_str):
        json_dir = os.path.join(self.config_data["save_json_path"], time_str)
        if not os.path.exists(json_dir):
            os.makedirs(json_dir, exist_ok=True)
        return os.path.join(json_dir, f"event_{self.task_flag}.jsonl")

    def generate_id(self, road_section_str, event_type, event_time, event_source):
        """Generate a per-section event id and a day-scoped global id.

        The event id encodes section number, event-type code and a rolling
        per-section counter. The day-scoped id (kept in self.id_list) combines
        the current date with the process-shared GLOBAL_ID counter, which is
        persisted to COUNTER_FILE so a mid-day restart continues the sequence.

        :param road_section_str: section number (int-like string)
        :param event_type: "congestion" or "block"
        :param event_time: datetime of the event
        :param event_source: source tag stored with the active event
        :return: the composed event id, or 0 for an unknown event type
        """
        tt = datetime.now()
        current_day = tt.strftime("%Y%m%d")
        year = current_day[:4]
        month = current_day[4:6]
        day = current_day[6:]
        if self.id_list:
            # If the date rolled over since the last id was issued, clear
            # id_list and reset the shared GLOBAL_ID to 1.
            last_event_id = list(self.id_list.keys())[0]
            last_id = self.id_list[last_event_id]
            last_day = str(last_id)[:8]
            if last_day != current_day:
                logger.info(f"日期改变，清空id_list, {last_day}, and {current_day}")
                self.id_list = {}
                with lock:
                    GLOBAL_ID.value = 1
        else:
            # id_list is empty: possibly a mid-day restart. Reload today's last
            # persisted GLOBAL_ID from the counter file, otherwise start at 1.
            with file_lock:
                try:
                    with open(COUNTER_FILE, 'r', encoding="utf-8") as f:
                        data = json.load(f)
                        date_saved = data.get("date")
                        counter = data.get("counter", 0)
                        with lock:
                            if date_saved == current_day:
                                GLOBAL_ID.value = counter
                            else:
                                GLOBAL_ID.value = 1
                except (IOError, json.JSONDecodeError) as e:
                    logger.warning(f"[警告] 读取计数器失败: {e}")
        # road_section must be an integer for the id arithmetic below.
        road_section = int(road_section_str)
        # Validate the event type.
        if event_type not in self.event_types:
            logger.error(f"无效的事件类型: {event_type}。请使用'congestion'或'block'")
            return 0
        # Lazily initialise this section's counters and active-event slot.
        if road_section not in self.counters:
            self.counters[road_section] = {"congestion": 0, "block": 0}
            self.active_events[road_section] = {}
        # Decide whether this continues the active event or starts a new one.
        if (self.counters[road_section][event_type] > 0) and (event_type in self.active_events[road_section].keys()):
            last_event_source = self.active_events[road_section][event_type]["event_source"]
            event_duration = event_time - self.active_events[road_section][event_type]["event_time"]
            min_duration = timedelta(minutes=float(self.config_data['all_time_move']) * 4)
            # A gap longer than 4x the sliding interval starts a new event:
            # bump the counter and reset the active event's start time.
            if event_duration > min_duration:
                self.counters[road_section][event_type] += 1
                self.active_events[road_section][event_type]["start_time"] = event_time.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
        else:
            self.counters[road_section][event_type] += 1
            self.active_events[road_section][event_type] = {}
            self.active_events[road_section][event_type]["start_time"] = event_time.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
        # Keep the per-section counter within 3 digits (wraps after 999).
        if self.counters[road_section][event_type] > 999:
            self.counters[road_section][event_type] = (self.counters[road_section][event_type] % 999) + 1
        current_counter = self.counters[road_section][event_type]
        # event id = section * 10000 + type code * 1000 + counter
        event_id = (road_section * 10000) + (self.event_types[event_type] * 1000) + current_counter
        # Record/update the active event for this section and type.
        self.active_events[road_section][event_type]["event_id"] = event_id
        self.active_events[road_section][event_type]["event_time"] = event_time
        self.active_events[road_section][event_type]["event_source"] = event_source
        # First time this event_id appears: allocate a day-scoped global id.
        if event_id not in self.id_list.keys():
            with lock:
                current_counter = GLOBAL_ID.value
                GLOBAL_ID.value += 1
                # NOTE(review): the value used/persisted below is taken modulo
                # 1000 while GLOBAL_ID itself keeps growing — after 999 ids in
                # one day the reloaded value diverges; confirm this is intended.
                current_counter = (current_counter + 1) % 1000
                logger.info(f"生成新的event_id: {event_id}, {current_counter}, {self.id_list}")
                new_id = int(year) * 10000000 + int(month) * 100000 + int(day) * 1000 + current_counter
                self.id_list[event_id] = new_id
                # Persist the new counter: write to a temp file, then atomically
                # replace the real counter file.
                with file_lock:
                    try:
                        with open(TEMP_COUNTER_FILE, 'w', encoding="utf-8") as f:
                            data = {"date": tt.strftime("%Y%m%d"), "counter": current_counter}
                            json.dump(data, f, ensure_ascii=False)
                        # Atomic replace.
                        shutil.move(TEMP_COUNTER_FILE, COUNTER_FILE)
                    except IOError as e:
                        logger.warning(f"[警告] 写入计数器失败: {e}")
        return event_id

    def process_main(self, config_data, info):
        """Worker entry point for one road section: build a ProcessEvent from the
        section config and run it against that section's queues.

        :param config_data: per-section configuration dict
        :param info: section id (keys self.q_dict / block_dict / flag_dict)
        """
        # Map ProcessEvent field names to the keys of the normalized vehicle records.
        field_map = {
            "TIME": "time",
            "TYPE": "type",
            "GANTRY": "device",
            "PLATE": "id",
            "LANE": "lane",
        }
        worker = ProcessEvent(config_data, field_map)
        worker.init()
        # Start processing this section's vehicle, blockage and shutdown-flag queues.
        worker.run(self.q_dict[info], self.block_dict[info], self.flag_dict[info])

    def global_flag(self):
        # 是否需要切换数据源，基于异常数据占总数量的比例，上行或下行数据占比高于阈值（50%），则切换数据源
        if self.task_flag != "kakou":
            # 如果上次切换时间较近，则不允许切换
            if self.last_switch_time is not None and (time.time() - self.last_switch_time) < self.min_switch_interval:
                return 0
            # 计算不同数据质量设备的数量
            T0_num = 0
            T1_num = 0
            T2_num = 0
            T_num = len(self.valuable_devices) * 0.5
            for device in self.valuable_devices:
                if self.gantry_status[device][0] == 0:
                    T0_num += 1
                elif self.gantry_status[device][0] == 1:
                    T1_num += 1
                elif self.gantry_status[device][0] == 2:
                    T2_num += 1
            # 判断是否需要切换数据源为门架卡口数据
            if not self.new_flag:
                if T2_num >= T_num:
                    if self.start_switch_time is None:
                        # 开始计算异常时间，如果中间某次数据恢复正常，则重新计算异常时间
                        self.start_switch_time = time.time()
                    else:
                        # 连续异常时间超过30分钟，则切换到门架卡口数据
                        if time.time() - self.start_switch_time >= self.switch_timeout:
                            self.new_flag = True
                            self.last_switch_time = time.time()
                            logger.info(f"进程{self.task_flag}在时间{time.time()}根据数据质量切换数据源为门架卡口数据。")
                            return 1
                else:
                    # 初始化异常开始时间
                    self.start_switch_time = None
            else:
                # 判断是否需要切换数据源为重识别数据
                if T0_num >= T_num:
                    if self.start_switch_time is None:
                        # 开始计算异常时间，如果中间某次数据恢复正常，则重新计算异常时间
                        self.start_switch_time = time.time()
                    else:
                        # 连续异常时间超过30分钟，则切换回重识别数据
                        if time.time() - self.start_switch_time >= self.switch_timeout:
                            self.new_flag = False
                            self.last_switch_time = time.time()
                            logger.info(f"进程{self.task_flag}在时间{time.time()}根据数据质量切换数据源为门架卡口数据。")
                            return 1
                else:
                    # 初始化异常开始时间
                    self.start_switch_time = None
        return 0

    def add_message(self):
        """Main worker loop for one direction ("up"/"down"); never returns.

        Responsibilities, all inside a single endless loop:
          * keep a KafkaProducer connected and send a heartbeat roughly once
            per second to the topic named by config key ``kafka_status``;
          * drain ``self.q`` and fan gantry messages out to the per-interval
            queues in ``self.q_dict``;
          * drain the per-interval event queues in ``self.block_dict`` and
            accumulate entries (severity, event id, flow, speed) in a local
            ``event_dict``;
          * every ``all_time_move`` minutes, reconcile the shared data-source
            flags (GLOBAL_UP_FALG / GLOBAL_DOWN_FALG) under ``lock``, publish
            non-clear interval events to ``kafka_event_topic`` and append them
            to a per-day JSON-lines file under ``save_json_path``.

        On any exception the worker threads are signalled via
        ``self.flag_dict`` and the loop keeps running.
        """
        # Map Chinese congestion labels to numeric event severity levels
        level_dict = {"畅通": 1, "轻度拥堵": 2, "中度拥堵": 3, "严重拥堵": 4, "重度阻塞": 4, "阻断": 5}
        # Initialize today's JSON log file (one directory per day)
        now_time = datetime.now()
        time_str = now_time.strftime("%Y%m%d")
        json_dir = os.path.join(self.config_data["save_json_path"], time_str)
        if not os.path.exists(json_dir):
            os.makedirs(json_dir, exist_ok=True)
        self.current_time_str = time_str
        self.save_event_path = os.path.join(json_dir, f"event_{self.task_flag}.json")
        # Append-mode event log file.
        # NOTE(review): this handle is only closed on day rollover; the last
        # open handle relies on process exit for cleanup (no with/finally).
        json_file = open(self.save_event_path, 'a', encoding='utf-8')
        # Events accumulated per interval, flushed every reporting period
        event_dict = {}
        # Heartbeat pacing timestamp
        last_time = time.time()
        while True:
            try:
                time.sleep(0.1)
                # Current local time
                now_time = datetime.now()
                # Day rollover check: logs are kept per day
                time_str = now_time.strftime("%Y%m%d")
                if time_str != self.current_time_str:
                    if json_file:
                        json_file.close()
                    # New day: create the day folder and reopen the log file
                    self.save_event_path = self.save_json(time_str)
                    json_file = open(self.save_event_path, 'a', encoding='utf-8')
                    self.current_time_str = time_str
                # (Re)connect the Kafka producer if it is currently down
                if self.kafka_producer is None:
                    logger.info(f"add_message {self.kafka_producer}")
                    try:
                        self.kafka_producer = KafkaProducer(
                            bootstrap_servers=self.config_data["kafka_host"],
                            request_timeout_ms=10000,
                            value_serializer=lambda x: json.dumps(x).encode('utf-8')
                        )
                        self.kafka_status = 0
                        logger.info(f"kafka_producer连接成功")
                    except NoBrokersAvailable as e:
                        self.kafka_status = 1
                        logger.error("kafka连接失败")
                        self.kafka_producer = None
                        time.sleep(10)
                        continue
                # Heartbeat: report liveness roughly once per second
                if time.time() - last_time >= 1:
                    m = {
                        "timestamp": str(int(time.time())),
                        "status": 0,
                        "type": 1
                    }
                    try:
                        future = self.kafka_producer.send(self.config_data["kafka_status"], value=m)
                        record_metadata = future.get(timeout=10)  # normally returns quickly
                    except KafkaError as e:
                        self.kafka_status = 1
                        logger.error("kafka连接失败，导致算法心跳上报失败")
                        self.kafka_producer = None
                        time.sleep(10)
                    except Exception as e:
                        logger.error("算法心跳上报失败")
                    # Reset the heartbeat timer
                    last_time = time.time()
                # Fan gantry messages out to every interval queue registered
                # for the message's device id
                while not self.q.empty():
                    try:
                        message = self.q.get(block=False)
                        gantry_id = message["device"]
                        if gantry_id in self.id_dict:
                            info_list = self.id_dict[gantry_id]
                            for info in info_list:
                                self.q_dict[info].put(message, block=False)
                    except Exception as e:
                        logger.opt(exception=e).error("门架数据分发失败")
                # Collect per-interval events for the Kafka report
                for info in self.gantry_info.keys():
                    # Upstream / downstream device id lists for this interval
                    gantry_up = self.gantry_info[info]['up_gantry_id']
                    gantry_down = self.gantry_info[info]['down_gantry_id']
                    # A side is healthy only if every status value of every
                    # device on it is 0 (unknown devices default to bad: [1])
                    up_status = all(all(value == 0 for value in self.gantry_status.get(key, [1])) for key in gantry_up)
                    down_status = all(all(value == 0 for value in self.gantry_status.get(key, [1])) for key in gantry_down)
                    # Drain this interval's event queue, assigning each event a
                    # unique id and recording it in event_dict
                    while not self.block_dict[info].empty():
                        con = self.block_dict[info].get(block=False)
                        try:
                            # tt = con['time'].strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
                            if con['content'] == "阻断" or con['content'] == "重度阻塞":
                                status_up = {k: self.gantry_status.get(k) for k in gantry_up}
                                status_down = {k: self.gantry_status.get(k) for k in gantry_down}
                                # Log interval id, event payload, and all device statuses
                                logger.info(f"{info}, {str(con)}, status_up: {status_up}, status_down: {status_down}")
                                # Unique event id (see generate_id())
                                # event_id = self.generate_id(info, "block", con['time'], con['content'])
                                event_id2 = self.generate_id(info, "congestion", con['time'], con['content'])
                                if con['content'] == "阻断" and up_status and down_status:
                                    # Full blockage is only reported when both
                                    # sides are healthy, avoiding device-fault
                                    # false positives
                                    event_dict[info] = {
                                        "secLevel": level_dict[con['content']],
                                        "eventId": self.id_list[event_id2],
                                        "isBreakdown": 1,
                                        "eventInfo": con['content']
                                    }
                                elif con['content'] == "重度阻塞":
                                    event_dict[info] = {
                                        "secLevel": level_dict[con['content']],
                                        "eventId": self.id_list[event_id2],
                                        "isBreakdown": 0,
                                        "eventInfo": con['content']
                                    }
                            elif con['con'] != "畅通" and con['content'] != "畅通":
                                # No blockage: consider a plain congestion event.
                                # NOTE(review): con['con'] is assumed present
                                # here; a missing key raises KeyError, which is
                                # swallowed by the except below — confirm the
                                # producer always sets both 'con' and 'content'.
                                status_up = {k: self.gantry_status.get(k) for k in gantry_up}
                                status_down = {k: self.gantry_status.get(k) for k in gantry_down}
                                # Log interval id, event payload, and all device statuses
                                logger.info(f"{info}, {str(con)}, status_up: {status_up}, status_down: {status_down}")
                                # Unique event id (see generate_id())
                                event_id = self.generate_id(info, "congestion", con['time'], con['con'])
                                event_dict[info] = {
                                    "secLevel": level_dict[con['con']],
                                    "eventId": self.id_list[event_id],
                                    "isBreakdown": 0,
                                    "eventInfo": con['con']
                                }
                            # Intervals without an event still get an (empty)
                            # entry so flow/speed can be attached below
                            if info not in event_dict.keys():
                                event_dict[info] = {}
                            # Attach vehicle count and mean interval speed
                            event_dict[info]["totalFlow"] = con['num']
                            event_dict[info]["intervalSpeed"] = float(con['speed'])
                        except Exception as e:
                            logger.error(f"事故信息处理失败. 区间：{info},数据：{str(con)}")
                            logger.opt(exception=e).error("事故信息处理失败")
                # Publish the accumulated events to the master Kafka
                if self.gantry_info:
                    try:
                        # Report once per time_move window (3/5 minutes)
                        if (now_time - self.start_time) >= timedelta(minutes=float(self.config_data['all_time_move'])):
                            # logger.info(f"now_time:{str(now_time)}, start_time:{self.start_time}")
                            # Check whether this process wants to switch data source
                            result = self.global_flag()
                            with lock:
                                # Publish this process's decision to the shared flags
                                if self.task_flag == "up" and result == 1:
                                    GLOBAL_UP_FALG.value = self.new_flag
                                elif self.task_flag == "down" and result == 1:
                                    GLOBAL_DOWN_FALG.value = self.new_flag
                                # Read the latest shared flags
                                up_flag = GLOBAL_UP_FALG.value
                                down_flag = GLOBAL_DOWN_FALG.value
                                # If either direction wants the gantry/checkpoint
                                # source, both processes switch together
                                if up_flag or down_flag:
                                    if not self.new_flag:  # currently not switched
                                        self.start_switch_time = None
                                        self.last_switch_time = time.time()
                                        self.new_flag = True
                                        logger.info(f"进程{self.task_flag}在时间{str(now_time)}根据全局变量切换数据源为门架卡口数据。")
                                else:
                                    if self.new_flag:  # currently switched
                                        self.start_switch_time = None
                                        self.last_switch_time = time.time()
                                        self.new_flag = False
                                        logger.info(f"进程{self.task_flag}在时间{str(now_time)}根据全局变量切换数据源为相机重识别数据。")
                            # Full report payloads, one per interval
                            event_data_dict = {}
                            # timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                            # Build a record for every interval
                            for info in self.gantry_info.keys():
                                gantry_up = self.gantry_info[info]['up_gantry_id']
                                gantry_down = self.gantry_info[info]['down_gantry_id']
                                # For every key, check that all status values are 0
                                up_status = all(all(value == 0 for value in self.gantry_status.get(key, [1])) for key in gantry_up)
                                down_status = all(all(value == 0 for value in self.gantry_status.get(key, [1])) for key in gantry_down)
                                # Default template: clear interval, no event
                                event_data = {
                                    "timestamp": int(time.time()),
                                    "direction": self.gantry_info[info]["direction"],
                                    "orgCode": gantry_down[0],
                                    "startOrgCode": gantry_up[0],
                                    # "sNum": self.kako_dict[gantry_up]['RealPileNumber'],
                                    # "eNum": self.kako_dict[gantry_down]['RealPileNumber'],
                                    # "sLon": self.kako_dict[gantry_up]['lon'],
                                    # "sLat": self.kako_dict[gantry_up]['lat'],
                                    # "eLon": self.kako_dict[gantry_down]['lon'],
                                    # "eLat": self.kako_dict[gantry_down]['lat'],
                                    "totalFlow": 0,
                                    "intervalSpeed": 0,
                                    "secLevel": 1,
                                    "isBreakdown": 0,
                                    "eventId": 0,
                                    "eventInfo": "",
                                    "status": [up_status, down_status]
                                }
                                # Overlay whatever event_dict collected for this interval
                                try:
                                    if info in event_dict.keys():
                                        if "totalFlow" in event_dict[info].keys():
                                            event_data["totalFlow"] = event_dict[info]["totalFlow"]
                                        if "intervalSpeed" in event_dict[info].keys():
                                            event_data["intervalSpeed"] = event_dict[info]["intervalSpeed"]
                                        if "secLevel" in event_dict[info].keys():
                                            event_data["secLevel"] = event_dict[info]["secLevel"]
                                        if "isBreakdown" in event_dict[info].keys():
                                            event_data["isBreakdown"] = event_dict[info]["isBreakdown"]
                                        if "eventId" in event_dict[info].keys():
                                            event_data["eventId"] = event_dict[info]["eventId"]
                                        if "eventInfo" in event_dict[info].keys():
                                            event_data["eventInfo"] = event_dict[info]["eventInfo"]
                                except KeyError as e:
                                    logger.opt(exception=e).error("event_data字段缺失错误")
                                except ValueError as e:
                                    logger.opt(exception=e).error("event_data数据格式错误")
                                except Exception as e:
                                    logger.opt(exception=e).error("event_data未知错误")
                                # Store the record in the full per-interval dict
                                event_data_dict[info] = event_data
                            # Intervals that actually carry an event
                            info_list = []
                            for event_info in list(event_data_dict.keys()):
                                try:
                                    # Skip clear intervals (secLevel 1)
                                    if event_data_dict[event_info]["secLevel"] == 1:
                                        continue
                                    # Non-clear interval: remember it for the gap fix below
                                    info_list.append(event_info)
                                    # Send the event to Kafka and mirror it to the local log
                                    self.kafka_producer.send(self.config_data["kafka_event_topic"], value=event_data_dict[event_info], key="congestion".encode("utf-8"))
                                    logger.info(f"event_data:{str(event_data_dict[event_info])}")
                                    json_file.write(json.dumps(event_data_dict[event_info], ensure_ascii=False) + "\n")
                                except Exception as e:
                                    logger.opt(exception=e).error("event_data发送到kafka失败")

                            # TODO special case: if two intervals one apart are both
                            # congested while the middle interval is clear, and none of
                            # the three has an on/off ramp (is_fork), force the middle
                            # interval to congested and report it as well
                            # Intervals excluded from this fix
                            not_list = ["10010", "20010"]
                            if info_list:
                                # new_nums: clear intervals that must be promoted to
                                # congested under the special case above
                                new_up_id, new_nums = fill_missing_odds(info_list, not_list, add_num=1)
                                for new_num in new_nums:
                                    front_str = str(new_num - 1)
                                    back_str = str(new_num + 1)
                                    front_num = self.gantry_info[front_str]
                                    back_num = self.gantry_info[back_str]
                                    if front_num["is_fork"] or back_num["is_fork"] or self.gantry_info[str(new_num)]["is_fork"]:
                                        continue
                                    # Severity = mean of the two neighbours, truncated
                                    new_level = int((event_data_dict[front_str]["secLevel"] + event_data_dict[back_str]["secLevel"]) / 2)
                                    event_data_dict[str(new_num)]["secLevel"] = new_level
                                    self.kafka_producer.send(self.config_data["kafka_event_topic"], value=event_data_dict[str(new_num)], key="add_congestion".encode("utf-8"))
                                    logger.info(f"add event_data:{str(event_data_dict[str(new_num)])}")
                                    json_file.write(json.dumps(event_data_dict[str(new_num)], ensure_ascii=False) + "\n")
                            event_dict = {}
                            self.start_time = now_time
                            # Reset device statuses to "bad" until fresh data arrives
                            for gantry in self.id_dict.keys():
                                self.gantry_status[gantry] = [1, 1, 1, 1]
                    except Exception as e:
                        logger.opt(exception=e).error("上报事故信息到主控kafka失败")
                # First-version event reporting code, kept here for reference
                # Report events to kafka
                # for info in self.gantry_info.keys():
                #     gantry_up = self.gantry_info[info]['up_gantry_id']
                #     gantry_down = self.gantry_info[info]['down_gantry_id']
                #     # For every key, check that all status values are 0
                # up_status = all(
                #     all(value == 0 for value in self.gantry_status.get(key, [1])) for key in gantry_up)
                # down_status = all(
                #     all(value == 0 for value in self.gantry_status.get(key, [1])) for key in gantry_down)
                # while not self.block_dict[info].empty():
                #     try:
                #         event_data = {}
                #         con = self.block_dict[info].get(block=False)
                #         tt = con['time'].strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
                #         if con['content'] == "阻断" or con['content'] == "重度阻塞":
                #             # Fetch status values (.get avoids KeyError)
                #             status_up = {k: self.gantry_status.get(k) for k in gantry_up}
                #             status_down = {k: self.gantry_status.get(k) for k in gantry_down}
                #             # Log output
                #             logger.info(f"{info}, {str(con)}, status_up: {status_up}, status_down: {status_down}")
                #
                #             event_id = self.generate_id(info, "congestion", con['time'], con['content'])
                #             event_data = {
                #                 "timestamp": int(con['time'].timestamp()),
                #                 "direction": self.gantry_info[info]["direction"],
                #                 "orgCode": gantry_down[0],
                #                 "startOrgCode": gantry_up[0],
                #                 # "sNum": self.kako_dict[gantry_up[0]]['RealPileNumber'],
                #                 # "eNum": self.kako_dict[gantry_down[0]]['RealPileNumber'],
                #                 # "sLon": self.kako_dict[gantry_up[0]]['lon'],
                #                 # "sLat": self.kako_dict[gantry_up[0]]['lat'],
                #                 # "eLon": self.kako_dict[gantry_down[0]]['lon'],
                #                 # "eLat": self.kako_dict[gantry_down[0]]['lat'],
                #                 "totalFlow": con['num'],
                #                 "intervalSpeed": float(con['speed']),
                #                 "secLevel": level_dict[con['content']],
                #                 "isBreakdown": 1 if con['content'] == "阻断" else 0,
                #                 "eventId": self.id_list[event_id],
                #                 "eventInfo": con['content'],
                #                 "status": [up_status, down_status]
                #             }
                #             if not (status_up and status_down) and con['content'] == "阻断":
                #                 event_data = {}
                #
                #         if con['con'] != "畅通" and con['content'] != "畅通" and event_data == {}:
                #             # Fetch status values (.get avoids KeyError)
                #             status_up = {k: self.gantry_status.get(k) for k in gantry_up}
                #             status_down = {k: self.gantry_status.get(k) for k in gantry_down}
                #             # Log output
                #             logger.info(f"{info}, {str(con)}, status_up: {status_up}, status_down: {status_down}")
                #             event_id = self.generate_id(info, "congestion", con['time'], con['con'])
                #             event_data = {
                #                 "timestamp": int(con['time'].timestamp()),
                #                 "direction": self.gantry_info[info]["direction"],
                #                 "orgCode": gantry_down[0],
                #                 "startOrgCode": gantry_up[0],
                #                 # "sNum": self.kako_dict[gantry_up[0]]['RealPileNumber'],
                #                 # "eNum": self.kako_dict[gantry_down[0]]['RealPileNumber'],
                #                 # "sLon": self.kako_dict[gantry_up[0]]['lon'],
                #                 # "sLat": self.kako_dict[gantry_up[0]]['lat'],
                #                 # "eLon": self.kako_dict[gantry_down[0]]['lon'],
                #                 # "eLat": self.kako_dict[gantry_down[0]]['lat'],
                #                 "totalFlow": con['num'],
                #                 "intervalSpeed": float(con['speed']),
                #                 "secLevel": level_dict[con['con']],
                #                 "isBreakdown": 0,
                #                 "eventId": self.id_list[event_id],
                #                 "eventInfo": con['con'],
                #                 "status": [up_status, down_status]
                #             }
                #
                #         if event_data:
                #             self.kafka_producer.send(self.config_data["kafka_event_topic"], value=event_data)
                #             # Save the reported event to a local txt file
                #             logger.info(f"event_data:{str(event_data)}")
                #             json_file.write(json.dumps(event_data, ensure_ascii=False) + "\n")
                #
                #     except Exception as e:
                #         logger.error(f"事故信息到kafka失败. 数据：{str(self.block_dict)}")
                #         logger.opt(exception=e).error("事故信息到kafka失败")
            except BaseException as e:
                # NOTE(review): BaseException also swallows KeyboardInterrupt /
                # SystemExit, so this loop cannot be interrupted cleanly; the
                # worker threads are signalled to stop but the loop continues.
                logger.opt(exception=e).error("异常退出")
                self.thread_flag = True
                for info in self.gantry_info.keys():
                    self.flag_dict[info].put(self.thread_flag, block=False)


def monitor_process_memory(pids, interval=5, iterations=None):
    '''
    Periodically report the RSS memory usage of the given processes.

    Fix: the original only handled ``psutil.NoSuchProcess``; an inaccessible
    process (``psutil.AccessDenied``, e.g. after a privilege drop) would crash
    the whole monitor loop. Also generalized with an optional ``iterations``
    bound (default ``None`` preserves the original run-forever behavior).

    Args:
        pids: iterable of process IDs to watch.
        interval: seconds to sleep between polling rounds (default 5).
        iterations: optional number of polling rounds before returning;
            ``None`` means poll forever.
    '''
    rounds = 0
    while iterations is None or rounds < iterations:
        for pid in pids:
            try:
                process = psutil.Process(pid)
                mem_info = process.memory_info()
                print(f"Process (PID: {pid}) Memory Usage: RSS={mem_info.rss / 1024 / 1024:.2f} MB")
            except psutil.NoSuchProcess:
                print(f"Process with PID {pid} no longer exists.")
            except psutil.AccessDenied:
                # An unreadable process must not kill the monitor loop.
                print(f"Access denied when inspecting PID {pid}.")
        rounds += 1
        time.sleep(interval)


def process_task(device_info, config_info, task_id):
    '''
    Entry point for a single worker process: build an Event for the given
    task/direction, initialize it, then enter its message loop (blocks forever).
    '''
    worker = Event(device_info, config_info, task_id)
    worker.init()
    worker.add_message()

def event_main(device_info, config_info):
    '''
    Top-level launcher: spawn one worker process per direction ("up" and
    "down") and block until they exit. Ctrl+C terminates the workers cleanly.
    '''
    workers = [
        Process(target=process_task, args=(device_info, config_info, flag))
        for flag in ("up", "down")
    ]
    for proc in workers:
        proc.start()

    try:
        for proc in workers:
            proc.join()
    except KeyboardInterrupt:
        print("检测到 Ctrl+C，正在终止子进程...")
        for proc in workers:
            proc.terminate()
        for proc in workers:
            proc.join()
        print("子进程已终止")