import json
import os
import csv
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import chardet
from show_car_speed import ShowCarSpeed
from show_time_duration import ShowTimeDuration
from show_car_slow_num import ShowCarSlowNum
from show_flow_diff import ShowFlowDiff
from show_pass_rate import ShowPassRate
from utils import *


# Configure matplotlib so Chinese characters render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font for CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs from rendering as boxes in saved figures
# Known segment lengths in metres, keyed by up-gantry id.
# Gantry.run() looks the length up here when the data-source name contains
# "铜锣山"; otherwise the length is estimated from the fastest observed run.
dist_dict = {
        "HW7": 2009,
        "TLS_1_1": 1800,
        "TLS_1_2": 1605,
        "TLS_2_1": 1686,
        "TLS_2_2": 3290,
        "ZW4": 850,
        "ZW3": 3295,
        "HW1": 3370,
        "ZW1": 850,
        "ZW2": 3263,
        "TLS_3_1": 1684,
        "TLS_3_2": 1693,
        "TLS_4_1": 1704,
        "TLS_4_2": 1986
    }


class Gantry:
    """Per-directory processor for gantry (toll-gate) vehicle-match data.

    Reads the ``up_*`` / ``down_*`` / ``all_*`` CSVs found in ``dir_path``,
    computes flow, travel-time/speed, slow-vehicle and pass-rate statistics
    at ``time_interval`` granularity, writes intermediate CSVs plus a summary
    JSON back into ``dir_path``, and hands the CSVs to the ``Show*`` plot
    helpers.
    """

    def __init__(self, dir_path):
        # Directory holding the input CSVs; all outputs are written here too.
        self.dir_path = dir_path
        # Per-vehicle-type travel-time stats; filled by run() via get_mean_time().
        self.ctype_time = None
        # Assumed maximum speed (km/h) used to estimate the segment length.
        self.max_speed = 120
        # Segment length in metres; resolved in run() (lookup or estimate).
        self.dist = None
        # pandas grouping frequency ('5T' == 5-minute buckets).
        self.time_interval = '5T'
        # Plot helpers (project-local modules).
        self.showCarSpeed = ShowCarSpeed()
        self.showTimeDuration = ShowTimeDuration()
        self.showCarSlowNum = ShowCarSlowNum()
        self.showFlowDiff = ShowFlowDiff()
        self.showPassRate = ShowPassRate()

    def parse_duration(self, duration_str):
        """Parse a '<M>m<S>s' duration string (e.g. '3m25s') into total seconds.

        Returns None for NaN/blank input or when the pattern does not match.
        """
        if pd.isna(duration_str) or duration_str.strip() == '':
            return None
        # Match the minutes and seconds components.
        match = re.match(r'(\d+)m(\d+)s', duration_str)
        if match:
            minutes = int(match.group(1))
            seconds = int(match.group(2))
            total_seconds = minutes * 60 + seconds
            return total_seconds
        else:
            return None

    def get_files_containing_substring(self, directory, substring):
        """Return the full path of a file in *directory* whose name contains
        *substring*, or None when there is no such file.

        NOTE(review): if several files match, the one listed last by
        os.listdir() wins.
        """
        file_name = None
        # Walk every entry directly inside the directory (non-recursive).
        for filename in os.listdir(directory):
            # Build the entry's full path.
            full_path = os.path.join(directory, filename)
            # Only consider regular files.
            if os.path.isfile(full_path):
                # Remember the path when the name contains the substring.
                if substring in filename:
                    file_name = full_path
        return file_name

    def get_match_data(self, path):
        """Load a match CSV and return only rows whose column-5 value is 0-2.

        Returns a numpy array of the filtered rows.
        """
        # Detect the file encoding from the first 10 kB using chardet.
        with open(path, 'rb') as f:
            result = chardet.detect(f.read(10000))
        detected_encoding = result['encoding']
        # Read the CSV with the detected encoding.
        df = pd.read_csv(path, encoding=detected_encoding)
        # (A records-dict conversion was considered and left disabled.)
        # a = df.to_dict(orient='records')
        # Convert to a numpy array for positional indexing.
        df_np = df.to_numpy()
        # Column index 5 -- presumably the match_type column.
        # NOTE(review): the original comment said "fifth column (index 4)" but
        # the code reads index 5 -- confirm which column is intended.
        fifth_column = df_np[:, 5]
        # Keep rows whose value lies in [0, 3), i.e. values 0/1/2.
        mask = (fifth_column >= 0) & (fifth_column < 3)
        # Boolean-index the rows that satisfy the condition.
        filtered_arr = df_np[mask]
        return filtered_arr

    def get_mean_time(self, df_np):
        """Aggregate travel-time statistics per vehicle type.

        Expects the vehicle type in column 1 and a '<M>m<S>s' time-diff string
        in column 4 of each row.  Returns
        {ctype: {'list': [seconds...], 'sum', 'mean', 'max', 'min'}}.
        """
        total_num = len(df_np)  # NOTE(review): unused
        ctype_time = {}
        for row in df_np:
            ctype = row[1]
            if ctype not in ctype_time:
                ctype_time[ctype] = {}
                ctype_time[ctype]['list'] = []
            # Parse the time-difference string.
            time_diff = row[4]
            parts = time_diff.split('m')
            fen = int(parts[0])  # minutes
            miao = int(parts[1].split('s')[0])  # seconds
            # Total seconds.
            seconds = fen * 60 + miao
            ctype_time[ctype]['list'].append(seconds)
            if seconds < 0:
                print("time_diff: ", time_diff, row)
        for ctype, data in ctype_time.items():
            total_seconds = data['list']
            total_seconds = np.array(total_seconds)
            data['sum'] = np.sum(total_seconds)
            data['mean'] = np.mean(total_seconds)
            data['max'] = np.max(total_seconds)
            data['min'] = np.min(total_seconds)

        return ctype_time

    def get_car_speed(self, path, time_interval, csv_name, alll_data, type_flag=None):
        """Compute mean travel time and mean speed per time bucket.

        Writes *csv_name* into self.dir_path and returns the bucket records as
        a list of dicts (empty dict when self.dist is unset or non-positive).

        NOTE(review): any non-None type_flag restricts to feevehicletype == 1;
        the flag's actual value is ignored.
        """
        # Load the CSV, parsing the upstream timestamps.
        df = pd.read_csv(path, parse_dates=['transtime_up'],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        if type_flag is not None:
            # Restrict to a specific vehicle type (passenger cars).
            df = df[df['feevehicletype'] == 1]
        df = df[(df['match_type'] >= 0) & (df['match_type'] <= 2)]
        # Convert the duration strings to seconds.
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        # Drop rows whose duration failed to parse.
        df = df.dropna(subset=['duration_seconds'])
        # Group into time_interval buckets; mean duration plus sample count.
        grouped = (df.groupby(pd.Grouper(key='transtime_up', freq=time_interval))
                   .agg({'duration_seconds': ['mean', 'count']})
                   .reset_index())
        # Flatten the column names for clarity.
        grouped.columns = ['transtime_up', 'duration_seconds', 'count']
        mean_speed_data = {}
        if self.dist is not None and self.dist > 0:
            df = grouped.copy()
            # Mean speed = distance / mean duration (NaN for empty buckets).
            # df["mean_speed"] = df.apply(lambda x: round(self.dist / x['duration_seconds'], 2), axis=1)
            df["mean_speed"] = df.apply(
                lambda x: round(self.dist / x['duration_seconds'], 2) if x['count'] != 0 else np.nan, axis=1)
            # For empty buckets: carry the previous speed forward when traffic
            # is still flowing (or during the first few buckets), else use 0.
            prev_mean_speed = np.nan
            for i in range(len(df)):
                if pd.isna(df.at[i, 'duration_seconds']) and df.at[i, 'count'] == 0:
                    if not pd.isna(prev_mean_speed) and alll_data[df.at[i, 'transtime_up']]['up_flow'] > 0\
                            and alll_data[df.at[i, 'transtime_up']]['down_flow'] > 0:
                        df.at[i, 'mean_speed'] = prev_mean_speed
                    elif i < 5:
                        # NOTE(review): prev_mean_speed may still be NaN here.
                        df.at[i, 'mean_speed'] = prev_mean_speed
                    else:
                        df.at[i, 'mean_speed'] = 0
                # else:
                #     prev_mean_speed = df.at[i, 'mean_speed']
                prev_mean_speed = df.at[i, 'mean_speed']
            output_file_path = os.path.join(self.dir_path, csv_name)  # output path inside the data directory
            df[['transtime_up', 'duration_seconds', 'mean_speed', 'count']].to_csv(
                output_file_path, index=False, encoding='utf-8')
            mean_speed_data = df.to_dict('records')
        else:
            print("dist is None")
        return mean_speed_data

    def get_all_flow(self, up_path, down_path, time_interval, csv_name):
        """Count per-bucket flows at the up and down gantries and write them out.

        Writes *csv_name* (time, total_flow, up_flow, down_flow, flow_diff)
        into self.dir_path and returns
        {bucket_time: {'up_flow': .., 'down_flow': ..}}.
        """
        # Load the upstream CSV, parsing the timestamps.
        df_up = pd.read_csv(up_path, parse_dates=["transtime_up"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        # Group upstream records into time_interval buckets.
        grouped_up = df_up.groupby(pd.Grouper(key="transtime_up", freq=time_interval))
        # Per bucket: count of rows with a non-null downstream time (matched),
        # plus the bucket's total row count.
        result_up = grouped_up.agg({
            "transtime_down": lambda x: x.notna().sum()
        }).assign(
            total_count=lambda x: grouped_up.size()
        )
        result_up = result_up.reset_index()
        np_up = result_up.to_numpy()
        # print(result_up)
        # Load the downstream CSV, parsing the timestamps.
        df_down = pd.read_csv(down_path, parse_dates=["transtime_down"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        # Group downstream records into time_interval buckets.
        grouped_down = df_down.groupby(pd.Grouper(key="transtime_down", freq=time_interval))
        # Per bucket: matched count plus total count, as above.
        result_down = grouped_down.agg({
            "transtime_up": lambda x: x.notna().sum()
        }).assign(
            total_count=lambda x: grouped_down.size()
        )
        result_down = result_down.reset_index()
        np_down = result_down.to_numpy()
        alll_data = {}
        fieldnames = ['time', 'total_flow', 'up_flow', 'down_flow', 'flow_diff']
        output_file_path = os.path.join(self.dir_path, csv_name)  # output path inside the data directory
        with open(output_file_path, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            # Write the header row.
            writer.writeheader()
            # Rows are aligned positionally; iterate over the shorter side.
            up_num = np_up.shape[0]
            down_num = np_down.shape[0]
            num = min(up_num, down_num)
            rows = []
            for i in range(num):
                up = np_up[i]
                down = np_down[i]
                # Columns: [0]=bucket time, [1]=matched count, [2]=total count.
                # NOTE(review): total_flow = unmatched upstream + all
                # downstream -- confirm this is the intended definition.
                row_dict = {
                    'time': up[0],
                    'total_flow': (up[2] - up[1] + down[2]),
                    'up_flow': up[2],
                    'down_flow': down[2],
                    'flow_diff': (up[2] - down[2])
                }
                alll_data[row_dict['time']] = {
                    'up_flow': row_dict['up_flow'] if row_dict['up_flow'] is not None else 0,  # default to 0 when missing
                    'down_flow': row_dict['down_flow'] if row_dict['down_flow'] is not None else 0  # default to 0 when missing
                    }
                rows.append(row_dict)

            writer.writerows(rows)
        return alll_data

    def get_mate_all_flow(self, up_path, down_path, time_interval, csv_name):
        """Count per-bucket flows for *matched* vehicles only and write them out.

        Like get_all_flow() but restricted to rows with match_type >= 0 and a
        parseable time_diff.  Writes *csv_name* and returns
        {bucket_time: {'up_flow': .., 'down_flow': ..}}.
        """
        # Load the upstream CSV, parsing the timestamps.
        df_up = pd.read_csv(up_path, parse_dates=["transtime_up"],
                            date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        # df_up = df_up[df_up['feevehicletype'] == 1]
        df_up = df_up[(df_up['match_type'] >= 0)]
        # Convert the duration strings to seconds.
        df_up['duration_seconds'] = df_up['time_diff'].apply(self.parse_duration)
        # Drop rows whose duration failed to parse.
        df_up = df_up.dropna(subset=['duration_seconds'])
        # Count upstream rows per time_interval bucket.
        grouped_up = df_up.groupby(pd.Grouper(key="transtime_up", freq=time_interval)).size().reset_index(
            name='up_flow')
        grouped_up.set_index('transtime_up', inplace=True)
        np_up = grouped_up.to_dict()
        # Load the downstream CSV, parsing the timestamps.
        df_down = pd.read_csv(down_path, parse_dates=["transtime_down"],
                              date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        # df_down = df_down[df_down['feevehicletype'] == 1]
        df_down = df_down[(df_down['match_type'] >= 0)]
        # Convert the duration strings to seconds.
        df_down['duration_seconds'] = df_down['time_diff'].apply(self.parse_duration)
        # Drop rows whose duration failed to parse.
        df_down = df_down.dropna(subset=['duration_seconds'])
        # Count downstream rows per time_interval bucket.
        grouped_down = df_down.groupby(pd.Grouper(key="transtime_down", freq=time_interval)).size().reset_index(
            name='down_flow')
        grouped_down.set_index('transtime_down', inplace=True)
        np_down = grouped_down.to_dict()
        up_flow_data = np_up['up_flow']
        down_flow_data = np_down['down_flow']

        merged_data = {}
        # Iterate over the union of bucket times from both sides.
        all_times = set(up_flow_data.keys()).union(set(down_flow_data.keys()))
        for time in sorted(all_times):
            up_flow = up_flow_data.get(time)
            down_flow = down_flow_data.get(time)
            merged_data[time] = {
                'up_flow': up_flow if up_flow is not None else 0,  # default to 0 when missing
                'down_flow': down_flow if down_flow is not None else 0  # default to 0 when missing
            }
        fieldnames = ['time', 'total_flow', 'up_flow', 'down_flow', 'flow_diff']
        output_file_path = os.path.join(self.dir_path, csv_name)  # output path inside the data directory
        with open(output_file_path, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            writer.writeheader()
            rows = []
            for time, data in merged_data.items():
                # Build one CSV row per bucket.
                row_dict = {
                    'time': time,
                    'total_flow': (data['up_flow'] + data['down_flow']),
                    'up_flow': data['up_flow'],
                    'down_flow': data['down_flow'],
                    'flow_diff': (data['up_flow'] - data['down_flow'])
                }
                rows.append(row_dict)

            writer.writerows(rows)
        return merged_data

    def get_slow_vehicle(self, all_path, time_interval, csv_name):
        """Count slow/abnormal vehicles (match_type 1-3) per time bucket.

        Writes *csv_name* (time, count) into self.dir_path and returns
        {bucket_time: count}.
        """
        # Load the CSV, parsing the upstream timestamps.
        df = pd.read_csv(all_path, parse_dates=["transtime_up"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        # Vehicle-type filter kept disabled.
        # df = df[df['feevehicletype'] < 5]
        df = df[(df['match_type'] >= 1) & (df['match_type'] <= 3)]
        # Convert durations to seconds and drop unparsable values.
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        # # drop rows containing nulls
        df = df.dropna(subset=['duration_seconds'])
        # Count rows per time_interval bucket.
        grouped = df.groupby(pd.Grouper(key='transtime_up', freq=time_interval)).size().reset_index(name='count')
        result = grouped.reset_index()
        np_up = result.to_numpy()

        slow_vehicl_data = {}
        fieldnames = ['time', 'count']
        output_file_path = os.path.join(self.dir_path, csv_name)  # output path inside the data directory
        with open(output_file_path, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            writer.writeheader()
            up_num = np_up.shape[0]
            num = up_num
            rows = []
            for i in range(num):
                up = np_up[i]
                # Columns: [0]=row index, [1]=bucket time, [2]=count.
                row_dict = {
                    'time': up[1],
                    'count': up[2],
                }
                rows.append(row_dict)
                slow_vehicl_data[up[1]] = up[2]

            writer.writerows(rows)

        return slow_vehicl_data

    def get_type_time_change(self, down_path, csv_name):
        """Extract (vlp, vehicle type, downstream time, duration) rows.

        Writes them to *csv_name* in self.dir_path and returns the DataFrame.
        """
        # Load the CSV, parsing the downstream timestamps.
        df = pd.read_csv(down_path, parse_dates=["transtime_down"], date_parser=lambda x: pd.to_datetime(x,
                                                                                                     format='%Y/%m/%d %H:%M:%S'))
        # Keep matched rows only (match_type 0/1/2).
        df = df[(df['match_type'] >= 0) & (df['match_type'] < 3)]
        # Convert the duration strings to seconds.
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        # Drop rows whose duration failed to parse.
        df = df.dropna(subset=['duration_seconds'])
        # print(df)
        df1 = df[['vlp', 'feevehicletype', 'transtime_down', 'duration_seconds']]
        output_file_path = os.path.join(self.dir_path, csv_name)  # output path inside the data directory
        df1.to_csv(output_file_path, index=False, encoding='utf-8')

        return df1

    def get_abnormal_data(self, all_path, csv_name):
        """Dump rows flagged abnormal (match_type == 3) to *csv_name*."""
        # Load the CSV, parsing the upstream timestamps.
        df = pd.read_csv(all_path, parse_dates=["transtime_up"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        # Keep only the abnormal rows.
        df = df[df['match_type'] == 3]
        # Convert durations to seconds and drop unparsable values.
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        # Drop rows whose duration failed to parse.
        df = df.dropna(subset=['duration_seconds'])

        output_file_path = os.path.join(self.dir_path, csv_name)  # output path inside the data directory
        df.to_csv(output_file_path, index=False, encoding='utf-8')

    def get_car_pass_rate_data(self, up_path, seconds_average, time_interval, csv_name):
        """Per-bucket share of passenger cars whose travel time is within
        *seconds_average* seconds.

        Writes *csv_name* (Group_Time, Valid_Count, Total_Count, Percentage)
        into self.dir_path and returns the rows as a list of dicts.
        """
        # Load the CSV, parsing the upstream timestamps.
        df = pd.read_csv(up_path, parse_dates=["transtime_up"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        # Passenger cars only, matched rows only.
        df = df[df['feevehicletype'] == 1]
        df = df[(df['match_type'] >= 0) & (df['match_type'] < 3)]
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        # Drop rows whose duration failed to parse.
        df = df.dropna(subset=['duration_seconds'])
        # Group into time_interval buckets.
        grouped_up = df.groupby(pd.Grouper(key="transtime_up", freq=time_interval))
        # Result accumulator.
        result = pd.DataFrame()
        prev_percentage = np.nan  # NOTE(review): unused
        # Process each bucket.
        for name, group in grouped_up:
            # Rows whose duration is within the threshold.
            valid_data = group[(group['duration_seconds'] <= seconds_average)]
            # Number of in-threshold rows.
            valid_count = len(valid_data)
            # Total rows in the bucket.
            total_count = len(group)
            # Percentage of in-threshold rows (0 for empty buckets).
            percentage = (valid_count / total_count * 100) if total_count > 0 else 0

            # Append one result row for this bucket.
            new_row = pd.DataFrame({'Group_Time': [name], 'Valid_Count': [valid_count], 'Total_Count': [total_count], 'Percentage': [percentage]})
            # Merge the new row into the accumulator.
            result = pd.concat([result, new_row], ignore_index=True)
        output_file_path = os.path.join(self.dir_path, csv_name)  # output path inside the data directory
        result.to_csv(output_file_path, index=False, encoding='utf-8')

        return result.to_dict('records')

    def check_mean_speed(self, ctype):
        """Return the mean travel time (seconds) recorded for *ctype*, else None."""
        if ctype in self.ctype_time:
            return self.ctype_time[ctype]['mean']
        else:
            return None

    def get_all_pass_rate_data(self, up_path, time_interval, csv_name):
        """Per-bucket share of vehicles faster than their type's mean time.

        Uses self.ctype_time (via check_mean_speed) as the per-type threshold.
        Writes *csv_name* and returns the rows as a list of dicts.
        """
        # Load the CSV, parsing the upstream timestamps.
        df = pd.read_csv(up_path, parse_dates=["transtime_up"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        df = df[(df['match_type'] >= 0) & (df['match_type'] < 3)]
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        df['type_mean_time'] = df['feevehicletype'].apply(self.check_mean_speed)
        # Drop rows whose duration failed to parse.
        df = df.dropna(subset=['duration_seconds'])
        # Group into time_interval buckets.
        grouped_up = df.groupby(pd.Grouper(key="transtime_up", freq=time_interval))
        # Result accumulator.
        result = pd.DataFrame()
        # Process each bucket.
        for name, group in grouped_up:
            valid_data = group[group['duration_seconds'] <= group['type_mean_time']]
            # Number of in-threshold rows.
            valid_count = len(valid_data)
            # Total rows in the bucket.
            total_count = len(group)
            # Percentage of in-threshold rows (0 for empty buckets).
            percentage = (valid_count / total_count * 100) if total_count > 0 else 0
            # Append one result row for this bucket.
            new_row = pd.DataFrame({'Group_Time': [name], 'Valid_Count': [valid_count], 'Total_Count': [total_count], 'Percentage': [percentage]})
            # Merge the new row into the accumulator.
            result = pd.concat([result, new_row], ignore_index=True)
        output_file_path = os.path.join(self.dir_path, csv_name)  # output path inside the data directory
        result.to_csv(output_file_path, index=False, encoding='utf-8')

        return result.to_dict('records')

    def ce(self, df_up):
        """Score travel times against each vehicle type's typical time and
        report, per 10-minute window, how many scores exceed 60.

        Returns a list of dicts (time_interval, count_greater_60, total_count,
        percentage); percentage is 0 when a window has <= 3 samples.
        """
        data = df_up.to_dict(orient='records')
        self_data = {}
        for i in range(len(data)):
            time = data[i]['transtime_down'].strftime('%Y-%m-%d %H:%M:%S')
            ctype = data[i]['feevehicletype']
            if ctype not in self_data:
                self_data[ctype] = {}

            self_data[ctype][time] = {
                "vlp": data[i]['vlp'],
                "duration_seconds": data[i]['duration_seconds']
            }

        for key, value in self_data.items():
            value_data = [sub_dict["duration_seconds"] for sub_key, sub_dict in value.items()]
            # ---- trim the distribution, then take the median of what's left ----
            filtered_data = np.array(value_data)
            p85 = np.percentile(filtered_data, 85)
            # NOTE(review): named p15 but computes the 0th percentile (the
            # minimum) -- confirm whether 15 was intended.
            p15 = np.percentile(filtered_data, 0)
            if len(value_data) <= 10:
                filtered_values = value_data
            else:
                filtered_values = [x for x in value_data if p15 < x < p85]
            if filtered_values:
                # Median of the trimmed data is taken as the optimal time.
                optimal_value = np.median(filtered_values)
                average_value = optimal_value
                for key1, value1 in value.items():
                    d = value1["duration_seconds"]
                    # Scale so a typical vehicle scores 40; cap at 200.
                    self_data[key][key1]["new_seconds"] = min(round(d / average_value * 40), 200)

        # Flatten {type: {time: rec}} into {time: rec-with-type}.
        # NOTE(review): identical timestamps across types overwrite each other.
        new_self_data = {}
        for outer_key, inner_dict in self_data.items():
            for inner_key, sub_inner_dict in inner_dict.items():
                new_dict = {
                    "time": inner_key,
                    "type": outer_key,
                    **sub_inner_dict
                }
                new_self_data[inner_key] = new_dict

        df = pd.DataFrame(new_self_data.values())
        # Convert the time column to datetimes.
        df['time'] = pd.to_datetime(df['time'])
        # Bucket each record into its 10-minute window.
        df['time_interval'] = df['time'].dt.floor('10T')
        # Count records with new_seconds > 60 per 10-minute window.
        count_greater_60 = df[df['new_seconds'] > 60].groupby('time_interval').size().reset_index(
            name='count_greater_60')
        # Count all records per 10-minute window.
        total_count = df.groupby('time_interval').size().reset_index(name='total_count')
        # Join the two counts, filling windows missing from either side with 0.
        result = pd.merge(count_greater_60, total_count, on='time_interval', how='outer').fillna(0)
        # Convert the window back to a time string.
        result['time_interval'] = result['time_interval'].dt.strftime('%Y-%m-%d %H:%M:%S')
        new_result = result.to_dict('records')
        for i in range(len(new_result)):
            if int(new_result[i]['total_count']) > 3:
                new_result[i]['percentage'] = round(new_result[i]['count_greater_60'] / new_result[i]['total_count'] * 100, 2)
            else:
                new_result[i]['percentage'] = 0

        return new_result

    def run(self, event_true_dict, name):
        """Process one data directory end to end.

        Locates the up/down/all CSVs in self.dir_path, derives flow, speed,
        slow-vehicle and pass-rate statistics, writes the intermediate CSVs
        and a summary JSON, then feeds the CSVs to the Show* plot helpers.

        event_true_dict maps up-gantry id -> ground-truth incident info;
        name is the data-source label stored in the JSON output.  Returns
        False when the directory is skipped (no type-1 vehicles, or minimum
        travel time <= 10 s), True otherwise.
        """
        up_path = self.get_files_containing_substring(self.dir_path, 'up_')
        down_path = self.get_files_containing_substring(self.dir_path, 'down_')
        all_path = self.get_files_containing_substring(self.dir_path, 'all_')

        # Matched rows (match_type 0/1/2) as a numpy array -- no outliers.
        df_np = self.get_match_data(all_path)
        # Total / mean / max / min travel time per vehicle type.
        self.ctype_time = self.get_mean_time(df_np)
        # print("最小间隔时长: ", ctype_time)
        # m = list(self.ctype_time.keys())

        # Require at least one passenger car (type 1) in the data.
        if 1 in self.ctype_time:
            seconds_min = self.ctype_time[1]['min']
        else:
            return False

        if seconds_min <= 10:
            print("最小间隔时长小于10秒，跳过该文件")
            return False

        # For the 铜锣山 dataset, look the segment length up by gantry id.
        if "铜锣山" in name:
            up_g = os.path.basename(self.dir_path).split("-")[1]
            self.dist = dist_dict[up_g]

        # self.dist = 1000
        # print(self.dist)
        # Otherwise estimate the segment length (metres) from the fastest run.
        if self.dist is None:
            self.dist = round(self.max_speed / 3.6 * seconds_min, 2)
            print("估算区间距离(米): ", self.dist)

        true_time_list = []
        # info_data_json_name = "info.json"
        true_value_list = []
        # Directory name layout: '<yyyymmdd>-<upGantry>-<downGantry>...'.
        namefile_list = os.path.basename(self.dir_path).split('-')
        up_gantry = namefile_list[1]
        down_gantry = namefile_list[2]
        year = namefile_list[0][:4]
        month = namefile_list[0][4:6]
        day = namefile_list[0][6:8]
        event_dict = {}
        if up_gantry in event_true_dict.keys():
            event_dict = event_true_dict[up_gantry]
        if event_dict:
            if down_gantry == event_dict["down"]:
                event_dict_list = event_dict["list"]
                if event_dict_list:
                    # Collect the ground-truth incidents recorded for this date.
                    for event_dict in event_dict_list:
                        if event_dict["年"] == year and event_dict["月"] == month and event_dict["日"] == day:
                            true_dict = {
                                "eventTime": year + "-" + month + "-" + day + " " + event_dict["时"] + ":" + event_dict["分"],
                                "direction": str(event_dict["方向"]),
                                "pileNumber": str(event_dict["桩号"]),
                                "roadType": str(event_dict["道路类型"]),
                                "eventType": str(event_dict["事故分类"])
                            }
                            true_time_str = event_dict["时"] + ":" + event_dict["分"]
                            true_time_list.append(true_time_str)
                            true_value_list.append(true_dict)

        # Up/down gantry flow, flow difference and total flow per bucket.
        alll_flow_data = "alll_flow_data.csv"
        alll_data = self.get_all_flow(up_path=up_path, down_path=down_path, time_interval=self.time_interval, csv_name=alll_flow_data)

        # The same flow statistics restricted to matched vehicles.
        mate_all_flow_csv_name = "mate_flow_data.csv"
        merged_data = self.get_mate_all_flow(up_path=up_path, down_path=down_path, time_interval=self.time_interval,
                                             csv_name=mate_all_flow_csv_name)

        # Passenger-car travel time / mean speed distribution over the day.
        car_speed_csv_name = "car_time_data.csv"
        mean_speed_data = self.get_car_speed(path=all_path, time_interval=self.time_interval, type_flag=1,
                                             csv_name=car_speed_csv_name, alll_data=merged_data)

        # Slow-vehicle counts (match_type 1/2/3) per bucket.
        slow_vehicle_csv_name = "car_slow_move_data.csv"
        slow_vehicl_data = self.get_slow_vehicle(all_path=all_path, time_interval=self.time_interval,
                                                 csv_name=slow_vehicle_csv_name)

        # Vehicle-type vs travel-time scatter data.
        type_time_csv_name = "time_duration_data.csv"
        type_duration_data = self.get_type_time_change(down_path=down_path, csv_name=type_time_csv_name)
        new_data_list = self.ce(type_duration_data)
        greater_time_intervals = get_greater_time_duration(new_data_list, 50, 600)

        # Pass rate for passenger cars.
        car_pass_rate_csv_name = "car_pass_rate_data.csv"
        # NOTE(review): dist is in metres, so this equals dist * 60 seconds --
        # verify the intended threshold/units.
        seconds_average = self.dist / 60 * 3600
        car_pass_rate_data = self.get_car_pass_rate_data(up_path=up_path, seconds_average=seconds_average,
                                                         time_interval=self.time_interval,
                                                         csv_name=car_pass_rate_csv_name)

        all_pass_rate_csv_name = "alll_pass_rate_data.csv"
        all_pass_rate_data = self.get_all_pass_rate_data(up_path=up_path, time_interval=self.time_interval,
                                                         csv_name=all_pass_rate_csv_name)

        # Dump abnormal rows (match_type == 3).
        abnormal_data_csv_name = 'abnormal_data.csv'
        self.get_abnormal_data(all_path=all_path, csv_name=abnormal_data_csv_name)


        # Candidate detection intervals from the individual indicators.
        a_intervals = get_mean_speed_duration(mean_speed_data, 40, 900)
        # b_intervals = get_slow_vehicl_duration(slow_vehicl_data, 1, 900)
        c_intervals = get_all_pass_rate_duration(all_pass_rate_data, 50, 900)
        time_intervals = get_time_duration(type_duration_data, 3, 900, "5T")

        # Intersect whichever indicator intervals are available.
        if a_intervals and c_intervals:
            intervals_list = [a_intervals, c_intervals]
            intersections = calculate_intersections(intervals_list)
        elif a_intervals:
            intervals_list = [a_intervals]
            intersections = calculate_intersections(intervals_list)
        elif c_intervals:
            intervals_list = [c_intervals]
            intersections = calculate_intersections(intervals_list)
        else:
            intersections = []

        # Merge overlapping ranges, then also bridge gaps of <= 5 minutes.
        merge_r = intersections + time_intervals + greater_time_intervals
        if merge_r:
            merged = merge_ranges(merge_r)

            sorted_intervals = sorted(merged, key=lambda x: x[0])
            merged = [sorted_intervals[0]]
            for current_start, current_end in sorted_intervals[1:]:
                last_start, last_end = merged[-1]
                if (current_start - last_end).total_seconds() / 60 <= 5:
                    new_end = max(last_end, current_end)
                    merged[-1] = (last_start, new_end)
                else:
                    merged.append((current_start, current_end))
        else:
            merged = []
        # print(merged)

        # Detected intervals: both display strings and JSON-ready dicts.
        detection_value_list = []
        save_value_dict = []
        for i in merged:
            detection_value_list.append(str(i[0].hour)+":"+str(i[0].minute))
            detection_value_list.append(str(i[1].hour) + ":" + str(i[1].minute))
            save_value_dict.append(
                {
                    "startTime": i[0].strftime('%Y-%m-%d %H:%M:%S'),
                    "endTime": i[1].strftime('%Y-%m-%d %H:%M:%S'),
                }
            )

        # Congestion intervals taken from the mean-speed indicator alone.
        speed_intervals = get_mean_speed_duration(mean_speed_data, 40, 900)
        # print(speed_intervals)
        yongdu_value_list = []
        save_yongdu_value_dict = []
        for i in speed_intervals:
            yongdu_value_list.append(str(i[0].hour)+":"+str(i[0].minute))
            yongdu_value_list.append(str(i[1].hour) + ":" + str(i[1].minute))
            save_yongdu_value_dict.append(
                {
                    "startTime": i[0].strftime('%Y-%m-%d %H:%M:%S'),
                    "endTime": i[1].strftime('%Y-%m-%d %H:%M:%S'),
                }
            )

        true_time_dict = {"true_value_list": true_time_list,
                          "detection_value_list": detection_value_list,
                          "yongdu_value_list": yongdu_value_list}

        json_data = {
            "upGantryId": up_gantry,
            "downGantryId": down_gantry,
            "time": year + "-" + month + "-" + day,
            "source": name,
            "kmNumber": int(self.dist / 1000),
            "truthKM": round(self.dist / 1000, 2),
            "truthValue": true_value_list,
            "detectionValue": save_value_dict,
            "congestionValue": save_yongdu_value_dict,
        }

        file_path = os.path.join(self.dir_path, os.path.basename(self.dir_path) + ".json")
        # Save the summary dict as a JSON file next to the inputs.
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(json_data, f, ensure_ascii=False)

        # Render the charts from the CSVs just written.
        self.showFlowDiff.read_file_true(os.path.join(self.dir_path, alll_flow_data), true_time_dict)
        self.showFlowDiff.read_file_true(os.path.join(self.dir_path, mate_all_flow_csv_name), true_time_dict)
        self.showCarSpeed.read_file_true(os.path.join(self.dir_path, car_speed_csv_name), true_time_dict)
        self.showCarSlowNum.read_file_true(os.path.join(self.dir_path, slow_vehicle_csv_name), true_time_dict)
        self.showTimeDuration.read_file2_true(os.path.join(self.dir_path, type_time_csv_name), true_time_dict)
        self.showPassRate.read_file_true(os.path.join(self.dir_path, car_pass_rate_csv_name), true_time_dict)
        self.showPassRate.read_file_true(os.path.join(self.dir_path, all_pass_rate_csv_name), true_time_dict)

        self.clear()
        return True

    def clear(self):
        """Reset per-directory state so the instance can process another directory."""
        self.ctype_time = None
        self.dist = None


def get_direct_subdirectories(directory):
    """Yield the paths of the immediate subdirectories of *directory*.

    Non-recursive.  Prints a message and stops yielding when the directory
    does not exist or cannot be accessed.

    :param directory: path of the directory to inspect.
    """
    try:
        yield from (entry.path for entry in os.scandir(directory) if entry.is_dir())
    except FileNotFoundError:
        print(f"目录 {directory} 不存在")
    except PermissionError:
        print(f"没有权限访问目录 {directory}")


if __name__ == '__main__':
    # Driver: run the Gantry pipeline over every subdirectory of a
    # hard-coded (machine-specific) output directory.

    # NOTE(review): re-assigns the module-level dist_dict with identical
    # values; Gantry.run() reads the module-level name either way.
    dist_dict = {
        "HW7": 2009,
        "TLS_1_1": 1800,
        "TLS_1_2": 1605,
        "TLS_2_1": 1686,
        "TLS_2_2": 3290,
        "ZW4": 850,
        "ZW3": 3295,
        "HW1": 3370,
        "ZW1": 850,
        "ZW2": 3263,
        "TLS_3_1": 1684,
        "TLS_3_2": 1693,
        "TLS_4_1": 1704,
        "TLS_4_2": 1986
    }

# ZW1,ZW2-20250125    ZW2,TLS_3_1-20250125    ZW3,HW3-20250125   ZW4,ZW3-20250125
#     dir_path = r'D:\GJ\项目\铜锣山\data\data_output\HW1,ZW1-20250124'
#     gantry = Gantry(dir_path)
#     gantry.run()
#     d_path = r'D:\GJ\项目\铜锣山\data\data_output\ZW4,ZW3-20250125'
#     up_g = os.path.basename(d_path).split(",")[0]
#     print(up_g)


    d_path = r'D:\GJ\项目\事故检测\模拟数据\sn_ip\output\new\2025-03-25'

    # Process every direct subdirectory (one per segment/day) in turn.
    for dir_path in get_direct_subdirectories(d_path):
        print(dir_path)
        # up_g = os.path.basename(dir_path).split(",")[0]
        # print(up_g)
        # if up_g == "TLS_2_2":
        gantry = Gantry(dir_path)
        gantry.run({}, "铜锣山(仿真)")


    # error = []
    # ii = 0
    # k = 0
    # output_file = r'D:\GJ\项目\事故检测\output\纳黔高速\error.txt'
    # last_time = time.time()
    # top_dir = r'D:\GJ\项目\事故检测\output\纳黔高速'
    # # walk the tree with os.walk
    # for root, dirs, files in os.walk(top_dir):
    #     # root is the path of the folder currently being walked
    #     # dirs is a list of the directory names inside it (non-recursive)
    #     # files is likewise a list of the file names inside it (non-recursive)
    #     # print the folder currently being walked
    #     # print(f"Folder: {root}")
    #     # print every subfolder of this folder
    #     for dir_name in dirs:
    #         dir_path = os.path.join(root, dir_name)
    #         print(f"  Directory: {os.path.join(root, dir_name)}")
    #         k -= 1
    #         if k <= 0:
    #             gantry = Gantry(dir_path)
    #             F = gantry.run()
    #             now_time = time.time()
    #             T = now_time - last_time
    #             last_time = now_time
    #             ii += 1
    #             print(ii, T)
    #             if not F:
    #                 error.append(dir_name)
    #                 print('error', dir_name)
    #     # # print every file in this folder
    #     # for file_name in files:
    #     #     print(f"  File: {os.path.join(root, file_name)}")
    #     # open the file in write mode
    #     # with open(output_file, 'w') as file:
    #     #     # write each list element to the file, one per line
    #     #     for item in error:
    #     #         file.write(f"{item}\n")
    #
    #     break   # do not descend into deeper directories
    # dir_path = r'D:\GJ\项目\事故检测\output\纳黔高速\ZW4,ZW3-20250125'
    # a = os.path.basename(dir_path)
    # print(a)
