import os
import uuid
from datetime import datetime
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import pandas as pd
import chardet
from parse_data_png import Gantry



# Column-name mapping applied to the raw transaction files: Chinese headers
# as they appear in the source CSV/XLSX -> the internal English names used
# throughout this module (gantry ID, vehicle license plate, fee vehicle
# type, transaction time).
Define = {
    "门架编号": "gantryid",
    "车牌": "vlp",
    "车型": "feevehicletype",
    "通行时间": "transtime"
}


def get_match_data(path):
    """Load a matched-result CSV as a NumPy array.

    The encoding is sniffed with chardet from the first 10 KB so files
    saved with different encodings (GBK, UTF-8, ...) all load cleanly.
    """
    with open(path, 'rb') as handle:
        sniffed = chardet.detect(handle.read(10000))
    frame = pd.read_csv(path, encoding=sniffed['encoding'])
    return frame.to_numpy()


class Preprocess:
    """Preprocess one raw gantry-transaction file.

    Pipeline (see run()): parse the up/down gantry IDs from the file name,
    load and clean the raw records, de-duplicate repeated reads, match each
    upstream vehicle to a downstream record, then write per-direction and
    merged CSV result files.
    """

    def __init__(self, file_path, flag):
        # Raw input file: CSV when flag == 0, Excel (.xlsx) when flag == 1.
        self.file_path = file_path
        # Gantry IDs parsed from the file name by get_gantry_ID().
        self.up_gantry = None
        self.down_gantry = None
        # Cleaned per-gantry DataFrames, populated by get_data().
        self.up_gantry_df = None
        self.down_gantry_df = None
        # Presumably the section speed limit in km/h — unused below; TODO confirm.
        self.max_speed = 120
        # Presumably gantry spacing — never assigned a real value here.
        self.dist = None
        # File-naming convention selector: 0 -> "UP,DOWN-YYYYMMDD.csv",
        # 1 -> "UP-DOWN-YYYY-MM-DD-....xlsx".
        self.flag = flag

    def get_gantry_ID(self):
        """Parse self.up_gantry / self.down_gantry out of the file name."""
        file_name = os.path.basename(self.file_path)
        if self.flag == 0:
            # "UP,DOWN-YYYYMMDD.csv": the comma separates the two gantry IDs
            parts = file_name.split(',')
            self.up_gantry = parts[0]
            self.down_gantry = parts[1].split('-')[0]  # drop the trailing "-date" suffix
        elif self.flag == 1:
            # "UP-DOWN-YYYY-MM-DD-...": hyphen-separated fields
            parts = file_name.split('-')
            self.up_gantry = parts[0]
            self.down_gantry = parts[1]

    def get_data(self):
        """Load the raw file and build the cleaned up/down DataFrames.

        Detects the CSV encoding with chardet, normalizes column names via
        ``Define``, drops the placeholder plate "默A00000", removes repeat
        reads closer than 10 minutes apart (see dataframe()) and sorts both
        frames by transaction time.
        """
        # Sniff the encoding from the first 10 KB of the file.
        with open(self.file_path, 'rb') as f:
            result = chardet.detect(f.read(10000))
        detected_encoding = result['encoding']
        # NOTE(review): `df` is unbound if self.flag is neither 0 nor 1.
        if self.flag == 0:
            df = pd.read_csv(self.file_path, encoding=detected_encoding)
        elif self.flag == 1:
            df = pd.read_excel(self.file_path, engine='openpyxl')
        # Strip stray whitespace and commas from the header names.
        df.columns = [col.strip().replace(',', '') for col in df.columns]
        # Coerce every cell to a stripped string.
        for col in df.columns:
            df[col] = df[col].astype(str).str.strip()

        # Rename the Chinese headers to the internal English names.
        df = df.rename(columns=Define)

        # Upstream-gantry rows only, dropping the placeholder plate.
        self.up_gantry_df = df[df['gantryid'] == self.up_gantry]
        self.up_gantry_df = self.up_gantry_df[self.up_gantry_df['vlp'] != "默A00000"].reset_index(drop=True)

        # Same filtering for the downstream gantry.
        self.down_gantry_df = df[df['gantryid'] == self.down_gantry]
        self.down_gantry_df = self.down_gantry_df[self.down_gantry_df['vlp'] != "默A00000"].reset_index(drop=True)

        # Parse transit times into datetimes so they can be compared.
        self.up_gantry_df.loc[:, 'transtime'] = pd.to_datetime(self.up_gantry_df['transtime'],
                                                               format='%Y-%m-%d %H:%M:%S')
        self.down_gantry_df.loc[:, 'transtime'] = pd.to_datetime(self.down_gantry_df['transtime'],
                                                                 format='%Y-%m-%d %H:%M:%S')

        # Drop repeated reads of the same plate within 10 minutes.
        self.up_gantry_df = self.dataframe(self.up_gantry_df)
        self.down_gantry_df = self.dataframe(self.down_gantry_df)

        self.up_gantry_df = self.up_gantry_df.sort_values(by='transtime', ascending=True)
        self.down_gantry_df = self.down_gantry_df.sort_values(by='transtime', ascending=True)

    def dataframe(self, df):
        """De-duplicate repeated gantry reads.

        Within each (gantryid, vlp) group, keep a row only when it is at
        least 10 minutes after the previously kept row — closer reads are
        treated as duplicate detections of the same pass.
        """
        groups = df.groupby(['gantryid', 'vlp'])
        keep_rows = []
        for _, group in groups:
            prev_time = None
            for index, row in group.iterrows():
                current_time = row['transtime']
                if prev_time is None or (current_time - prev_time).total_seconds() >= 10 * 60:
                    keep_rows.append(index)
                    prev_time = current_time

        # Keep only the surviving row indices.
        df = df.loc[keep_rows]
        return df

    def matching_json(self, up_gantry_df, down_gantry_df, up_path, down_path, up_flag=True, down_flag=True):
        """Match upstream vehicles to downstream records and write results.

        Records are grouped by plate; plates seen multiple times at a gantry
        ("multi-trip") are disambiguated by pairing each upstream pass with
        the nearest later downstream pass and re-keying the pair under
        plate+uuid. Writes one CSV per direction (columns: vlp,
        feevehicletype, transtime_up, transtime_down, time_diff, match_type,
        police_car). match_type: 0..3 = travel-time buckets relative to the
        mean, -1 = unmatched, -2 = downstream time precedes upstream time.
        """
        up_gantry_df['timestamp'] = up_gantry_df['transtime'].apply(lambda x: x.timestamp())
        up_gantry_df_dict = up_gantry_df.to_dict(orient='records')
        down_gantry_df['timestamp'] = down_gantry_df['transtime'].apply(lambda x: x.timestamp())
        down_gantry_df_dict = down_gantry_df.to_dict(orient='records')

        # Group upstream records by plate; remember multi-trip plates.
        up_gantry_df_dict_all = {}
        repeat_list_up = []
        for up_gantry_one in up_gantry_df_dict:
            if up_gantry_one['vlp'] not in up_gantry_df_dict_all.keys():
                up_gantry_df_dict_all[up_gantry_one['vlp']] = [up_gantry_one]
            else:
                if up_gantry_one['vlp'] not in repeat_list_up:
                    repeat_list_up.append(up_gantry_one['vlp'])
                up_gantry_df_dict_all[up_gantry_one['vlp']].append(up_gantry_one)

        # Same grouping for the downstream records.
        down_gantry_df_dict_all = {}
        repeat_list_down = []
        for down_gantry_one in down_gantry_df_dict:
            if down_gantry_one['vlp'] not in down_gantry_df_dict_all.keys():
                down_gantry_df_dict_all[down_gantry_one['vlp']] = [down_gantry_one]
            else:
                if down_gantry_one['vlp'] not in repeat_list_down:
                    repeat_list_down.append(down_gantry_one['vlp'])
                down_gantry_df_dict_all[down_gantry_one['vlp']].append(down_gantry_one)

        # Pass 1: plates with several upstream trips. Each upstream pass is
        # matched to the nearest later downstream pass of the same plate.
        for repeat_id in repeat_list_up:
            if repeat_id in down_gantry_df_dict_all.keys():
                up_gantry_df_dict_list = up_gantry_df_dict_all[repeat_id]  # several trips
                down_gantry_df_dict_list = down_gantry_df_dict_all[repeat_id]  # one or more
                for up_one in up_gantry_df_dict_list:
                    up_one_time = up_one['timestamp']
                    # NOTE(review): matches with a gap >= 100000 s (~27.8 h)
                    # are never made — presumably intentional; confirm.
                    min_diff = 100000
                    target_one = None
                    for down_one in down_gantry_df_dict_list:
                        down_one_time = down_one['timestamp']
                        if up_one_time < down_one_time:
                            diff = down_one_time - up_one_time
                            if diff < min_diff:
                                min_diff = diff
                                target_one = down_one

                    # Re-key the pair (or the unmatched upstream pass) under a
                    # synthetic unique key so each trip becomes its own entry.
                    uuid_one = str(uuid.uuid4())
                    if target_one is not None:
                        up_gantry_df_dict_all[repeat_id + uuid_one] = [up_one]
                        down_gantry_df_dict_all[repeat_id + uuid_one] = [target_one]
                        # NOTE(review): dict inequality removes *all* records equal
                        # to target_one, not just one occurrence.
                        down_gantry_df_dict_all[repeat_id] = [x for x in down_gantry_df_dict_all[repeat_id] if
                                                              x != target_one]
                    else:
                        up_gantry_df_dict_all[repeat_id + uuid_one] = [up_one]

                # The original plate key has been fully re-keyed above.
                del up_gantry_df_dict_all[repeat_id]
                if len(down_gantry_df_dict_all[repeat_id]) == 0:
                    del down_gantry_df_dict_all[repeat_id]
            else:
                # Multi-trip upstream plate never seen downstream: leave as-is.
                pass

        # Pass 2: plates with several downstream trips but a single upstream
        # record (multi-trip up/down pairs were already consumed in pass 1).
        for repeat_id in repeat_list_down:
            if repeat_id in down_gantry_df_dict_all.keys():
                if repeat_id in up_gantry_df_dict_all.keys():
                    down_gantry_df_dict_list = down_gantry_df_dict_all[repeat_id]  # several
                    up_gantry_df_dict_list = up_gantry_df_dict_all[repeat_id]  # one
                    for up_one in up_gantry_df_dict_list:
                        up_one_time = up_one['timestamp']
                        min_diff = 100000
                        target_one = None
                        for down_one in down_gantry_df_dict_list:
                            down_one_time = down_one['timestamp']
                            if up_one_time < down_one_time:
                                diff = down_one_time - up_one_time
                                if diff < min_diff:
                                    min_diff = diff
                                    target_one = down_one

                        if target_one is not None:
                            uuid_one = str(uuid.uuid4())
                            up_gantry_df_dict_all[repeat_id + uuid_one] = [up_one]
                            down_gantry_df_dict_all[repeat_id + uuid_one] = [target_one]
                            # Every non-matched downstream trip becomes its own
                            # (necessarily unmatched) entry.
                            for i in range(len(down_gantry_df_dict_all[repeat_id])):
                                if down_gantry_df_dict_all[repeat_id][i] != target_one:
                                    uuid_two = str(uuid.uuid4())
                                    down_gantry_df_dict_all[repeat_id + uuid_two] = [
                                        down_gantry_df_dict_all[repeat_id][i]]
                        else:
                            # No later downstream pass: split every trip out.
                            for i in range(len(down_gantry_df_dict_all[repeat_id])):
                                uuid_two = str(uuid.uuid4())
                                down_gantry_df_dict_all[repeat_id + uuid_two] = [down_gantry_df_dict_all[repeat_id][i]]
                    del up_gantry_df_dict_all[repeat_id]
                    del down_gantry_df_dict_all[repeat_id]
                else:
                    # Multi-trip downstream plate never seen upstream.
                    pass
            else:
                # Already consumed by pass 1 (up/down multi-trip pairing).
                pass

        if up_flag:
            # Mean positive travel time, used to bucket match_type below.
            # NOTE(review): np.mean([]) yields nan (with a warning) when no
            # positive durations exist — every comparison then stays False.
            time_list = []
            for lience, info in up_gantry_df_dict_all.items():
                if lience in down_gantry_df_dict_all.keys():
                    duration = down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp']
                    if duration > 0:
                        time_list.append(duration)
            time_mean = np.mean(time_list)
            print("平均时间", time_mean)

            result = []
            for lience, info in up_gantry_df_dict_all.items():
                if lience in down_gantry_df_dict_all.keys():
                    to_write = info[0]
                    # Travel time formatted as "<m>m<s>s".
                    fen = int((down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp']) / 60)
                    miao = int((down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp']) % 60)
                    temp = str(fen) + "m" + str(miao) + "s"
                    to_write['time_diff'] = temp
                    to_write['transtime_down'] = down_gantry_df_dict_all[lience][0]['transtime']
                    to_write['transtime_up'] = to_write['transtime']

                    # Bucket by travel time relative to the mean; the later
                    # checks intentionally override earlier ones (-2 wins).
                    if down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp'] < time_mean:
                        to_write['match_type'] = 0
                    if time_mean <= down_gantry_df_dict_all[lience][0]['timestamp'] - info[0][
                        'timestamp'] < time_mean * 1.5:
                        to_write['match_type'] = 1
                    if time_mean * 1.5 <= down_gantry_df_dict_all[lience][0]['timestamp'] - info[0][
                        'timestamp'] < time_mean * 3:
                        to_write['match_type'] = 2
                    if time_mean * 3 <= down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp']:
                        to_write['match_type'] = 3
                    if down_gantry_df_dict_all[lience][0]['timestamp'] < info[0]['timestamp']:
                        to_write['match_type'] = -2
                    # Police-vehicle flag based on the plate text.
                    # NOTE(review): lience[2] raises IndexError for keys shorter
                    # than 3 characters — presumably plates are always longer.
                    if "警" in lience:
                        if lience[2] == '9':
                            to_write['police_car'] = 1
                        else:
                            to_write['police_car'] = 2
                    else:
                        to_write['police_car'] = 0

                    result.append(to_write)

                else:
                    # No downstream match: emit the row with match_type -1.
                    to_write = info[0]
                    to_write['time_diff'] = ""
                    to_write['transtime_down'] = ""
                    to_write['transtime_up'] = to_write['transtime']
                    to_write['match_type'] = -1
                    if "警" in lience:
                        if lience[2] == '9':
                            to_write['police_car'] = 1
                        else:
                            to_write['police_car'] = 2
                    else:
                        to_write['police_car'] = 0
                    result.append(to_write)
            df = pd.DataFrame(result)
            # Write the upstream-direction match file.
            output_file_path = up_path
            df[['vlp', 'feevehicletype', 'transtime_up', 'transtime_down', 'time_diff', 'match_type', 'police_car']].to_csv(output_file_path, index=False, encoding='utf-8')
        if down_flag:
            # Mirror of the up_flag branch, driven from the downstream side.
            time_list = []
            for lience, info in down_gantry_df_dict_all.items():
                if lience in up_gantry_df_dict_all.keys():
                    duration = info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp']
                    if duration > 0:
                        time_list.append(duration)
            time_mean = np.mean(time_list)
            print("平均时间", time_mean)
            result = []

            for lience, info in down_gantry_df_dict_all.items():
                if lience in up_gantry_df_dict_all.keys():
                    to_write = info[0]
                    fen = int((info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp']) / 60)
                    miao = int((info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp']) % 60)
                    temp = str(fen) + "m" + str(miao) + "s"
                    if info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp'] < time_mean:
                        to_write['match_type'] = 0
                    if time_mean <= info[0]['timestamp'] - up_gantry_df_dict_all[lience][0][
                        'timestamp'] < time_mean * 1.5:
                        to_write['match_type'] = 1
                    if time_mean * 1.5 <= info[0]['timestamp'] - up_gantry_df_dict_all[lience][0][
                        'timestamp'] < time_mean * 3:
                        to_write['match_type'] = 2
                    if time_mean * 3 <= info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp']:
                        to_write['match_type'] = 3
                    if info[0]['timestamp'] < up_gantry_df_dict_all[lience][0]['timestamp']:
                        to_write['match_type'] = -2
                    if "警" in lience:
                        if lience[2] == '9':
                            to_write['police_car'] = 1
                        else:
                            to_write['police_car'] = 2
                    else:
                        to_write['police_car'] = 0
                    to_write['time_diff'] = temp
                    to_write['transtime_up'] = up_gantry_df_dict_all[lience][0]['transtime']
                    to_write['transtime_down'] = to_write['transtime']
                    result.append(to_write)
                else:
                    # No upstream match: emit the row with match_type -1.
                    to_write = info[0]
                    to_write['time_diff'] = ""
                    to_write['transtime_up'] = ""
                    to_write['transtime_down'] = to_write['transtime']
                    to_write['match_type'] = -1
                    if "警" in lience:
                        if lience[2] == '9':
                            to_write['police_car'] = 1
                        else:
                            to_write['police_car'] = 2
                    else:
                        to_write['police_car'] = 0
                    result.append(to_write)
            df = pd.DataFrame(result)
            # Write the downstream-direction match file.
            output_file_path = down_path
            df[['vlp', 'feevehicletype', 'transtime_up', 'transtime_down', 'time_diff', 'match_type', 'police_car']].to_csv(output_file_path, index=False, encoding='utf-8')

    def megred_old(self, up_path, down_path, all_path, flag=1):
        """(Legacy) merge the per-direction match CSVs into one file.

        Superseded by megred(); kept for reference. Sorts the upstream rows
        by transtime_up (column 2) and inserts unmatched downstream rows at
        the position given by their transtime_down (column 3).
        """
        up_data = get_match_data(up_path).copy()
        down_data = get_match_data(down_path).copy()
        # Column 2 is transtime_up.
        timestamps = up_data[:, 2]
        timestamps = pd.to_datetime(timestamps, format='%Y-%m-%d %H:%M:%S')
        # Sort the whole upstream array by transit time.
        sort_indices = np.argsort(timestamps)
        sorted_data = up_data[sort_indices]
        up_time_list = []
        for row in sorted_data:
            up_time_list.append(pd.to_datetime(row[2], format='%Y-%m-%d %H:%M:%S'))
        # NOTE(review): flag indexes this literal; flag=1 selects the scalar 0,
        # which searchsorted does not accept as a sorted sequence — confirm
        # only flag=0 is ever intended here.
        time_list = [up_time_list, 0]
        i = 0
        for row in down_data:
            # Rows whose transtime_up (col 2) is a string are matched rows
            # already present on the upstream side; skip them.
            if type(row[2]) == str:
                continue
            new_row_time = pd.to_datetime(row[3], format='%Y-%m-%d %H:%M:%S')
            # Binary-search the insertion point; offset by the number of rows
            # already inserted so positions stay valid.
            insert_index = np.searchsorted(time_list[flag], new_row_time)
            insert_position = insert_index + i
            i += 1
            sorted_data = np.insert(sorted_data, insert_position, row, axis=0)

        df = pd.DataFrame(sorted_data)

        # Positional columns -> canonical output names.
        new_column_names = {
            0: 'vlp',
            1: 'feevehicletype',
            2: 'transtime_up',
            3: 'transtime_down',
            4: 'time_diff',
            5: 'match_type',
            6: 'police_car'
        }

        df = df.rename(columns=new_column_names)
        # Write the merged file.
        output_file_path = all_path
        df[['vlp', 'feevehicletype', 'transtime_up', 'transtime_down', 'time_diff', 'match_type', 'police_car']].to_csv(
            output_file_path, index=False, encoding='utf-8')

    def megred(self, up_path, down_path, all_path, flag=1):
        """Merge the per-direction match CSVs into one time-ordered file.

        Upstream rows are ordered by transtime_up; downstream-only rows
        (those with no upstream time) are interleaved by transtime_down via
        a concat-and-sort instead of megred_old's per-row np.insert.
        """
        up_data = get_match_data(up_path).copy()
        down_data = get_match_data(down_path).copy()
        # Column 2 is transtime_up.
        timestamps = up_data[:, 2]
        timestamps = pd.to_datetime(timestamps)
        # Sort the upstream array by transit time.
        sort_indices = np.argsort(timestamps)
        sorted_data = up_data[sort_indices]
        up_time_list = []
        for row in sorted_data:
            up_time_list.append(pd.to_datetime(row[2]))
        # NOTE(review): flag=1 selects the scalar 0 below — only flag=0 looks
        # usable; run() indeed calls with flag=0. Confirm intent.
        time_list = [up_time_list, 0]
        df_sorted = pd.DataFrame(sorted_data)
        # Downstream-only rows: transtime_up (col 2) is not a string there.
        df_new = pd.DataFrame([row for row in down_data if not isinstance(row[2], str)])
        if len(df_new.columns) == 0:
            # Nothing to interleave.
            df_combined = df_sorted
        else:
            # Parse the downstream times; invalid values become NaT and are dropped.
            df_new['time_parsed'] = pd.to_datetime(df_new.iloc[:, 3], errors='coerce')
            df_new = df_new.dropna(subset=['time_parsed'])
            # Concatenate, attach a common sort key, sort, then drop the key.
            df_combined = pd.concat([df_sorted, df_new.drop(columns=['time_parsed'])])
            df_combined['time_parsed'] = pd.concat([pd.to_datetime(pd.Series(time_list[flag])), df_new['time_parsed']])
            df_combined = df_combined.sort_values(by='time_parsed').drop(columns=['time_parsed'])

        # Positional columns -> canonical output names.
        new_column_names = {
            0: 'vlp',
            1: 'feevehicletype',
            2: 'transtime_up',
            3: 'transtime_down',
            4: 'time_diff',
            5: 'match_type',
            6: 'police_car'
        }

        df = df_combined.rename(columns=new_column_names)
        # Write the merged file.
        output_file_path = all_path
        df[['vlp', 'feevehicletype', 'transtime_up', 'transtime_down', 'time_diff', 'match_type', 'police_car']].to_csv(
            output_file_path, index=False, encoding='utf-8')

    def run(self, output_path):
        """Execute the full pipeline for this file.

        Creates <output_path>/<YYYYMMDD-up-down>/ containing up_/down_/all_
        CSVs and returns that directory's base name.
        """
        self.get_gantry_ID()
        self.get_data()

        file_name = os.path.basename(self.file_path)

        # Extract the date (and gantry IDs) from the file name to build the
        # output folder name. NOTE(review): the local up_gantry/down_gantry
        # parsed here are unused — self.up_gantry/down_gantry are used below.
        if self.flag == 0:
            # 邻垫 expressway naming: "UP,DOWN-YYYYMMDD.csv"
            namefile_list = os.path.basename(file_name).split('-')
            up_gantry = namefile_list[0].split(',')[0]
            down_gantry = namefile_list[0].split(',')[1]
            year = namefile_list[1].split('.')[0][:4]
            month = namefile_list[1].split('.')[0][4:6]
            day = namefile_list[1].split('.')[0][6:8]
        elif self.flag == 1:
            # 绵广 / 纳黔 expressway naming: "UP-DOWN-YYYY-MM-DD-..."
            namefile_list = os.path.basename(file_name).split('-')
            up_gantry = namefile_list[0]
            down_gantry = namefile_list[1]
            year = namefile_list[2]
            month = namefile_list[3]
            day = namefile_list[4]
        filename = year + month + day + "-" + self.up_gantry + "-" + self.down_gantry
        output = os.path.join(output_path, filename)
        if not os.path.exists(output):
            os.makedirs(output)
        up_path = os.path.join(output, 'up_' + filename + ".csv")
        down_path = os.path.join(output, 'down_' + filename + ".csv")
        all_path = os.path.join(output, 'all_' + filename + ".csv")
        print("output path: ", output)
        print(up_path, down_path, all_path)

        # Per-direction match files.
        self.matching_json(self.up_gantry_df, self.down_gantry_df, up_path, down_path)

        # Merged file; flag=0 orders by the upstream timestamps.
        self.megred(up_path, down_path, all_path, flag=0)

        return filename


def get_direct_subdirectories(directory):
    """
    Yield the immediate (non-recursive) subdirectory paths of *directory*.

    Missing or unreadable directories are reported to stdout and produce
    no entries instead of raising.

    :param directory: path of the directory to inspect.
    """
    try:
        with os.scandir(directory) as entries:
            for entry in entries:
                if entry.is_dir():
                    yield entry.path
    except FileNotFoundError:
        print(f"目录 {directory} 不存在")
    except PermissionError:
        print(f"没有权限访问目录 {directory}")


def get_true(event_file_path):
    """Read the ground-truth accident spreadsheet into a lookup dict.

    Returns {upstream_gantry_id: {"list": [accident records], "down":
    downstream_gantry_id}}, where each record holds the zero-padded
    date/time fields plus direction, stake number, road type and accident
    class, all as strings.
    """
    event_true_dict = {}
    df = pd.read_excel(event_file_path)
    for _, accident in df.iterrows():
        up = str(accident["事故最近门架编号2"])
        # First occurrence of an upstream gantry creates its entry; the
        # paired downstream gantry comes from that first row.
        entry = event_true_dict.setdefault(
            up, {"list": [], "down": str(accident["事故最近门架编号1"])}
        )
        entry["list"].append({
            "年": str(accident["年"]),
            "月": '{:02d}'.format(accident["月"]),
            "日": '{:02d}'.format(accident["日"]),
            "时": '{:02d}'.format(accident["时"]),
            "分": '{:02d}'.format(accident["分"]),
            "方向": str(accident["方向"]),
            "桩号": str(accident["桩号"]),
            "道路类型": str(accident["道路类型"]),
            "事故分类": str(accident["事故分类"]),
        })

    return event_true_dict


def process_csv_file(csv_file, folder_path, output_path, event_true_dict, flag, name):
    """Run the full pipeline (preprocess + Gantry analysis) for one raw file.

    Returns the input file name so the caller can report completion.
    """
    source = os.path.join(folder_path, csv_file)

    # Stage 1: split, match and merge the raw transactions into CSV outputs.
    result_name = Preprocess(source, flag).run(output_path)
    result_dir = os.path.join(output_path, result_name)
    print(f"Processing {csv_file}, save data and png in: {result_dir}")

    # Stage 2: downstream analysis / plotting over the generated directory.
    Gantry(result_dir).run(event_true_dict, name)

    return csv_file


if __name__ == '__main__':
    # Batch run over the 铜锣山 simulated data: process every CSV in
    # folder_path in parallel and write results under output_path.
    # (Earlier single-dataset driver variants were removed as dead code.)
    event_true_dict = {}  # no ground-truth accidents for the simulated run
    # Input/output locations for this batch.
    folder_path = r"D:\GJ\项目\事故检测\模拟数据\output\up_down\2025-03-26"
    output_path = r'D:\GJ\项目\事故检测\模拟数据\output\new\2025-03-26'
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(output_path, exist_ok=True)

    # Collect the CSV files to process.
    all_files = os.listdir(folder_path)
    csv_files = [file for file in all_files if file.endswith('.csv')]

    # CPU-bound pipeline -> process pool (one worker per core).
    with ProcessPoolExecutor(max_workers=os.cpu_count()) as executor:
        futures = [
            executor.submit(
                process_csv_file,
                csv_file,
                folder_path,
                output_path,
                event_true_dict,
                flag=0,
                name="铜锣山(仿真)"
            )
            for csv_file in csv_files
        ]

        # Collect results in submission order; report per-file failures
        # without aborting the rest of the batch.
        for future in futures:
            try:
                result = future.result()  # blocks until this task finishes
                print(f"完成处理: {result}")
            except Exception as e:
                print(f"处理文件时出错: {e}")
