import os
import csv
import uuid
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import chardet


class Gantry:
    def __init__(self, file_path):
        """Prepare per-file state for one gantry-pair CSV."""
        self.file_path = file_path
        # Upstream / downstream gantry IDs parsed from the file name by get_gantry_ID().
        self.up_gantry = None
        self.down_gantry = None
        # Row subsets for each gantry, filled in by get_data().
        self.up_gantry_df = None
        self.down_gantry_df = None
        # Speed ceiling in km/h (divided by 3.6 to get m/s where the section
        # length is estimated in run()).
        self.max_speed = 120
        # Estimated section length in metres; None until computed.
        self.dist = None

    def get_gantry_ID(self):
        file_name = os.path.basename(self.file_path)
        # 使用逗号分隔字符串
        parts = file_name.split(',')
        # 提取前两个部分
        self.up_gantry = parts[0]
        self.down_gantry = parts[1].split('-')[0]  # 使用连字符进一步分割第二个部分
        # print("gantryid:", self.up_gantry, self.down_gantry)

    def get_data(self):
        # 使用pandas读取CSV文件
        # df = pd.read_csv(self.file_path, sep='\t')
        df = pd.read_csv(self.file_path)
        # 清理列名，去除多余的空格和逗号
        df.columns = [col.strip().replace(',', '') for col in df.columns]
        # df.columns = [col.strip() for col in df.columns]
        print("DataFrame Columns:", df.columns)
        df['feevehicletype'] = df['feevehicletype'].astype(str)
        # 清理每行数据中的多余符号
        for col in df.columns:
            df[col] = df[col].str.strip(' ')
        # # 筛选 gantryid 列值为 up_gantry 的所有行
        self.up_gantry_df = df[df['gantryid'] == self.up_gantry]
        self.down_gantry_df = df[df['gantryid'] == self.down_gantry]
        # print("up_gantry_df:", self.up_gantry_df.head())

        # # 获取清理后的列名
        # cleaned_column_names = list(df.columns)
        # # 打印列名
        # print("Cleaned Column Names:", cleaned_column_names)

    def get_flow(self, df, time_interval):
        # 将transtime列转换为datetime类型
        df['transtime'] = pd.to_datetime(df['transtime'], format='%Y-%m-%dT%H:%M:%S.%f')
        # 假设我们按每10分钟统计一次流量
        # 使用pd.Grouper(freq='10T')来定义10分钟的时间窗口, 然后使用size()计算每个时间窗口内的车辆数量, 并重置索引
        # traffic_flow = df.groupby([pd.Grouper(key='transtime', freq='10T')]).size().reset_index(name='vehicle_count')
        grouped = df.groupby([pd.Grouper(key='transtime', freq=time_interval), 'feevehicletype']).size().unstack(fill_value=0)
        # 计算总车流量
        total_flow = grouped.sum(axis=1)
        # 计算各车型占比
        vehicle_percentage = grouped.div(total_flow, axis=0)

        return total_flow, vehicle_percentage

    def matching_json(self, up_gantry_df, down_gantry_df, up_path, down_path, up_flag=True, down_flag=True):
        """Pair upstream and downstream passages of the same plate and save CSVs.

        Each upstream record is matched to the nearest *later* downstream
        record with the same plate (``vlp``); multi-trip plates are split into
        synthetic keys (plate + UUID) so every trip is matched independently.
        Writes ``up_path`` (upstream-oriented) and/or ``down_path``
        (downstream-oriented) CSVs with columns: vlp, feevehicletype,
        transtime_up, transtime_down, time_diff, match_type, police_car.

        match_type: 0 = faster than the mean travel time, 1 = up to 1.5x mean,
        2 = up to 3x mean, 3 = slower than 3x mean, -1 = no match,
        -2 = downstream time earlier than upstream.
        police_car: 1 or 2 when the plate contains "警", else 0.

        NOTE(review): mutates both input DataFrames in place (parses
        ``transtime`` and adds a ``timestamp`` column).
        """
        up_gantry_df['transtime'] = pd.to_datetime(up_gantry_df['transtime'], format='%Y-%m-%dT%H:%M:%S.%f')
        up_gantry_df['timestamp'] = up_gantry_df['transtime'].apply(lambda x: x.timestamp())
        up_gantry_df_dict = up_gantry_df.to_dict(orient='records')
        down_gantry_df['transtime'] = pd.to_datetime(down_gantry_df['transtime'], format='%Y-%m-%dT%H:%M:%S.%f')
        down_gantry_df['timestamp'] = down_gantry_df['transtime'].apply(lambda x: x.timestamp())
        down_gantry_df_dict = down_gantry_df.to_dict(orient='records')

        # Group upstream records by plate; remember plates seen more than once.
        up_gantry_df_dict_all = {}
        repeat_list_up = []
        for up_gantry_one in up_gantry_df_dict:
            if up_gantry_one['vlp'] not in up_gantry_df_dict_all.keys():
                up_gantry_df_dict_all[up_gantry_one['vlp']] = [up_gantry_one]
            else:
                # Multi-trip plate upstream.
                if up_gantry_one['vlp'] not in repeat_list_up:
                    repeat_list_up.append(up_gantry_one['vlp'])
                up_gantry_df_dict_all[up_gantry_one['vlp']].append(up_gantry_one)

        # Group downstream records by plate; remember multi-trip plates too.
        down_gantry_df_dict_all = {}
        repeat_list_down = []
        for down_gantry_one in down_gantry_df_dict:
            if down_gantry_one['vlp'] not in down_gantry_df_dict_all.keys():
                down_gantry_df_dict_all[down_gantry_one['vlp']] = [down_gantry_one]
            else:
                # Multi-trip plate downstream.
                if down_gantry_one['vlp'] not in repeat_list_down:
                    repeat_list_down.append(down_gantry_one['vlp'])
                down_gantry_df_dict_all[down_gantry_one['vlp']].append(down_gantry_one)

        # Resolve plates with several upstream trips: greedily match each
        # upstream passage to the nearest later downstream passage.
        for repeat_id in repeat_list_up:
            if repeat_id in down_gantry_df_dict_all.keys():
                up_gantry_df_dict_list = up_gantry_df_dict_all[repeat_id]   # several records
                down_gantry_df_dict_list = down_gantry_df_dict_all[repeat_id]   # one or more records
                for up_one in up_gantry_df_dict_list:
                    up_one_time = up_one['timestamp']
                    min_diff = 100000
                    target_one = None
                    for down_one in down_gantry_df_dict_list:
                        down_one_time = down_one['timestamp']
                        if up_one_time < down_one_time:
                            diff = down_one_time - up_one_time
                            if diff < min_diff:
                                min_diff = diff
                                target_one = down_one

                    # A fresh UUID makes a unique key for this single trip.
                    uuid_one = str(uuid.uuid4())
                    if target_one is not None:
                        up_gantry_df_dict_all[repeat_id + uuid_one] = [up_one]
                        down_gantry_df_dict_all[repeat_id + uuid_one] = [target_one]
                        # Matched: remove the used downstream record from the pool.
                        down_gantry_df_dict_all[repeat_id] = [x for x in down_gantry_df_dict_all[repeat_id] if x != target_one]
                    else:
                        # No later downstream passage: keep the trip unmatched.
                        up_gantry_df_dict_all[repeat_id + uuid_one] = [up_one]

                # Drop the now-resolved multi-trip keys.
                del up_gantry_df_dict_all[repeat_id]
                if len(down_gantry_df_dict_all[repeat_id]) == 0:
                    del down_gantry_df_dict_all[repeat_id]
            else:
                # Multi-trip upstream plate never appeared downstream.
                pass

        # Resolve plates with several downstream trips (single upstream record).
        for repeat_id in repeat_list_down:
            if repeat_id in down_gantry_df_dict_all.keys():
                if repeat_id in up_gantry_df_dict_all.keys():
                    down_gantry_df_dict_list = down_gantry_df_dict_all[repeat_id]   # several records
                    up_gantry_df_dict_list = up_gantry_df_dict_all[repeat_id]   # one record
                    for up_one in up_gantry_df_dict_list:
                        up_one_time = up_one['timestamp']
                        min_diff = 100000
                        target_one = None
                        for down_one in down_gantry_df_dict_list:
                            down_one_time = down_one['timestamp']
                            if up_one_time < down_one_time:
                                diff = down_one_time - up_one_time
                                if diff < min_diff:
                                    min_diff = diff
                                    target_one = down_one

                        if target_one is not None:
                            uuid_one = str(uuid.uuid4())
                            up_gantry_df_dict_all[repeat_id + uuid_one] = [up_one]
                            down_gantry_df_dict_all[repeat_id + uuid_one] = [target_one]
                            # Re-key every other downstream trip so each gets its own row.
                            for i in range(len(down_gantry_df_dict_all[repeat_id])):
                                if down_gantry_df_dict_all[repeat_id][i] != target_one:
                                    uuid_two = str(uuid.uuid4())
                                    down_gantry_df_dict_all[repeat_id + uuid_two] = [down_gantry_df_dict_all[repeat_id][i]]
                        else:
                            # No match at all: re-key every downstream trip as unmatched.
                            for i in range(len(down_gantry_df_dict_all[repeat_id])):
                                    uuid_two = str(uuid.uuid4())
                                    down_gantry_df_dict_all[repeat_id + uuid_two] = [down_gantry_df_dict_all[repeat_id][i]]
                    # Drop the resolved multi-trip keys.
                    del up_gantry_df_dict_all[repeat_id]
                    del down_gantry_df_dict_all[repeat_id]
                else:
                    # Multi-trip downstream plate never appeared upstream.
                    pass
            else:
                # Already resolved while handling the upstream repeats.
                pass

        if up_flag:
            # Mean travel time over all matched pairs (upstream perspective).
            time_list = []
            for lience, info in up_gantry_df_dict_all.items():
                if lience in down_gantry_df_dict_all.keys():
                    time_list.append(down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp'])
            time_mean = np.mean(time_list)
            print("平均时间", time_mean)

            result = []
            for lience, info in up_gantry_df_dict_all.items():
                if lience in down_gantry_df_dict_all.keys():
                    to_write = info[0]
                    # Travel time rendered as "<minutes>m<seconds>s".
                    fen = int((down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp']) / 60)
                    miao = int((down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp']) % 60)
                    temp = str(fen) + "m" + str(miao) + "s"
                    to_write['time_diff'] = temp
                    to_write['transtime_down'] = down_gantry_df_dict_all[lience][0]['transtime']
                    to_write['transtime_up'] = to_write['transtime']

                    # Classify the trip duration relative to the mean travel time.
                    if down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp'] < time_mean:
                        to_write['match_type'] = 0
                    if time_mean <= down_gantry_df_dict_all[lience][0]['timestamp'] - info[0][
                        'timestamp'] < time_mean * 1.5:
                        to_write['match_type'] = 1
                    if time_mean * 1.5 <= down_gantry_df_dict_all[lience][0]['timestamp'] - info[0][
                        'timestamp'] < time_mean * 3:
                        to_write['match_type'] = 2
                    if time_mean * 3 <= down_gantry_df_dict_all[lience][0]['timestamp'] - info[0]['timestamp']:
                        to_write['match_type'] = 3
                    if down_gantry_df_dict_all[lience][0]['timestamp'] < info[0]['timestamp']:
                        to_write['match_type'] = -2
                    # Plates containing "警" are police cars; index 2 of the key
                    # distinguishes the two sub-categories — presumably a plate
                    # format convention, TODO confirm.
                    if "警" in lience:
                        if lience[2] == '9':
                            to_write['police_car'] = 1
                        else:
                            to_write['police_car'] = 2
                    else:
                        to_write['police_car'] = 0

                    result.append(to_write)

                else:
                    # Unmatched upstream record.
                    to_write = info[0]
                    to_write['time_diff'] = ""
                    to_write['transtime_down'] = ""
                    to_write['transtime_up'] = to_write['transtime']
                    to_write['match_type'] = -1
                    if "警" in lience:
                        if lience[2] == '9':
                            to_write['police_car'] = 1
                        else:
                            to_write['police_car'] = 2
                    else:
                        to_write['police_car'] = 0
                    result.append(to_write)
            df = pd.DataFrame(result)
            # Save the upstream-oriented match table.
            output_file_path = up_path
            df[['vlp', 'feevehicletype', 'transtime_up', 'transtime_down', 'time_diff', 'match_type', 'police_car']].to_csv(output_file_path, index=False, encoding='utf-8')
        if down_flag:
            # Mean travel time over all matched pairs (downstream perspective).
            time_list = []
            for lience, info in down_gantry_df_dict_all.items():
                if lience in up_gantry_df_dict_all.keys():
                    time_list.append(info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp'])
            time_mean = np.mean(time_list)
            print("平均时间", time_mean)
            result = []

            for lience, info in down_gantry_df_dict_all.items():
                if lience in up_gantry_df_dict_all.keys():
                    to_write = info[0]
                    fen = int((info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp']) / 60)
                    miao = int((info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp']) % 60)
                    temp = str(fen) + "m" + str(miao) + "s"
                    if info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp'] < time_mean:
                        to_write['match_type'] = 0
                    if time_mean <= info[0]['timestamp'] - up_gantry_df_dict_all[lience][0][
                        'timestamp'] < time_mean * 1.5:
                        to_write['match_type'] = 1
                    if time_mean * 1.5 <= info[0]['timestamp'] - up_gantry_df_dict_all[lience][0][
                        'timestamp'] < time_mean * 3:
                        to_write['match_type'] = 2
                    if time_mean * 3 <= info[0]['timestamp'] - up_gantry_df_dict_all[lience][0]['timestamp']:
                        to_write['match_type'] = 3
                    if info[0]['timestamp'] < up_gantry_df_dict_all[lience][0]['timestamp']:
                        to_write['match_type'] = -2
                    if "警" in lience:
                        if lience[2] == '9':
                            to_write['police_car'] = 1
                        else:
                            to_write['police_car'] = 2
                    else:
                        to_write['police_car'] = 0
                    to_write['time_diff'] = temp
                    to_write['transtime_up'] = up_gantry_df_dict_all[lience][0]['transtime']
                    to_write['transtime_down'] = to_write['transtime']
                    result.append(to_write)
                else:
                    # Unmatched downstream record.
                    to_write = info[0]
                    to_write['time_diff'] = ""
                    to_write['transtime_up'] = ""
                    to_write['transtime_down'] = to_write['transtime']
                    to_write['match_type'] = -1
                    if "警" in lience:
                        if lience[2] == '9':
                            to_write['police_car'] = 1
                        else:
                            to_write['police_car'] = 2
                    else:
                        to_write['police_car'] = 0
                    result.append(to_write)
            df = pd.DataFrame(result)
            # Save the downstream-oriented match table.
            output_file_path = down_path
            df[['vlp', 'feevehicletype', 'transtime_up', 'transtime_down', 'time_diff', 'match_type', 'police_car']].to_csv(output_file_path, index=False, encoding='utf-8')

    def megred_old(self, up_path, down_path, all_path, flag=1):
        """Merge the upstream and downstream match CSVs into one file (legacy).

        Sorts the upstream rows chronologically, then inserts the
        downstream-only rows one by one at the position found by
        ``np.searchsorted`` on their downstream time.

        NOTE(review): ``time_list[1]`` is the literal integer 0, so flag=1
        would pass 0 to searchsorted — only flag=0 looks usable. TODO confirm.
        """
        # Load both match tables as raw NumPy arrays.
        up_data = self.get_match_data(up_path).copy()
        down_data = self.get_match_data(down_path).copy()
        # Column 2 is transtime_up.
        timestamps = up_data[:, 2]
        print(timestamps)
        # Parse to pandas Timestamps (mixed formats tolerated).
        timestamps = pd.to_datetime(timestamps, format='mixed')
        # Sort the upstream rows chronologically.
        sort_indices = np.argsort(timestamps)
        sorted_data = up_data[sort_indices]
        # (Old variant ordered by downstream timestamps; kept for reference.)
        # down_time_list = []
        # for row in sorted_data:
        #     if type(row[3]) == str:
        #         down_time = pd.to_datetime(row[3], format='%Y-%m-%d %H:%M:%S')
        #         down_time_list.append(down_time)
        #     else:
        #         down_time_list.append(down_time_list[-1])
        # Insert downstream-only rows into the sorted upstream rows.
        up_time_list = []
        for row in sorted_data:
            up_time_list.append(pd.to_datetime(row[2], format='mixed'))
        time_list = [up_time_list, 0]
        i = 0
        for row in down_data:
            # A string transtime_up means the row was matched and is already
            # present in the upstream file — presumably; TODO confirm.
            if type(row[2]) == str:
                continue
            new_row_time = pd.to_datetime(row[3], format='mixed')
            # Insertion point, offset by how many rows were inserted so far.
            insert_index = np.searchsorted(time_list[flag], new_row_time)
            insert_position = insert_index + i
            i += 1
            sorted_data = np.insert(sorted_data, insert_position, row, axis=0)

        df = pd.DataFrame(sorted_data)

        # Restore the original column names (to_numpy dropped them).
        new_column_names = {
            0: 'vlp',
            1: 'feevehicletype',
            2: 'transtime_up',
            3: 'transtime_down',
            4: 'time_diff',
            5: 'match_type',
            6: 'police_car'
        }

        df = df.rename(columns=new_column_names)
        # Save the merged table.
        output_file_path = all_path
        df[['vlp', 'feevehicletype', 'transtime_up', 'transtime_down', 'time_diff', 'match_type', 'police_car']].to_csv(output_file_path, index=False, encoding='utf-8')

    def megred(self, up_path, down_path, all_path, flag=1):
        """Merge the upstream and downstream match CSVs into one sorted file.

        Vectorised replacement for ``megred_old``: concatenates the sorted
        upstream rows with the downstream-only rows and sorts the union by a
        parsed time column.

        NOTE(review): with flag=1, ``time_list[1]`` is the literal integer 0 —
        only flag=0 looks usable (run() calls with flag=0). TODO confirm.
        """
        up_data = self.get_match_data(up_path).copy()
        down_data = self.get_match_data(down_path).copy()
        # Column 2 is transtime_up.
        timestamps = up_data[:, 2]
        print(timestamps)
        # Parse to pandas Timestamps (mixed formats tolerated).
        timestamps = pd.to_datetime(timestamps, format='mixed')
        # Sort the upstream rows chronologically.
        sort_indices = np.argsort(timestamps)
        sorted_data = up_data[sort_indices]
        # Upstream times, in sorted order, for the final combined sort.
        up_time_list = []
        for row in sorted_data:
            up_time_list.append(pd.to_datetime(row[2], format='mixed'))
        time_list = [up_time_list, 0]
        # Downstream-only rows: a non-string transtime_up means the row never
        # matched an upstream record — presumably; TODO confirm.
        df_sorted = pd.DataFrame(sorted_data)
        df_new = pd.DataFrame([row for row in down_data if not isinstance(row[2], str)])
        # Parse the downstream time; unparseable values become NaT and are dropped.
        df_new['time_parsed'] = pd.to_datetime(df_new.iloc[:, 3], errors='coerce', format='mixed')
        df_new = df_new.dropna(subset=['time_parsed'])
        # Combine both frames and sort by the chosen time column.
        df_combined = pd.concat([df_sorted, df_new.drop(columns=['time_parsed'])])
        df_combined['time_parsed'] = pd.concat([pd.to_datetime(pd.Series(time_list[flag])), df_new['time_parsed']])
        df_combined = df_combined.sort_values(by='time_parsed').drop(columns=['time_parsed'])

        # Restore the original column names (to_numpy dropped them).
        new_column_names = {
            0: 'vlp',
            1: 'feevehicletype',
            2: 'transtime_up',
            3: 'transtime_down',
            4: 'time_diff',
            5: 'match_type',
            6: 'police_car'
        }

        df = df_combined.rename(columns=new_column_names)
        # Save the merged table.
        output_file_path = all_path
        df[['vlp', 'feevehicletype', 'transtime_up', 'transtime_down', 'time_diff', 'match_type', 'police_car']].to_csv(output_file_path, index=False, encoding='utf-8')

    # def get_megred(self, df_np):


    def get_match_data(self, path):
        """Read a match CSV (auto-detecting its encoding) into a NumPy array."""
        # Sniff the encoding from the first 10 KB with chardet.
        with open(path, 'rb') as handle:
            sniffed = chardet.detect(handle.read(10000))
        encoding = sniffed['encoding']
        # Read with the detected encoding and drop down to a raw array.
        frame = pd.read_csv(path, encoding=encoding)
        return frame.to_numpy()

    # def deal_np_data(self, df_np):
    #
    #     # 找到不包含值2的行
    #     rows_to_keep = ~np.any(df_np == 2, axis=1)
    #     # 使用布尔索引删除指定行
    #     filtered_arr = df_np[rows_to_keep]
    #     time_diff_list = []
    #     for row in filtered_arr:
    #         time_diff = row[4]
    #         # 解析时间差字符串
    #         parts = time_diff.split('m')
    #         fen = int(parts[0])  # 分钟数
    #         miao = int(parts[1].split('s')[0])  # 秒数
    #         # 计算总秒数
    #         seconds = fen * 60 + miao
    #         time_diff_list.append([seconds])
    #     # 假设我们有一个新的列要添加
    #     new_column = np.array(time_diff_list)
    #     arr_with_new_column = np.concatenate((filtered_arr, new_column), axis=1)
    #
    #     return arr_with_new_column


    def get_mean_time(self, df_np):
        """Summarise travel times over matched rows.

        Rows carry a "XmYs" time string in column 4 and a match_type in
        column 5; rows with match_type < 0 (unmatched) are skipped.

        Returns:
            (total_seconds_sum, seconds_average, seconds_max, seconds_min)
        """
        total_num = len(df_np)
        durations = []
        for record in df_np:
            if int(record[5]) < 0:
                continue  # skip unmatched rows
            # Parse "<minutes>m<seconds>s" into total seconds.
            minutes_txt, _, tail = record[4].partition('m')
            seconds = int(minutes_txt) * 60 + int(tail.split('s')[0])
            durations.append(seconds)
            if seconds < 0:
                print("time_diff: ", record[4], record)
        samples = np.array(durations)
        total_seconds_sum = np.sum(samples)
        seconds_average = np.mean(samples)
        seconds_max = np.max(samples)
        seconds_min = np.min(samples)

        print(f"Total seconds: {total_seconds_sum}, seconds_average: {seconds_average}, num: {len(durations)}, "
              f"total_num: {total_num}, max: {seconds_max}, min: {seconds_min}")
        return total_seconds_sum, seconds_average, seconds_max, seconds_min

    def get_vehicle_class_pass_time(self, df_np):
        """Group travel times by vehicle class and derive a mean speed per class.

        Requires ``self.dist`` (section length in metres) to be set.

        Returns:
            {class: {"time_data": [s, ...], "sum_time", "mean_time", "num",
                     "speed"}} where speed is the class mean speed in m/s.
        """
        vehicle_class_pass = {}

        for record in df_np:
            if int(record[5]) < 0:
                continue  # skip unmatched rows
            # Parse "<minutes>m<seconds>s" into total seconds.
            minutes_txt, _, tail = record[4].partition('m')
            seconds = int(minutes_txt) * 60 + int(tail.split('s')[0])
            bucket = vehicle_class_pass.setdefault(record[1], {'time_data': []})
            bucket['time_data'].append(seconds)

        for stats in vehicle_class_pass.values():
            samples = np.array(stats['time_data'])
            stats['sum_time'] = np.sum(samples)
            stats['mean_time'] = np.mean(samples)
            stats['num'] = len(samples)
            # Average speed (m/s) over the estimated section length.
            stats['speed'] = self.dist / stats['mean_time']

        return vehicle_class_pass

    def get_vlp_speed(self, df_np):
        """Aggregate per-plate speeds from matched rows.

        Requires ``self.dist`` (section length in metres) to be set.

        Returns:
            {vlp: {"type", "speed": [m/s, ...], "sum_time": [s, ...],
                   "num", "mean_speed"}}
        """
        vlp_speed_dict = {}
        for record in df_np:
            if int(record[5]) < 0:
                continue  # skip unmatched rows
            # Parse "<minutes>m<seconds>s" into total seconds.
            minutes_txt, _, tail = record[4].partition('m')
            seconds = int(minutes_txt) * 60 + int(tail.split('s')[0])
            # Speed in m/s over the estimated section length.
            speed = round(self.dist / seconds, 2)
            if speed < 0:
                print("速度为负", speed, self.dist, seconds, record)
            entry = vlp_speed_dict.get(record[0])
            if entry is None:
                vlp_speed_dict[record[0]] = {
                    "type": record[1],
                    "speed": [speed],
                    "sum_time": [seconds],
                    "num": 1
                }
            else:
                entry['speed'].append(speed)
                entry['sum_time'].append(seconds)
                entry['num'] += 1

        for plate_stats in vlp_speed_dict.values():
            plate_stats['mean_speed'] = round(sum(plate_stats['speed']) / plate_stats['num'], 2)

        return vlp_speed_dict

    def get_vehicle_data(self, df_np, vehicle_name):
        # 定义条件：第一列等于 "A123"
        condition = df_np[:, 0] == vehicle_name
        # 使用布尔索引获取符合条件的行
        matching_rows = df_np[condition]

        return matching_rows

    def time_speed(self, df_np):
        """Map each upstream pass time (column 2) to an estimated speed (m/s).

        Only the first record per time value is kept; unmatched rows
        (match_type < 0) are skipped. Requires ``self.dist`` in metres.
        """
        speed_by_time = {}
        for record in df_np:
            if int(record[5]) < 0:
                continue  # skip unmatched rows
            # Parse "<minutes>m<seconds>s" into total seconds.
            minutes_txt, _, tail = record[4].partition('m')
            elapsed = int(minutes_txt) * 60 + int(tail.split('s')[0])
            velocity = round(self.dist / elapsed, 2)
            if record[2] not in speed_by_time:
                speed_by_time[record[2]] = velocity
        return speed_by_time

    def get_vlp_time_csv(self, vlp_speed_dict, filename):
        vlp_time_dict = {}
        for vlp, info in vlp_speed_dict.items():
            vlp_time_dict[vlp] = {
                "type": info['type'],
                "mean_speed": info['mean_speed'],
                "mean_time": sum(info['sum_time']) / info['num'],
                "num": info['num']
            }
            time_list = info['sum_time']
            i = 0
            for time in time_list:
                vlp_time_dict[vlp]["time" + str(i)] = time
                i += 1

        self.save_csv(vlp_time_dict, filename, "vlp")

    def get_vehicle_class_pass_time_csv(self, vehicle_class_data, filename):
        """Persist per-class timing stats to CSV, then plot them."""
        summary = {
            cls: {
                "sum_time": stats['sum_time'],
                "mean_time": stats['mean_time'],
                "num": stats['num'],
                "mean_speed": stats['speed']
            }
            for cls, stats in vehicle_class_data.items()
        }

        self.save_csv(summary, filename, "type")

        self.show_vlp_time_data(summary)

    def save_csv(self, dict, filename, index):
        # 获取所有可能的列名
        all_keys = set()
        for sub_dict in dict.values():
            all_keys.update(sub_dict.keys())
        # 将嵌套字典扁平化为行数据
        rows = []
        for key, sub_dict in dict.items():
            row = {index: key}  # 添加索引列
            for k, v in sub_dict.items():
                row[k] = v
            rows.append(row)
        # 添加所有可能的列名
        fieldnames = [index] + sorted(all_keys)
        with open(filename, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            # 写入表头
            writer.writeheader()
            # 写入行数据
            writer.writerows(rows)

    def show_vlp_time_data(self, data):
        """Bar-chart the mean travel time per vehicle type.

        Fix: keys and values are now selected and sorted *together*. The old
        code took mean_times in dict insertion order but sorted the x-tick
        labels, so bars could be labelled with the wrong type (and the two
        lists could even differ in length when an entry lacked 'mean_time').
        """
        # Only entries that actually carry a mean_time, sorted by type.
        ordered_keys = sorted(k for k in data if 'mean_time' in data[k])
        mean_times = [data[k]['mean_time'] for k in ordered_keys]
        # Create the figure and axes.
        fig, ax = plt.subplots()
        bar_width = 0.35
        indices = np.arange(len(ordered_keys))
        # One bar per vehicle type.
        rects = ax.bar(indices, mean_times, bar_width, label='Mean_Time')
        # Integer value label on top of each bar.
        ax.bar_label(rects, labels=[int(v) for v in mean_times], padding=3)
        # Axis labels, title, tick labels, legend.
        ax.set_xlabel('Type')
        ax.set_ylabel('Time(s)')
        ax.set_title('Vehicle Type and Time Data')
        ax.set_xticks(indices)
        ax.set_xticklabels(ordered_keys)
        ax.legend()
        # Display the chart (blocks until the window is closed).
        plt.show()

    # def get_one_type_time_csv(self, df_np, vtype, filename):
    #     for row in df_np:
    #
    #         # if type(time_diff) != str:
    #         #     continue
    #         if int(row[5]) < 0:
    #             continue
    #         time_diff = row[4]
    #         # 解析时间差字符串
    #         parts = time_diff.split('m')
    #         fen = int(parts[0])  # 分钟数
    #         miao = int(parts[1].split('s')[0])  # 秒数
    #         # 计算总秒数
    #         seconds = fen * 60 + miao
    #         total_seconds.append(seconds)
    #         if seconds < 0:
    #             print("time_diff: ", time_diff, row)

    def run(self):
        """End-to-end pipeline: parse gantry IDs, load rows, match, merge."""
        self.get_gantry_ID()
        self.get_data()
        # # "10T" = 10 minutes, "1H" = 1 hour
        # total_flow, vehicle_percentage = self.get_flow(self.up_gantry_df, time_interval='1H')
        # print(total_flow)

        # Build per-direction match CSV paths under a per-input-file directory.
        file_name = os.path.basename(self.file_path)
        # NOTE(review): hard-coded output root; adjust per deployment.
        output_path = r'/home/gj/Download/新建文件夹/output'
        # .replace(',', '-')
        output = os.path.join(output_path, file_name.split('.')[0])
        if not os.path.exists(output):
            os.makedirs(output)
        up_path = os.path.join(output, 'up_' + file_name)
        down_path = os.path.join(output, 'down_' + file_name)
        all_path = os.path.join(output, 'all_' + file_name)
        print("output path: ", output)
        print(up_path, down_path, all_path)

        # Write the upstream / downstream match files.
        self.matching_json(self.up_gantry_df, self.down_gantry_df, up_path, down_path)

        # Merge them; flag=0 sorts by upstream timestamps, flag=1 by downstream.
        self.megred(up_path, down_path, all_path, flag=0)

        # # Disabled downstream analysis: load the merged table as NumPy,
        # df_np = self.get_match_data(all_path)
        #
        # # compute total / mean / max / min travel time,
        # total_seconds_sum, seconds_average, seconds_max, seconds_min = self.get_mean_time(df_np)
        # print("最小间隔时长: ", seconds_min)
        #
        # # estimate the section length in metres from the fastest trip,
        # self.dist = round(self.max_speed / 3.6 * seconds_min, 2)
        # print("估算区间距离(米): ", self.dist)
        #
        # if self.dist is not None and self.dist > 0:
        #     # per class: {"time_data": [], "sum_time": float, "mean_time": float, "num": int, "speed": float}
        #     vehicle_class_data = self.get_vehicle_class_pass_time(df_np)
        #     filename1 = r'..\output\vehicle_class_pass_time.csv'
        #     self.get_vehicle_class_pass_time_csv(vehicle_class_data, filename1)

            # # plate -> speed table
            # vlp_speed_dict = self.get_vlp_speed(df_np)
            # filename2 = r'..\output\vlp_time.csv'
            # self.get_vlp_time_csv(vlp_speed_dict, filename2)

            # # anomaly screening
            # abnormal_data = []
            # for vlp, info in vlp_speed_dict.items():
            #     vtype = info["type"]
            #     mean_speed = info["mean_speed"]
            #     num = info["num"]
            #     vclass_speed = vehicle_class_data[vtype]["speed"]
            #     if mean_speed * 10.0 <= vclass_speed:
            #         matching_rows = self.get_vehicle_data(df_np, vlp)
            #         print("异常数据", vlp, vtype, mean_speed, vclass_speed, matching_rows)
            #         # print(matching_rows)


if __name__ == '__main__':
    # Directory holding one CSV per gantry pair, named
    # "<up_id>,<down_id>-<date>.csv".
    top_dir = r'/home/gj/Download/新建文件夹/up_down'
    # Process every entry; enumerate replaces the old manual counter idiom
    # (i = 0; i += 1 inside the loop).
    for i, entry in enumerate(os.listdir(top_dir), start=1):
        full_path = os.path.join(top_dir, entry)
        print(i, full_path)
        gantry = Gantry(full_path)
        gantry.run()
