import os
import time
import csv
import uuid
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import chardet
from show_car_speed import ShowCarSpeed
from show_time_duration import ShowTimeDuration
from show_car_slow_num import ShowCarSlowNum
from show_flow_diff import ShowFlowDiff
from show_pass_rate import ShowPassRate

# Configure matplotlib so Chinese text in figure labels renders correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font for CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # stop the minus sign rendering as a box when saving figures


class Gantry:
    """Analyses matched gantry (toll-gate) transit records for one road section.

    Reads the ``up_G`` / ``down_G`` / ``all_G`` CSV exports found in
    ``dir_path``, derives per-interval statistics (travel time, flow,
    slow vehicles, pass rate, anomalies), writes them back into
    ``dir_path`` as CSV files and renders charts via the ``Show*``
    helper classes.
    """

    def __init__(self, dir_path):
        # Directory holding the input CSVs; all output CSVs go here too.
        self.dir_path = dir_path
        # Per-vehicle-type travel-time statistics, populated by run().
        self.ctype_time = None
        # Assumed maximum speed in km/h, used to estimate section length.
        self.max_speed = 120
        # Estimated section length in metres (derived in run()).
        self.dist = None
        # Default pandas resampling interval ('5T' = 5 minutes).
        self.time_interval = '5T'
        self.showCarSpeed = ShowCarSpeed()
        self.showTimeDuration = ShowTimeDuration()
        self.showCarSlowNum = ShowCarSlowNum()
        self.showFlowDiff = ShowFlowDiff()
        self.showPassRate = ShowPassRate()

    def parse_duration(self, duration_str):
        """Convert a duration string such as ``'3m25s'`` to total seconds.

        Returns None for NaN, non-string, blank or unparseable input
        (previously non-string input raised AttributeError on .strip()).
        """
        if pd.isna(duration_str):
            return None
        if not isinstance(duration_str, str) or duration_str.strip() == '':
            return None
        match = re.match(r'(\d+)m(\d+)s', duration_str)
        if match is None:
            return None
        minutes = int(match.group(1))
        seconds = int(match.group(2))
        return minutes * 60 + seconds

    def get_files_containing_substring(self, directory, substring):
        """Return the full path of a file in ``directory`` whose name
        contains ``substring``, or None when nothing matches.

        Sub-directories are not searched. When several files match, the
        one listed last by os.listdir wins (kept for compatibility).
        """
        file_name = None
        for filename in os.listdir(directory):
            full_path = os.path.join(directory, filename)
            if os.path.isfile(full_path) and substring in filename:
                file_name = full_path
        return file_name

    def get_match_data(self, path):
        """Read a matched-records CSV and keep only validly matched rows.

        The file encoding is auto-detected with chardet (first 10 kB).
        Column index 5 holds match_type; rows with 0 <= match_type < 3
        are kept. Returns the filtered rows as a numpy array.
        """
        with open(path, 'rb') as f:
            result = chardet.detect(f.read(10000))
        detected_encoding = result['encoding']
        df = pd.read_csv(path, encoding=detected_encoding)
        df_np = df.to_numpy()
        # match_type lives in the sixth column (index 5), not index 4 as
        # an earlier comment claimed.
        match_type_col = df_np[:, 5]
        mask = (match_type_col >= 0) & (match_type_col < 3)
        return df_np[mask]

    def get_mean_time(self, df_np):
        """Aggregate travel time in seconds per vehicle type.

        ``df_np`` rows are raw CSV rows where column 1 is the vehicle
        type and column 4 a duration string such as ``'3m25s'``.
        Returns {ctype: {'list', 'sum', 'mean', 'max', 'min'}}.
        """
        ctype_time = {}
        for row in df_np:
            ctype = row[1]
            bucket = ctype_time.setdefault(ctype, {'list': []})
            # Parse the 'XmYs' duration string into total seconds.
            time_diff = row[4]
            parts = time_diff.split('m')
            minutes = int(parts[0])
            secs = int(parts[1].split('s')[0])
            seconds = minutes * 60 + secs
            bucket['list'].append(seconds)
            if seconds < 0:
                # A negative duration indicates a bad match; keep it but warn.
                print("time_diff: ", time_diff, row)
        for ctype, data in ctype_time.items():
            arr = np.array(data['list'])
            data['sum'] = np.sum(arr)
            data['mean'] = np.mean(arr)
            data['max'] = np.max(arr)
            data['min'] = np.min(arr)

        return ctype_time

    def get_car_speed(self, path, time_interval, csv_name, type_flag=None):
        """Write per-interval mean travel time / speed for matched records.

        path          : CSV containing transtime_up, match_type, time_diff
                        and feevehicletype columns.
        time_interval : pandas offset alias for resampling, e.g. '5T'.
        csv_name      : output file name, written into self.dir_path.
        type_flag     : when given, keep only rows of this feevehicletype.

        Requires self.dist (section length in metres) to be set,
        otherwise nothing is written.
        """
        df = pd.read_csv(path, parse_dates=['transtime_up'],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        if type_flag is not None:
            # Fix: honour the requested vehicle type; previously this
            # always compared against the hard-coded constant 1.
            df = df[df['feevehicletype'] == type_flag]
        df = df[(df['match_type'] >= 0) & (df['match_type'] <= 2)]
        # Convert the 'XmYs' duration strings to seconds, drop bad rows.
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        df = df.dropna(subset=['duration_seconds'])
        # Mean duration and sample count per time bucket.
        grouped = (df.groupby(pd.Grouper(key='transtime_up', freq=time_interval))
                   .agg({'duration_seconds': ['mean', 'count']})
                   .reset_index())
        grouped.columns = ['transtime_up', 'duration_seconds', 'count']
        if self.dist is not None and self.dist > 0:
            result = grouped.copy()
            # dist is in metres and duration in seconds, so this value is
            # in m/s — TODO confirm the intended unit with the consumers.
            result["mean_speed"] = result.apply(
                lambda x: round(self.dist / x['duration_seconds'], 2), axis=1)
            output_file_path = os.path.join(self.dir_path, csv_name)
            result[['transtime_up', 'duration_seconds', 'mean_speed', 'count']].to_csv(
                output_file_path, index=False, encoding='utf-8')
        else:
            print("dist is None")

    def get_all_flow(self, up_path, down_path, time_interval, csv_name):
        """Write total/up/down flow and their difference per time bucket.

        For each bucket the up file contributes the number of rows whose
        transtime_down is present (i.e. matched) plus the bucket's total
        row count; likewise for the down file with transtime_up. The two
        tables are aligned positionally, so they are assumed to cover the
        same buckets in the same order — NOTE(review): verify upstream.
        """
        df_up = pd.read_csv(up_path, parse_dates=["transtime_up"],
                            date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        grouped_up = df_up.groupby(pd.Grouper(key="transtime_up", freq=time_interval))
        # Per bucket: matched count (transtime_down not NaN) and total count.
        result_up = grouped_up.agg({
            "transtime_down": lambda x: x.notna().sum()
        }).assign(
            total_count=lambda x: grouped_up.size()
        )
        np_up = result_up.reset_index().to_numpy()
        df_down = pd.read_csv(down_path, parse_dates=["transtime_down"],
                              date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        grouped_down = df_down.groupby(pd.Grouper(key="transtime_down", freq=time_interval))
        result_down = grouped_down.agg({
            "transtime_up": lambda x: x.notna().sum()
        }).assign(
            total_count=lambda x: grouped_down.size()
        )
        np_down = result_down.reset_index().to_numpy()
        fieldnames = ['time', 'total_flow', 'up_flow', 'down_flow', 'flow_diff']
        output_file_path = os.path.join(self.dir_path, csv_name)
        with open(output_file_path, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            writer.writeheader()
            # Only iterate over buckets present in both tables.
            num = min(np_up.shape[0], np_down.shape[0])
            rows = []
            for i in range(num):
                up = np_up[i]
                down = np_down[i]
                # up = [bucket_time, matched, total]; total_flow counts each
                # vehicle once: unmatched up records plus all down records.
                rows.append({
                    'time': up[0],
                    'total_flow': (up[2] - up[1] + down[2]),
                    'up_flow': up[2],
                    'down_flow': down[2],
                    'flow_diff': (up[2] - down[2])
                })

            writer.writerows(rows)

    def _matched_flow_counts(self, path, time_col, time_interval):
        """Count matched passenger-car records per time bucket.

        Keeps rows with feevehicletype == 1, 0 <= match_type <= 2 and a
        parseable time_diff, then returns {bucket_start: count}. Shared
        by both directions of get_mate_all_flow.
        """
        df = pd.read_csv(path, parse_dates=[time_col],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        df = df[df['feevehicletype'] == 1]
        df = df[(df['match_type'] >= 0) & (df['match_type'] <= 2)]
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        df = df.dropna(subset=['duration_seconds'])
        grouped = (df.groupby(pd.Grouper(key=time_col, freq=time_interval))
                   .size().reset_index(name='flow'))
        grouped.set_index(time_col, inplace=True)
        return grouped['flow'].to_dict()

    def get_mate_all_flow(self, up_path, down_path, time_interval, csv_name):
        """Write matched up/down passenger-car flow, total and difference.

        Unlike get_all_flow, the two directions are merged on the bucket
        timestamp, so a bucket missing on either side defaults to 0.
        """
        up_flow_data = self._matched_flow_counts(up_path, "transtime_up", time_interval)
        down_flow_data = self._matched_flow_counts(down_path, "transtime_down", time_interval)

        # Union of bucket timestamps from both directions, in time order.
        merged_data = {}
        all_times = set(up_flow_data) | set(down_flow_data)
        for ts in sorted(all_times):
            merged_data[ts] = {
                'up_flow': up_flow_data.get(ts, 0),    # 0 when no up data
                'down_flow': down_flow_data.get(ts, 0)  # 0 when no down data
            }
        fieldnames = ['time', 'total_flow', 'up_flow', 'down_flow', 'flow_diff']
        output_file_path = os.path.join(self.dir_path, csv_name)
        with open(output_file_path, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            writer.writeheader()
            rows = []
            for ts, data in merged_data.items():
                rows.append({
                    'time': ts,
                    'total_flow': (data['up_flow'] + data['down_flow']),
                    'up_flow': data['up_flow'],
                    'down_flow': data['down_flow'],
                    'flow_diff': (data['up_flow'] - data['down_flow'])
                })

            writer.writerows(rows)

    def get_slow_vehicle(self, all_path, time_interval, csv_name):
        """Count slow/abnormal vehicles per time bucket; write time,count.

        Keeps feevehicletype < 5 and 1 <= match_type <= 3 with a
        parseable time_diff.
        """
        df = pd.read_csv(all_path, parse_dates=["transtime_up"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        df = df[df['feevehicletype'] < 5]
        df = df[(df['match_type'] >= 1) & (df['match_type'] <= 3)]
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        df = df.dropna(subset=['duration_seconds'])
        # One count per time bucket. Fix: the original called reset_index()
        # a second time, injecting a junk 'index' column and forcing
        # off-by-one positional access.
        grouped = (df.groupby(pd.Grouper(key='transtime_up', freq=time_interval))
                   .size().reset_index(name='count'))
        np_counts = grouped.to_numpy()  # rows of [bucket_time, count]

        fieldnames = ['time', 'count']
        output_file_path = os.path.join(self.dir_path, csv_name)
        with open(output_file_path, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            writer.writeheader()
            rows = [{'time': row[0], 'count': row[1]} for row in np_counts]

            writer.writerows(rows)

    def get_type_time_change(self, down_path, csv_name):
        """Export (plate, vehicle type, down time, duration seconds) for
        valid matches — the data behind the type-vs-duration scatter plot.
        """
        df = pd.read_csv(down_path, parse_dates=["transtime_down"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        df = df[(df['match_type'] >= 0) & (df['match_type'] < 3)]
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        df = df.dropna(subset=['duration_seconds'])
        selected = df[['vlp', 'feevehicletype', 'transtime_down', 'duration_seconds']]
        output_file_path = os.path.join(self.dir_path, csv_name)
        selected.to_csv(output_file_path, index=False, encoding='utf-8')

    def get_abnormal_data(self, all_path, csv_name):
        """Export rows flagged as abnormal (match_type == 3) that still
        carry a parseable time_diff.
        """
        df = pd.read_csv(all_path, parse_dates=["transtime_up"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        df = df[df['match_type'] == 3]
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        df = df.dropna(subset=['duration_seconds'])

        output_file_path = os.path.join(self.dir_path, csv_name)
        df.to_csv(output_file_path, index=False, encoding='utf-8')

    def get_car_pass_rate_data(self, up_path, seconds_average, time_interval, csv_name):
        """Per time bucket, count passenger cars whose travel time is at
        most ``seconds_average`` seconds, and write that share as a
        percentage alongside the raw counts.
        """
        df = pd.read_csv(up_path, parse_dates=["transtime_up"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        df = df[df['feevehicletype'] == 1]
        df = df[(df['match_type'] >= 0) & (df['match_type'] < 3)]
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        df = df.dropna(subset=['duration_seconds'])
        grouped_up = df.groupby(pd.Grouper(key="transtime_up", freq=time_interval))
        # Collect rows in a list and build the frame once — the original
        # pd.concat inside the loop was accidentally O(n^2).
        records = []
        for name, group in grouped_up:
            valid_count = len(group[group['duration_seconds'] <= seconds_average])
            total_count = len(group)
            percentage = (valid_count / total_count * 100) if total_count > 0 else 0
            records.append({'Group_Time': name, 'Valid_Count': valid_count,
                            'Total_Count': total_count, 'Percentage': percentage})
        result = pd.DataFrame(records, columns=['Group_Time', 'Valid_Count',
                                                'Total_Count', 'Percentage'])
        output_file_path = os.path.join(self.dir_path, csv_name)
        result.to_csv(output_file_path, index=False, encoding='utf-8')

    def check_mean_speed(self, ctype):
        """Return the mean travel time in seconds recorded for vehicle
        type ``ctype``, or None for unknown types or unset state.

        Note: despite the name this is a travel *time*, not a speed.
        """
        if self.ctype_time is not None and ctype in self.ctype_time:
            return self.ctype_time[ctype]['mean']
        return None

    def get_all_pass_rate_data(self, up_path, time_interval, csv_name):
        """Per time bucket, count vehicles (all types) that passed no
        slower than their own type's mean travel time, plus the share.

        Requires self.ctype_time to be populated (see get_mean_time).
        """
        df = pd.read_csv(up_path, parse_dates=["transtime_up"],
                         date_parser=lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'))
        df = df[(df['match_type'] >= 0) & (df['match_type'] < 3)]
        df['duration_seconds'] = df['time_diff'].apply(self.parse_duration)
        # Per-row pass threshold: the mean travel time of the row's type.
        df['type_mean_time'] = df['feevehicletype'].apply(self.check_mean_speed)
        df = df.dropna(subset=['duration_seconds'])
        grouped_up = df.groupby(pd.Grouper(key="transtime_up", freq=time_interval))
        # Build rows in a list; the original concat-per-iteration was O(n^2).
        records = []
        for name, group in grouped_up:
            valid_count = len(group[group['duration_seconds'] <= group['type_mean_time']])
            total_count = len(group)
            percentage = (valid_count / total_count * 100) if total_count > 0 else 0
            records.append({'Group_Time': name, 'Valid_Count': valid_count,
                            'Total_Count': total_count, 'Percentage': percentage})
        result = pd.DataFrame(records, columns=['Group_Time', 'Valid_Count',
                                                'Total_Count', 'Percentage'])
        output_file_path = os.path.join(self.dir_path, csv_name)
        result.to_csv(output_file_path, index=False, encoding='utf-8')

    def run(self):
        """Run the analysis pipeline for self.dir_path.

        Returns False when passenger-car (type 1) statistics are missing
        or implausible (minimum travel time <= 10 s), True otherwise.
        """
        up_path = self.get_files_containing_substring(self.dir_path, 'up_G')
        down_path = self.get_files_containing_substring(self.dir_path, 'down_G')
        all_path = self.get_files_containing_substring(self.dir_path, 'all_G')

        # Valid matches only (match_type 0/1/2), as a numpy array.
        df_np = self.get_match_data(all_path)
        # Sum / mean / max / min travel time per vehicle type.
        self.ctype_time = self.get_mean_time(df_np)

        # Passenger cars (type 1) drive the remaining computations.
        if 1 not in self.ctype_time:
            return False
        seconds_min = self.ctype_time[1]['min']
        seconds_average = self.ctype_time[1]['mean']

        # A minimum travel time of <= 10 s means the section is too short
        # (or the data too noisy) to analyse.
        if seconds_min <= 10:
            return False

        # Estimate section length in metres, assuming the fastest car ran
        # at max_speed km/h (the /3.6 converts km/h to m/s).
        self.dist = round(self.max_speed / 3.6 * seconds_min, 2)
        print("估算区间距离(米): ", self.dist)

        # Day-long passenger-car travel-time / mean-speed distribution
        # (interval: half the minimum travel time, rounded down).
        # time_interval0 = str(int(seconds_average / 60 / 2)) + 'T'
        car_speed_csv_name = "car_time_data.csv"
        # self.get_car_speed(path=all_path, time_interval=self.time_interval, type_flag=1, csv_name=car_speed_csv_name)
        # self.showCarSpeed.read_file(os.path.join(self.dir_path, car_speed_csv_name))

        # Up/down gantry flow, flow difference and total flow.
        # time_interval1 = str(int(seconds_average / 60)) + 'T'
        # self.get_all_flow(up_path=up_path, down_path=down_path, time_interval=time_interval1, csv_name="all_flow_data.csv")

        # Slow vehicles (match_type 1/2/3).
        slow_vehicle_csv_name = "car_slow_move_data.csv"
        # self.get_slow_vehicle(all_path=all_path, time_interval=self.time_interval, csv_name=slow_vehicle_csv_name)
        # self.showCarSlowNum.read_file(os.path.join(self.dir_path, slow_vehicle_csv_name))

        # Vehicle-type vs travel-time scatter data.
        type_time_csv_name = "time_duration_data.csv"
        # self.get_type_time_change(down_path=down_path, csv_name=type_time_csv_name)
        # self.showTimeDuration.read_file(os.path.join(self.dir_path, type_time_csv_name))

        # Matched up/down flow, difference and total (the active step).
        # time_interval4 = str(int(seconds_average / 60)) + 'T'
        mate_all_flow_csv_name = "mate_flow_data.csv"
        self.get_mate_all_flow(up_path=up_path, down_path=down_path, time_interval=self.time_interval,
                               csv_name=mate_all_flow_csv_name)
        self.showFlowDiff.read_file(os.path.join(self.dir_path, mate_all_flow_csv_name))

        # Pass-rate computations.
        # car_pass_rate_csv_name = "car_pass_rate_data.csv"
        # self.get_car_pass_rate_data(up_path=up_path, seconds_average=seconds_average,
        #                             time_interval=self.time_interval, csv_name=car_pass_rate_csv_name)
        # self.showPassRate.read_file(os.path.join(self.dir_path, car_pass_rate_csv_name))
        # all_pass_rate_csv_name = "all_pass_rate_data.csv"
        # self.get_all_pass_rate_data(up_path=up_path, time_interval=self.time_interval, csv_name=all_pass_rate_csv_name)
        # self.showPassRate.read_file(os.path.join(self.dir_path, all_pass_rate_csv_name))

        # Abnormal rows (match_type == 3).
        abnormal_data_csv_name = 'abnormal_data.csv'
        # self.get_abnormal_data(all_path=all_path, csv_name=abnormal_data_csv_name)

        self.clear()
        return True

    def clear(self):
        """Reset per-run state so the instance can process another directory."""
        self.ctype_time = None
        self.dist = None


if __name__ == '__main__':

    # Single-directory usage example:
    # dir_path = r'D:\GJ\项目\事故检测\output\邻垫高速\G004251002000620010,G004251001000320010-20240404'
    # gantry = Gantry(dir_path)
    # gantry.run()

    error = []  # names of directories whose analysis returned False
    ii = 0  # number of directories processed so far
    k = 0  # countdown: directories are skipped while k > 0
    output_file = r'D:\GJ\项目\事故检测\output\纳黔高速\error.txt'
    last_time = time.time()
    top_dir = r'D:\GJ\项目\事故检测\output\纳黔高速'
    # Walk the output tree; each sub-directory holds one section's data.
    for root, dirs, files in os.walk(top_dir):
        # root  is the path of the directory currently being visited
        # dirs  lists its immediate sub-directories (no recursion here)
        # files lists its immediate files
        # print(f"Folder: {root}")
        # Process every sub-directory of the top level.
        for dir_name in dirs:
            dir_path = os.path.join(root, dir_name)
            print(f"  Directory: {os.path.join(root, dir_name)}")
            k -= 1
            if k <= 0:
                gantry = Gantry(dir_path)
                F = gantry.run()
                now_time = time.time()
                T = now_time - last_time  # wall-clock seconds spent on this directory
                last_time = now_time
                ii += 1
                print(ii, T)
                if not F:
                    error.append(dir_name)
                    print('error', dir_name)
        # # Also list the files in this folder:
        # for file_name in files:
        #     print(f"  File: {os.path.join(root, file_name)}")
        # Persist the failed-directory list, one name per line:
        # with open(output_file, 'w') as file:
        #     for item in error:
        #         file.write(f"{item}\n")

        break   # only process the top level; do not descend further
