import csv
import os
from math import nan

import chardet
import numpy as np

import matplotlib.pyplot as plt
import pandas as pd


def deal_np_data(df_np):
    """Drop rows containing the value 2 and append a total-seconds column.

    Each row's 5th field (index 4) is expected to be a duration string of
    the form '<M>m<S>s' (e.g. '2m30s'); it is parsed into total seconds
    and appended as a new last column.

    Args:
        df_np: 2-D numpy array (typically object dtype from
            ``DataFrame.to_numpy()``) with the duration string at index 4.

    Returns:
        numpy array containing the kept rows plus one extra seconds column.

    Raises:
        ValueError: if a duration cell does not match the '<M>m<S>s' shape.
    """
    # Keep only rows in which no cell equals 2 (2 marks rows to discard).
    keep_mask = ~np.any(df_np == 2, axis=1)
    filtered = df_np[keep_mask]

    seconds_column = []
    for row in filtered:
        duration = row[4]  # e.g. '2m30s'
        minutes_str, rest = duration.split('m', 1)
        total_seconds = int(minutes_str) * 60 + int(rest.split('s', 1)[0])
        seconds_column.append([total_seconds])

    # Append the computed seconds as a new trailing column.
    return np.concatenate((filtered, np.array(seconds_column)), axis=1)

def show(data):
    """Plot 'mean_speed' and 'time1' from a nested dict as grouped bars.

    Args:
        data: mapping of label -> sub-dict; each sub-dict may contain
            'mean_speed' and 'time1' values (other keys are ignored).
    """
    keys = list(data.keys())
    # Use nan as a default for missing metrics so both value lists stay
    # aligned with `keys`. The previous `if 'mean_speed' in d` filtering
    # silently misaligned bars (and broke ax.bar on a length mismatch)
    # whenever a sub-dict lacked one of the metrics.
    mean_speeds = [d.get('mean_speed', nan) for d in data.values()]
    time1s = [d.get('time1', nan) for d in data.values()]

    fig, ax = plt.subplots()
    bar_width = 0.35
    indices = np.arange(len(keys))

    # First bar group: mean speeds.
    rects1 = ax.bar(indices, mean_speeds, bar_width, label='Mean Speed')
    # Second bar group, shifted right so the groups sit side by side.
    rects2 = ax.bar(indices + bar_width, time1s, bar_width, label='Time1')

    # Annotate each bar with its height, just above the top edge.
    def add_labels(rects):
        for rect in rects:
            height = rect.get_height()
            ax.annotate('{}'.format(height),
                        xy=(rect.get_x() + rect.get_width() / 2, height),
                        xytext=(0, 3),  # 3 points vertical offset
                        textcoords="offset points",
                        ha='center', va='bottom')

    add_labels(rects1)
    add_labels(rects2)

    # Axis labels, title and legend.
    ax.set_xlabel('Keys')
    ax.set_ylabel('Values')
    ax.set_title('Bar Chart of Nested Dictionary Data')
    ax.set_xticks(indices + bar_width / 2)
    ax.set_xticklabels(keys)
    ax.legend()

    plt.show()





def get_pipeilv(path):
    """Compute the match rate of a CSV: % of rows whose 5th column is a string.

    The 5th column holds a duration string for matched rows; unmatched rows
    come back from pandas as non-string values (e.g. NaN floats), so
    "is a string" doubles as the matched test.

    Args:
        path: path to the CSV file to inspect.

    Returns:
        Tuple ``(rate, total)`` where ``rate`` is the matched percentage
        rounded to 2 decimals and ``total`` is the row count.
    """
    # Sniff the encoding from the first 10 kB so files saved as GBK or
    # UTF-8 both load correctly.
    with open(path, 'rb') as f:
        detected_encoding = chardet.detect(f.read(10000))['encoding']

    df = pd.read_csv(path, encoding=detected_encoding)
    df_np = df.to_numpy()
    total = df_np.shape[0]

    # Count matched rows; isinstance is the idiomatic type test.
    success = sum(isinstance(row[4], str) for row in df_np)

    # Guard against an empty CSV instead of raising ZeroDivisionError.
    rate = round((success / total) * 100, 2) if total else 0.0

    return rate, total


def get_files_containing_substring(directory, substring):
    file_name = None
    # 遍历指定目录下的所有文件
    for filename in os.listdir(directory):
        # 构建完整的文件路径
        full_path = os.path.join(directory, filename)

        # 检查是否为文件
        if os.path.isfile(full_path):
            # 检查文件名是否包含指定的子字符串
            if substring in filename:
                print(f"文件名包含 '{substring}' 的文件：{full_path}")
                file_name = full_path
    return file_name

def bianli():
    """Walk the output directory and collect match rates per site folder.

    For every sub-directory, locate its 'up', 'down' and 'all' CSVs,
    compute each file's match rate, record the row count of the 'all'
    file, and tag the folder with an area code from the classification
    rules (defaulting to 'B' when no rule matches).

    Returns:
        dict mapping folder name -> {'up', 'down', 'all', 'total', 'area'}.
    """
    classification_rules = {
        'G004251002000610010': "A",
        'G004251002000620010': "A",
        'G004251001000120010': "C",
        'G004251001000120020': "C",
        'G004251001000110010': "C",
    }
    top_dir = 'D:\\GJ\\项目\\事故检测\\output'
    rate_dict = {}
    for entry in os.listdir(top_dir):
        full_path = os.path.join(top_dir, entry)
        if not os.path.isdir(full_path):
            continue
        print(f"文件夹: {full_path}")
        stats = {}
        rate_dict[entry] = stats
        # Same computation for each of the three per-direction CSVs;
        # only the 'all' file contributes the row total.
        for tag in ('up', 'down', 'all'):
            csv_file = get_files_containing_substring(full_path, tag)
            rate, total = get_pipeilv(csv_file)
            stats[tag] = rate
            if tag == 'all':
                stats['total'] = total
        # Last matching rule wins, mirroring the original loop's overwrite.
        matched_areas = [area for pattern, area in classification_rules.items()
                         if pattern in entry]
        stats['area'] = matched_areas[-1] if matched_areas else 'B'
    return rate_dict


def save_csv(dict, filename, index):
    """Flatten a nested dict and write it to a CSV file.

    Each outer key becomes one row; its sub-dict supplies the cell values.
    Columns are the union of all sub-dict keys, sorted, preceded by the
    key column. Missing cells are written as empty strings.

    Args:
        dict: mapping of row-key -> {column: value}. (The parameter name
            shadows the builtin and is kept only for backward
            compatibility with keyword callers.)
        filename: output CSV path.
        index: header name for the row-key column.
    """
    data = dict  # alias so the builtin `dict` name is not used below

    # Union of every sub-dict's keys gives the full column set.
    all_keys = set()
    for sub_dict in data.values():
        all_keys.update(sub_dict)

    # One flat row per outer key, with the key itself in the index column.
    rows = [{index: key, **sub_dict} for key, sub_dict in data.items()]

    fieldnames = [index] + sorted(all_keys)
    with open(filename, mode='w', newline='', encoding='utf-8') as file:
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(rows)


if __name__ == '__main__':
    # Collect per-site match rates and dump them into a single CSV report,
    # using 'Date' as the header of the row-key column.
    filename = 'D:\\GJ\\项目\\事故检测\\output\\邻垫高速\\all_rate.csv'
    rate_dict = bianli()
    save_csv(rate_dict, filename, 'Date')

