import csv
import json
import os
import shutil
import time
from PIL import Image
import concurrent.futures
from datetime import datetime, timedelta
from matplotlib import patches, pyplot as plt
from utils import get_direct_subdirectories, get_non_recursive_folders


# Maps congestion-status labels to numeric severity levels so statuses can be
# compared and ordered (1 = light, 2 = moderate, 3 = severe congestion).
congestion_level_mapping = {
    "轻度拥堵": 1,
    "中度拥堵": 2,
    "严重拥堵": 3
}


def split_time_intervals(data_list, threshold=600):
    """Group congestion records into intervals separated by large time gaps.

    Consecutive records stay in the same interval; whenever the gap between
    two adjacent records exceeds ``threshold`` seconds a new interval starts.
    For each interval the worst congestion status observed is reported.

    :param data_list: time-ordered list of dicts with keys "时间" (ISO
        timestamp with fractional seconds, e.g. "2024-03-19T08:00:00.000")
        and "路况" (a key of ``congestion_level_mapping``).
    :param threshold: maximum gap in seconds allowed inside one interval.
    :return: list of dicts with "start_time" / "end_time" (formatted
        "%Y-%m-%d %H:%M:%S"), "interval" (the original records) and
        "max_congestion_status" (status text of the worst congestion).
    """
    if not data_list:
        return []

    # Parse every timestamp exactly once instead of re-parsing each
    # neighbour pair inside the loop (the old code parsed each item twice).
    times = [datetime.strptime(item["时间"], "%Y-%m-%dT%H:%M:%S.%f") for item in data_list]

    intervals = []
    current_interval = [data_list[0]]
    for i in range(1, len(data_list)):
        if (times[i] - times[i - 1]).total_seconds() > threshold:
            # Gap exceeds the threshold: close the current interval.
            intervals.append(current_interval)
            current_interval = [data_list[i]]
        else:
            current_interval.append(data_list[i])
    intervals.append(current_interval)  # close the last open interval

    result = []
    for interval in intervals:
        # Numerically highest (worst) congestion level inside the interval.
        max_congestion_level = max(congestion_level_mapping[item["路况"]] for item in interval)
        # Map the numeric level back to its status text.
        max_congestion_status = next(
            k for k, v in congestion_level_mapping.items() if v == max_congestion_level
        )

        start_time = datetime.strptime(interval[0]["时间"], "%Y-%m-%dT%H:%M:%S.%f")
        if len(interval) == 1:
            # Single-record interval: assume it lasted 300 seconds.
            end_time = start_time + timedelta(seconds=300)
        else:
            end_time = datetime.strptime(interval[-1]["时间"], "%Y-%m-%dT%H:%M:%S.%f")

        result.append({
            "start_time": start_time.strftime("%Y-%m-%d %H:%M:%S"),
            "end_time": end_time.strftime("%Y-%m-%d %H:%M:%S"),
            "interval": interval,
            "max_congestion_status": max_congestion_status
        })
    return result


def split_event_time_intervals(data_list, threshold=200):
    """Group incident records into intervals separated by large time gaps.

    Consecutive records stay in the same interval; whenever the gap between
    two adjacent records exceeds ``threshold`` seconds a new interval starts.

    :param data_list: time-ordered list of dicts whose "时间" key holds an
        ISO timestamp with fractional seconds (e.g. "2024-03-19T08:00:00.000").
    :param threshold: maximum gap in seconds allowed inside one interval.
    :return: list of dicts with "start_time" / "end_time" (formatted
        "%Y-%m-%d %H:%M:%S") and "interval" (the original records).
    """
    if not data_list:
        return []

    # Parse every timestamp exactly once instead of re-parsing each
    # neighbour pair inside the loop (the old code parsed each item twice).
    times = [datetime.strptime(item["时间"], "%Y-%m-%dT%H:%M:%S.%f") for item in data_list]

    intervals = []
    current_interval = [data_list[0]]
    for i in range(1, len(data_list)):
        if (times[i] - times[i - 1]).total_seconds() > threshold:
            # Gap exceeds the threshold: close the current interval.
            intervals.append(current_interval)
            current_interval = [data_list[i]]
        else:
            current_interval.append(data_list[i])
    intervals.append(current_interval)  # close the last open interval

    result = []
    for interval in intervals:
        start_time = datetime.strptime(interval[0]["时间"], "%Y-%m-%dT%H:%M:%S.%f")
        if len(interval) == 1:
            # Single-record interval: assume it lasted 60 seconds.
            end_time = start_time + timedelta(seconds=60)
        else:
            end_time = datetime.strptime(interval[-1]["时间"], "%Y-%m-%dT%H:%M:%S.%f")

        result.append({
            "start_time": start_time.strftime("%Y-%m-%d %H:%M:%S"),
            "end_time": end_time.strftime("%Y-%m-%d %H:%M:%S"),
            "interval": interval
        })
    return result


def get_result_data(file, dir_path):
    """Read one detection CSV log and extract congestion/incident intervals.

    The CSV is located as ``dir_path/<stem of file>.csv``; rows whose
    "路况" (traffic status) or "事故" (incident) column differs from
    "畅通" (clear) are collected and grouped into time intervals.

    :param file: file name whose stem identifies the CSV log.
    :param dir_path: directory containing the CSV logs.
    :return: tuple ``(yongdu_result, event_result)`` of grouped congestion
        and incident intervals; both empty when the CSV does not exist.
    """
    # Use splitext so filenames containing extra dots keep their full stem
    # (the old ``file.split('.')[0]`` truncated them).
    csv_path = os.path.join(dir_path, os.path.splitext(file)[0] + '.csv')
    if not os.path.exists(csv_path):
        # The old code fell through and implicitly returned None here,
        # which crashed callers that unpack the result into two values.
        return [], []

    dict_list = []
    try:
        with open(csv_path, mode='r', encoding='utf-8', newline='') as csvfile:
            # Read every row as a dict keyed by the CSV header.
            reader = csv.DictReader(csvfile)
            dict_list = list(reader)
    except FileNotFoundError:
        print(f"文件 {csv_path} 未找到。")
    except Exception as e:
        print(f"读取文件时出现错误: {e}")

    event_list = []
    yongdu_list = []
    for row in dict_list:
        # "畅通" means clear traffic / no incident; keep everything else.
        if row['路况'] != '畅通':
            yongdu_list.append({"时间": row["时间"], "路况": row["路况"]})
        if row['事故'] != '畅通':
            event_list.append({"时间": row["时间"], "事故": row["事故"]})

    yongdu_result = split_time_intervals(yongdu_list)
    event_result = split_event_time_intervals(event_list)

    return yongdu_result, event_result


def deal_truth(d_path, folder_B):
    """Aggregate per-run ground-truth JSON files and copy feature PNGs.

    For every direct subdirectory of ``d_path`` (one folder per analysed
    run) this copies the run's ``png/*.png`` feature images into
    ``folder_B/file/<run_name>/`` and reads the run's ``<run_name>.json``
    truth file, augmenting it with ``id``, ``truthMark`` and
    ``algorithmMark`` fields. All augmented records are written as
    newline-delimited JSON to ``folder_B/truth.json``.

    :param d_path: root directory of the offline analysis output.
    :param folder_B: destination directory for ``truth.json`` and the
        copied images; created if missing.
    """
    n = 0
    if not os.path.exists(folder_B):
        os.makedirs(folder_B)
    # Target path of the aggregated truth file (one JSON object per line).
    destination_json_path = os.path.join(folder_B, "truth.json")
    data_list = []
    for dir_path in get_direct_subdirectories(d_path):
        time.sleep(0.1)  # NOTE(review): brief pause per folder — purpose unclear, confirm it is needed
        n += 1
        print(n)  # progress counter
        folder_A = dir_path
        # Copy the run's PNG feature images so the frontend can locate them.
        png_folder = os.path.join(folder_A, 'png')
        if os.path.exists(png_folder):
            # Mirror folder A's name under folder_B/file/.
            folder_B_A = os.path.join(folder_B, "file", os.path.basename(folder_A))
            if not os.path.exists(folder_B_A):
                os.makedirs(folder_B_A)
            for filename in os.listdir(png_folder):
                if filename.endswith('.png'):
                    source_path = os.path.join(png_folder, filename)
                    destination_path = os.path.join(folder_B_A, filename)
                    shutil.copy2(source_path, destination_path)

        # The run's truth JSON file is named after the run folder itself.
        folder_A_name = os.path.basename(folder_A)
        json_filename = f"{folder_A_name}.json"
        source_json_path = os.path.join(folder_A, json_filename)
        if os.path.exists(source_json_path):
            # Read the original truth JSON and augment it in memory.
            with open(source_json_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            data["id"] = folder_A_name
            data["truthMark"] = []
            # Collect the distinct event types present in the truth values.
            # NOTE(review): assumes the JSON always contains a "truthValue"
            # list whose items carry "eventTime" ("YYYY-MM-DD HH:MM") and
            # "eventType" — confirm against the truth-file schema.
            for i in range(len(data["truthValue"])):
                # Append ":00" seconds so the timestamp later parses as
                # "%Y-%m-%d %H:%M:%S".
                data["truthValue"][i]["eventTime"] = data["truthValue"][i]["eventTime"] + ":00"
                truth_mark = data["truthValue"][i]["eventType"]
                if truth_mark in data["truthMark"]:
                    continue
                else:
                    data["truthMark"].append(truth_mark)
            if len(data["truthMark"]) == 0:
                # "未分类" = "unclassified" fallback category.
                data["truthMark"].append("未分类")
            data["algorithmMark"] = ["未分类"]

            data_list.append(data)
    # Write all augmented records as newline-delimited JSON.
    with open(destination_json_path, 'w', encoding='utf-8') as f:
        for data in data_list:
            json.dump(data, f, ensure_ascii=False)
            f.write("\n")


def deal_algorithm(file_path, log_folder_path, version):
    """Build the algorithm result file ``algorithm/<version>/readme.json``.

    Walks ``log_folder_path`` recursively, converts every ``*.csv``
    detection log into one newline-delimited JSON record holding that log's
    congestion ("拥堵") and blockage ("阻断") events, and pre-creates the
    ``block``/``congestion``/``deduction`` image output folders.

    :param file_path: root output directory for the frontend data.
    :param log_folder_path: folder tree containing the saved detection CSVs.
    :param version: algorithm version string written into every record.
    """
    result_name_list = ["block", "congestion", "deduction"]
    folder_C = os.path.join(file_path, "algorithm", version)
    if not os.path.exists(folder_C):
        os.makedirs(folder_C)
    # Pre-create the image output folders for this version.
    for result_name in result_name_list:
        new_name = os.path.join(folder_C, result_name)
        if not os.path.exists(new_name):
            os.makedirs(new_name)
    # readme.json stores one JSON object per CSV log (newline-delimited).
    json_path = os.path.join(file_path, "algorithm", version, "readme.json")
    with open(json_path, mode='w', encoding='utf-8', newline='') as f:
        # Walk the whole log tree; every CSV holds one camera/run's events.
        for root, dirs, files in os.walk(log_folder_path):
            for file in files:
                if file.endswith('.csv'):
                    # Bug fix: resolve the CSV relative to the directory it
                    # was actually found in (``root``). The old code always
                    # passed ``log_folder_path``, which broke for CSVs in
                    # nested subfolders discovered by os.walk.
                    yongdu_result, event_result = get_result_data(file, root)
                    json_data = {}
                    json_data["id"] = os.path.splitext(file)[0]
                    json_data["algorithmValue"] = []
                    direction = "上行"
                    for yongdu in yongdu_result:
                        ddict = {
                            "startTime": yongdu["start_time"],
                            "endTime": yongdu["end_time"],
                            "direction": direction,
                            "eventType": "拥堵",
                            "level": int(congestion_level_mapping[yongdu["max_congestion_status"]])
                        }
                        json_data["algorithmValue"].append(ddict)
                    for event in event_result:
                        # Blockage severity is derived from event duration:
                        # < 5 min -> 1, < 30 min -> 2, otherwise 3.
                        start_time = datetime.strptime(event["start_time"], "%Y-%m-%d %H:%M:%S")
                        end_time = datetime.strptime(event["end_time"], "%Y-%m-%d %H:%M:%S")
                        dt = (end_time - start_time).total_seconds()
                        if dt < 300:
                            level = 1
                        elif dt < 1800:
                            level = 2
                        else:
                            level = 3
                        ddict = {
                            "startTime": event["start_time"],
                            "endTime": event["end_time"],
                            "direction": direction,
                            "eventType": "阻断",
                            "level": level
                        }
                        json_data["algorithmValue"].append(ddict)

                    json_data["version"] = version
                    json.dump(json_data, f, ensure_ascii=False)
                    f.write("\n")

                time.sleep(0.1)  # NOTE(review): per-file pause — purpose unclear, confirm it is needed


def deal_event_png(file_path, version, truth_flag=False):
    """Render 24-hour timeline strip PNGs for truth and algorithm events.

    Reads the newline-delimited ``truth.json`` and
    ``algorithm/<version>/readme.json``, maps every event onto a
    00:00-24:00 minute axis, and draws one horizontal strip per run ID:
    a green background with coloured rectangles (cyan/yellow/red by
    severity level). Outputs:

    * truth strips -> ``file/<ID>/true_result.png`` (only if ``truth_flag``)
    * blockage ("阻断") strips -> ``algorithm/<version>/block/<ID>.png``
    * congestion strips -> ``algorithm/<version>/congestion/<ID>.png``

    :param file_path: root directory containing ``truth.json``,
        ``algorithm/<version>/readme.json`` and the per-run ``file/`` dirs.
    :param version: algorithm version subfolder to read from / write to.
    :param truth_flag: when True, also render the ground-truth strips.
    """
    true_json_path = os.path.join(file_path, "truth.json")
    event_json_path = os.path.join(file_path, "algorithm", version, "readme.json")
    save_path = os.path.join(file_path, "file")
    if not os.path.exists(save_path):
        return

    # Per-source event tables keyed by run ID.
    all_result = {
        "true_result": {},
        "event_result": {},
        "yongdu_result": {}
    }
    # Severity level -> strip colour.
    color_dict = {1: "cyan", 2: "yellow", 3: "red"}

    # Load the ground-truth records (one JSON object per line).
    # NOTE(review): assumes "eventTime"/"startTime"/"endTime" are
    # "YYYY-MM-DD HH:MM:SS" strings — confirm against the truth.json schema.
    with open(true_json_path, 'r', encoding='utf-8') as file:
        for line in file:
            try:
                data = json.loads(line)
                ID = data["id"]
                all_result["true_result"][ID] = {"true": [], "detection": [], "congestion": []}
                for i in data["truthValue"]:
                    event_time = datetime.strptime(i["eventTime"].split(' ')[1], '%H:%M:%S')
                    # Truth events get a fixed 20-minute strip at max severity.
                    all_result["true_result"][ID]["true"].append({"eventTime": event_time, "duration": 20, "secLevel": 3})
                for j in data["detectionValue"]:
                    start_time = datetime.strptime(j["startTime"].split(' ')[1], '%H:%M:%S')
                    end_time = datetime.strptime(j["endTime"].split(' ')[1], '%H:%M:%S')
                    duration = int((end_time - start_time).total_seconds() / 60)
                    all_result["true_result"][ID]["detection"].append({"eventTime": start_time, "duration": duration,
                                                                       "secLevel": 2})
                for k in data["congestionValue"]:
                    start_time = datetime.strptime(k["startTime"].split(' ')[1], '%H:%M:%S')
                    end_time = datetime.strptime(k["endTime"].split(' ')[1], '%H:%M:%S')
                    duration = int((end_time - start_time).total_seconds() / 60)
                    all_result["true_result"][ID]["congestion"].append({"eventTime": start_time, "duration": duration,
                                                                        "secLevel": 1})
            except json.JSONDecodeError:
                print(f"解析true_json_path文件行 {line} 时出错。")

    # Load the algorithm records; split blockage ("阻断") from congestion.
    with open(event_json_path, 'r', encoding='utf-8') as file:
        for line in file:
            try:
                data = json.loads(line)
                ID = data["id"]
                all_result["event_result"][ID] = []
                all_result["yongdu_result"][ID] = []
                for j in data["algorithmValue"]:
                    if j["eventType"] == "阻断":
                        # [:5] keeps only "HH:MM" — minute resolution.
                        start_time = datetime.strptime(j["startTime"].split(' ')[1][:5], '%H:%M')
                        end_time = datetime.strptime(j["endTime"].split(' ')[1][:5], '%H:%M')
                        duration = int((end_time - start_time).total_seconds() / 60)
                        all_result["event_result"][ID].append({"eventTime": start_time, "duration": duration,
                                                               "secLevel": int(j["level"])})
                    else:
                        start_time = datetime.strptime(j["startTime"].split(' ')[1][:5], '%H:%M')
                        end_time = datetime.strptime(j["endTime"].split(' ')[1][:5], '%H:%M')
                        duration = int((end_time - start_time).total_seconds() / 60)
                        all_result["yongdu_result"][ID].append({"eventTime": start_time, "duration": duration,
                                                               "secLevel": int(j["level"])})
            except json.JSONDecodeError:
                print(f"解析event_json_path文件行 {line} 时出错。")

    # Timeline axis: positions are minutes relative to midnight.
    # NOTE(review): events crossing midnight would yield negative durations —
    # confirm all inputs fall within a single day.
    start_time = datetime.strptime("00:00:00", "%H:%M:%S")
    end_time = datetime.strptime("23:59:59", "%H:%M:%S")
    # Convert to minutes-from-start for x-axis placement.
    start_min = 0
    end_min = (end_time - start_time).total_seconds() / 60

    folder_names = get_non_recursive_folders(save_path)
    source_path = os.path.dirname(save_path)

    for source, source_data in all_result.items():
        if source == "true_result" and truth_flag:
            for ID, org_info in source_data.items():
                if ID in folder_names:
                    fig, ax = plt.subplots(figsize=(20, 1))
                    # Green background spanning the whole day.
                    ax.add_patch(patches.Rectangle((start_min, 0), end_min - start_min, 1, color='green'))
                    for data1 in org_info["detection"]:
                        level = data1['secLevel']
                        timestamp = data1['eventTime']
                        duration = data1['duration']
                        red_start = (timestamp - start_time).total_seconds() / 60
                        ax.add_patch(patches.Rectangle((red_start, 0), duration, 1, color=color_dict[level]))
                    for data2 in org_info["true"]:
                        level = data2['secLevel']
                        timestamp = data2['eventTime']
                        duration = data2['duration']
                        red_start = (timestamp - start_time).total_seconds() / 60
                        ax.add_patch(patches.Rectangle((red_start, 0), duration, 1, color=color_dict[level]))
                    for data3 in org_info["congestion"]:
                        level = data3['secLevel']
                        timestamp = data3['eventTime']
                        duration = data3['duration']
                        red_start = (timestamp - start_time).total_seconds() / 60
                        # Congestion strips are drawn at half height.
                        ax.add_patch(patches.Rectangle((red_start, 0), duration, 0.5, color=color_dict[level]))
                    # Configure the x axis: one tick per hour.
                    plt.xlim([start_min, end_min])
                    plt.xticks([i * 60 for i in range(25)], [f'{i:02d}:00' for i in range(25)])
                    # Hide the y axis and the top/right/left spines.
                    ax.yaxis.set_visible(False)
                    ax.spines['top'].set_visible(False)
                    ax.spines['right'].set_visible(False)
                    ax.spines['left'].set_visible(False)
                    plt.savefig(os.path.join(save_path, ID, "true_result.png"), dpi=200, bbox_inches='tight',
                                transparent=False)
                    plt.close()
                else:
                    print(f"save_path目录下的{ID}文件夹不存在")

        if source == "event_result":
            for ID, org_info in source_data.items():
                if ID in folder_names:
                    fig, ax = plt.subplots(figsize=(20, 1))
                    # Green background spanning the whole day.
                    ax.add_patch(patches.Rectangle((start_min, 0), end_min - start_min, 1, color='green'))
                    for data1 in org_info:
                        level = data1['secLevel']
                        timestamp = data1['eventTime']
                        duration = data1['duration']
                        red_start = (timestamp - start_time).total_seconds() / 60
                        ax.add_patch(patches.Rectangle((red_start, 0), duration, 1, color=color_dict[level]))
                    # Configure the x axis: one tick per hour.
                    plt.xlim([start_min, end_min])
                    plt.xticks([i * 60 for i in range(25)], [f'{i:02d}:00' for i in range(25)])
                    # Hide the y axis and the top/right/left spines.
                    ax.yaxis.set_visible(False)
                    ax.spines['top'].set_visible(False)
                    ax.spines['right'].set_visible(False)
                    ax.spines['left'].set_visible(False)
                    save_png_path = os.path.join(source_path, "algorithm", version, "block")
                    if not os.path.exists(save_png_path):
                        os.makedirs(save_png_path)
                    save_png_file = os.path.join(save_png_path, str(ID) + ".png")
                    plt.savefig(save_png_file, dpi=200, bbox_inches='tight', transparent=False)
                    plt.close()
                else:
                    print(f"save_path目录下的{ID}文件夹不存在")

        if source == "yongdu_result":
            for ID, org_info in source_data.items():
                if ID in folder_names:
                    fig, ax = plt.subplots(figsize=(20, 1))
                    # Green background spanning the whole day.
                    ax.add_patch(patches.Rectangle((start_min, 0), end_min - start_min, 1, color='green'))
                    for data1 in org_info:
                        level = data1['secLevel']
                        timestamp = data1['eventTime']
                        duration = data1['duration']
                        red_start = (timestamp - start_time).total_seconds() / 60
                        ax.add_patch(patches.Rectangle((red_start, 0), duration, 1, color=color_dict[level]))
                    # Configure the x axis: one tick per hour.
                    plt.xlim([start_min, end_min])
                    plt.xticks([i * 60 for i in range(25)], [f'{i:02d}:00' for i in range(25)])
                    # Hide the y axis and the top/right/left spines.
                    ax.yaxis.set_visible(False)
                    ax.spines['top'].set_visible(False)
                    ax.spines['right'].set_visible(False)
                    ax.spines['left'].set_visible(False)
                    save_png_path = os.path.join(source_path, "algorithm", version, "congestion")
                    if not os.path.exists(save_png_path):
                        os.makedirs(save_png_path)
                    save_png_file = os.path.join(save_png_path, str(ID) + ".png")
                    plt.savefig(save_png_file, dpi=200, bbox_inches='tight', transparent=False)
                    plt.close()
                else:
                    print(f"save_path目录下的{ID}文件夹不存在")


def compress_all_png_images(root_dir):
    """Palette-compress every PNG under ``root_dir``, in place, recursively.

    Each image is converted to 256-colour adaptive palette mode and saved
    over the original file. The per-file work is fanned out over a pool of
    at most 10 worker threads.

    :param root_dir: directory tree to scan for ``*.png`` files.
    """

    def _compress_one(input_path, output_path, colors=256):
        """Compress a single PNG with Pillow.

        :param input_path: path of the image to read
        :param output_path: path to write the compressed image to
        :param colors: palette size, default 256
        """
        try:
            img = Image.open(input_path)
            # Adaptive palette mode shrinks the file while keeping quality.
            img = img.convert('P', palette=Image.ADAPTIVE, colors=colors)
            img.save(output_path, 'PNG')
            print(f"使用 Pillow 压缩后的图片已保存到 {output_path}")
        except Exception as e:
            print(f"使用 Pillow 压缩图片时出错: {e}")

    # Collect every PNG path in the tree (case-insensitive extension match).
    targets = [
        os.path.join(dirpath, name)
        for dirpath, _, filenames in os.walk(root_dir)
        for name in filenames
        if name.lower().endswith('.png')
    ]

    # Submit one job per file to a 10-thread pool and block until all done.
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
        pending = [executor.submit(_compress_one, path, path) for path in targets]
        concurrent.futures.wait(pending)


if __name__ == "__main__":
    # Root of the offline analysis output (one subfolder per run).
    dir_path = r"D:\GJ\项目\事故检测\output0321"
    # Destination directory consumed by the frontend.
    file_path = r'D:\GJ\项目\事故检测\output0311\0324\file'
    # Folder holding every CSV saved during detection; walked for event times.
    log_folder_path = r"D:\GJ\项目\事故检测\output0311\0324\save_csv"

    # Build truth.json and copy the offline feature PNGs under file/ so the
    # frontend can locate them.
    deal_truth(dir_path, file_path)
    print("真值及特征图另存为完成！")
    # Current algorithm version.
    version = "V2.1"
    # Parse the algorithm CSV logs into algorithm/<version>/readme.json.
    deal_algorithm(file_path, log_folder_path, version=version)
    print("算法readme.json文件已生成！")
    # Render the truth/algorithm timeline strip images; truth_flag controls
    # whether the ground-truth strips are generated as well.
    deal_event_png(file_path, version=version, truth_flag=True)
    print("png保存完成！")
    # Compress every generated PNG in place.
    root_directory_file = os.path.join(file_path, "file")
    compress_all_png_images(root_directory_file)
    root_directory_version = os.path.join(file_path, "algorithm", version)
    compress_all_png_images(root_directory_version)
    print("图片压缩完成！")