import base64
import csv
import json
import os
from collections import defaultdict
from datetime import datetime, timedelta
import pandas as pd
from utils import parse_time, save_dict_csv, save_csv
from 相机位置 import KAKO_DATA_LIST_UP, KAKO_DATA_LIST_DOWN
from optimization_data import main_data


def list_files(directory):
    """Recursively collect the full paths of every file under *directory*.

    :param directory: root folder to walk.
    :return: list of file paths (walk order of ``os.walk``).
    """
    return [
        os.path.join(folder, name)
        for folder, _subdirs, names in os.walk(directory)
        for name in names
    ]


def get_all_files(directory, extension=None):
    """Yield the path of every file under *directory*, optionally filtered.

    :param directory: root folder to walk.
    :param extension: optional file-name suffix to match (e.g. ``'.txt'``);
        ``None`` yields every file.
    """
    for folder, _subdirs, names in os.walk(directory):
        for name in names:
            # Skip files that do not match the requested suffix.
            if extension is not None and not name.endswith(extension):
                continue
            yield os.path.join(folder, name)


def split_json_by_sn(daily_file_path, csv_path):
    """Split one day's newline-delimited JSON file by its "sn" field and
    write one CSV per sn under ``csv_path/<day>/``.

    :param daily_file_path: path of a daily ``*.json`` file, one JSON object
        per line (e.g. ``2025-02-20.json``).
    :param csv_path: root output folder; a sub-folder named after the day
        (file name without extension) is created inside it.
    """
    # File name without extension, e.g. "2025-02-20" -> sub-folder name.
    file_name, _ = os.path.splitext(os.path.basename(daily_file_path))
    # Group the records by their "sn" field.
    sn_to_data = defaultdict(list)

    # Read the single-day JSON file, one record per line.
    with open(daily_file_path, "r", encoding="utf-8") as file:
        for line in file:
            line = line.strip()
            if not line:
                # Tolerate blank lines (e.g. a trailing newline) instead of
                # crashing in json.loads.
                continue
            data = json.loads(line)
            sn = data.get("sn")
            if sn:
                sn_to_data[sn].append(data)

    sn_folder = os.path.join(csv_path, file_name)
    # exist_ok avoids the exists()/makedirs() race of the original code.
    os.makedirs(sn_folder, exist_ok=True)

    # CSV header shared by every per-sn file (hoisted out of the loop).
    headers = ["gantryid", "vlp", "feevehicletype", "transtime", "laneNo"]
    # Write one CSV file per sn group (e.g. "HW3.csv").
    for sn, data_list in sn_to_data.items():
        csv_file_path = os.path.join(sn_folder, f"{sn}.csv")
        with open(csv_file_path, "w", encoding="utf-8", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=headers)
            writer.writeheader()
            for data in data_list:
                if "cameraNum" in data:
                    # Camera-style record: plate comes from "picLicense" and
                    # the timestamp sits inside "externalTrigger".
                    row = {
                        "gantryid": data.get("sn"),
                        "vlp": data.get("picLicense"),
                        "feevehicletype": data.get("vehFlag"),
                        "transtime": data["externalTrigger"]["timestamp"],
                        "laneNo": data.get("laneNum"),
                    }
                else:
                    vlp = data.get("plateNumber")
                    # Normalize the placeholder plate text to the sentinel
                    # value used downstream.
                    if vlp == "车牌":
                        vlp = "默A00000"
                    row = {
                        "gantryid": data.get("sn"),
                        "vlp": vlp,
                        "feevehicletype": data.get("vehicleType"),
                        "transtime": data.get("timestamp"),
                        "laneNo": data.get("laneNo"),
                    }
                writer.writerow(row)
        print(f"已保存文件: {csv_file_path}")


def save_all_csv(G_list, SAVE=False, PATH=None):
    """Chain per-gantry passage records into per-plate trips.

    :param G_list: iterable of lists; each inner list holds record dicts
        (keys include "gantryid", "vlp", "transtime") from one gantry.
    :param SAVE: when True, persist the result via ``save_dict_csv``.
    :param PATH: output CSV path used when ``SAVE`` is True.
    :return: dict mapping plate ("vlp") -> list of trips, each trip being a
        dict ``{gantryid: parsed_transtime}`` in gantry-visit order.
    """
    result_dict = {}
    gid_list = []
    lists = [v for v in G_list]
    # Walk every record of every gantry list.
    for sublist in lists:
        if sublist:
            # Record this gantry's id, taken from its first record.
            gid_list.append(sublist[0]["gantryid"])
            for item in sublist:
                vlp_value = item.get("vlp")  # vehicle plate used as the grouping key
                gantryid_value = item.get("gantryid")
                transtime_value = parse_time(item.get("transtime"))
                if vlp_value is not None:
                    # Plate already seen: try to attach this passage to one of
                    # its existing trips; otherwise start a new trip.
                    if vlp_value in result_dict.keys():
                        new_flag = True
                        for i in range(len(result_dict[vlp_value])):
                            value = result_dict[vlp_value][i]
                            if gantryid_value in value.keys():
                                # Same gantry again within +/-30 minutes:
                                # treated as a duplicate passage and dropped.
                                if timedelta(minutes=-30) <= (transtime_value-value[gantryid_value]) <= timedelta(minutes=30):
                                    new_flag = False
                                    break
                            else:
                                # Compare against the most recently added gantry
                                # of this trip (dicts keep insertion order, so
                                # the last key is the latest passage).
                                transtime_0 = list(value.keys())[-1]
                                if timedelta(minutes=-30) <= (transtime_value-value[transtime_0]) <= timedelta(minutes=30):
                                    # Within the window: extend this trip with
                                    # the new gantry passage.
                                    result_dict[vlp_value][i][gantryid_value] = transtime_value
                                    new_flag = False
                                    break
                        if new_flag:
                            # No existing trip matched: start a new trip.
                            result_dict[vlp_value].append({gantryid_value: transtime_value})
                    else:
                        # First time this plate is seen: create its trip list.
                        result_dict[vlp_value] = []
                        result_dict[vlp_value].append({gantryid_value: transtime_value})

    if SAVE:
        save_dict_csv(result_dict, gid_list, PATH)

    return result_dict


def deal_source_data(directory_to_search):
    """Load every per-sn CSV under *directory_to_search* and bucket records.

    :param directory_to_search: folder of per-sn CSV files for one day.
    :return: ``(data_dict, data_00)`` where ``data_dict`` maps each known
        up/down gantry id to its list of records, and ``data_00`` collects
        records whose plate starts with "车牌" or "默A". Records for unknown
        gantry ids are discarded.
    """
    placeholder_rows = []
    # One bucket per configured gantry, upstream and downstream alike.
    grouped = {gid: [] for gid in KAKO_DATA_LIST_UP}
    grouped.update({gid: [] for gid in KAKO_DATA_LIST_DOWN})

    for csv_file in list_files(directory_to_search):
        frame = pd.read_csv(csv_file)
        for record in frame.to_dict('records'):
            if record['vlp'][:2] in ["车牌", "默A"]:
                # Placeholder / sentinel plates are collected separately.
                placeholder_rows.append(record)
            elif record['gantryid'] in grouped:
                grouped[record['gantryid']].append(record)

    return grouped, placeholder_rows


def main(directory, output_folder):
    """Run the daily trip-chaining pipeline for one preprocessed folder.

    :param directory: folder containing the per-sn CSV files of one day.
    :param output_folder: folder receiving ``output_up.csv``,
        ``output_down.csv`` and ``output_默A.csv``.
    """
    all_data_dict, data_00 = deal_source_data(directory)

    # Keep the gantry order defined by the configuration dictionaries.
    list_of_lists_up = [all_data_dict[up] for up in KAKO_DATA_LIST_UP]
    list_of_lists_down = [all_data_dict[down] for down in KAKO_DATA_LIST_DOWN]

    # Build and save the up/down trip chains (return values are unused here).
    save_all_csv(list_of_lists_up, SAVE=True, PATH=os.path.join(output_folder, "output_up.csv"))
    save_all_csv(list_of_lists_down, SAVE=True, PATH=os.path.join(output_folder, "output_down.csv"))

    # BUG FIX: this previously wrote to the global ``output_path`` instead of
    # the ``output_folder`` parameter, silently coupling the function to
    # script-level state.
    save_csv(data_00, os.path.join(output_folder, 'output_默A.csv'))


if __name__ == '__main__':
    # Entry point: takes a folder of whole-day, all-gantry JSON files
    # (e.g. 2025-02-20.json), splits them per sn, then chains up/down trips.

    # Folder holding one day of checkpoint data (JSON files).
    directory = r"D:\GJ\项目\事故检测\模拟数据\timestamp_all"
    # Destination root for the per-sn preprocessed CSV files.
    csv_path = r"D:\GJ\项目\事故检测\模拟数据\sn_ip_1"
    if not os.path.exists(csv_path):
        os.makedirs(csv_path)
    # Preprocess every daily JSON file into per-sn CSVs.
    for daily_file_path in get_all_files(directory, extension=".json"):
        split_json_by_sn(daily_file_path, csv_path)

    # Days to post-process; extend this list to handle more dates.
    f_list = ['2025-02-20']
    for ff in f_list:
        directory_to_search = os.path.join(csv_path, ff)
        output_path = os.path.join(csv_path, ff)
        if not os.path.exists(output_path):
            os.makedirs(output_path)

        # Save the upstream/downstream chained data for this day.
        main(directory_to_search, output_path)

        # Post-process output_up.csv (upstream direction) ...
        main_data(output_path, output_path, True)
        # ... and output_down.csv (downstream direction).
        main_data(output_path, output_path, False)



