import json
import os
from datetime import datetime
from collections import defaultdict
import csv
from utils import get_timestamp_milliseconds, get_direct_subdirectories, get_all_files


def get_day_data(original_file_path, new_file_path):
    """Normalize mixed-format JSON lines and append them time-sorted.

    Reads *original_file_path* line by line; each line is a JSON object in
    one of two schemas (records with an 'ip' key use plateNumber/timestamp/
    vehicleType/laneNo directly, others use picLicense/externalTrigger/
    triggerType/laneNum).  Records are grouped by millisecond timestamp,
    then appended to *new_file_path* in ascending timestamp order, one JSON
    object per line.

    :param original_file_path: path of the raw mixed-schema JSON-lines file
    :param new_file_path: output path (opened in append mode)
    """
    result_all = {}
    with open(original_file_path, 'r', encoding='utf-8') as f:
        for raw_line in f:
            line = raw_line.strip()
            if not line:
                # The previous implementation stopped at the first blank
                # line, silently truncating the file; skip blanks instead.
                continue
            data = json.loads(line)
            if 'ip' in data:
                result = {
                    'sn': data['sn'],
                    'plateNumber': data['plateNumber'],
                    'timestamp': data['timestamp'],
                    'vehicleType': data['vehicleType'],
                    'laneNo': data['laneNo'],
                }
            else:
                result = {
                    'sn': data['sn'],
                    'plateNumber': data['picLicense'],
                    'timestamp': data['externalTrigger']['timestamp'],
                    'vehicleType': data['triggerType'],
                    'laneNo': data['laneNum'],
                }
            timestamp_milliseconds = get_timestamp_milliseconds(result['timestamp'])
            result_all.setdefault(timestamp_milliseconds, []).append(result)

    with open(new_file_path, 'a', encoding='utf-8') as f:
        for ts_ms in sorted(result_all):
            for record in result_all[ts_ms]:
                # json.dumps replaces str(dict).replace("'", '"'), which
                # produced invalid JSON whenever a value contained a quote.
                f.write(json.dumps(record, ensure_ascii=False) + "\n")


def split_json_by_date(file_path, output_dir=r"D:\下载\飞书文件下载\20250319\timestamp"):
    """Split a JSON-lines file into one file per calendar day.

    Each input line must be a JSON object; its 'timestamp' field (e.g.
    "2025-02-01T12:21:27.000") determines the output file, named
    "<date>.json" inside *output_dir*.  Lines without a timestamp are
    skipped.

    :param file_path: path of the source JSON-lines file
    :param output_dir: directory for the per-day files (created if missing);
        defaults to the original hard-coded path for backward compatibility
    """
    # Group records by date string.
    date_to_data = defaultdict(list)
    with open(file_path, "r", encoding="utf-8") as file:
        for line in file:
            line = line.strip()
            if not line:
                continue  # json.loads("") would raise on blank lines
            data = json.loads(line)
            timestamp = data.get("timestamp")
            if timestamp:
                # "2025-02-01T12:21:27.000" -> "2025-02-01"
                date_to_data[timestamp.split("T")[0]].append(data)
    # Ensure the output directory exists (previously crashed if missing).
    os.makedirs(output_dir, exist_ok=True)
    # Write one file per day, e.g. "2025-02-01.json".
    for date, data_list in date_to_data.items():
        output_file = os.path.join(output_dir, f"{date}.json")
        with open(output_file, "w", encoding="utf-8") as f:
            for data in data_list:
                f.write(json.dumps(data, ensure_ascii=False) + "\n")
        print(f"已保存文件: {output_file}")


def split_json_by_date_ip(file_path, ip_sn, output_dir="/home/gj/Download/新建文件夹/TLS/timestamp"):
    """Split a per-camera JSON-lines log into per-day files, tagging each
    record with its gantry serial number (sn).

    The camera id is parsed from the source filename: the 4th "_"-separated
    token of the stem (e.g. "topic_kako_data_175_..." -> "175" — TODO confirm
    this naming convention holds for all inputs).  It is mapped to an sn via
    *ip_sn*; every record gets that sn written into its "sn" field.  The
    date comes from externalTrigger.timestamp.  Output files are appended
    to, so multiple cameras can be merged into the same day file.

    :param file_path: path of the raw camera log (JSON lines)
    :param ip_sn: mapping from camera id token -> sn string
    :param output_dir: directory for the per-day files (created if missing);
        defaults to the original hard-coded path for backward compatibility
    """
    file_name, _ = os.path.splitext(os.path.basename(file_path))
    ip = file_name.split("_")[3]
    sn = ip_sn.get(ip)
    # Group records by date string.
    date_to_data = defaultdict(list)
    with open(file_path, "r", encoding="utf-8") as file:
        for line in file:
            line = line.strip()
            if not line:
                continue  # json.loads("") would raise on blank lines
            data = json.loads(line)
            # Guard: original raised AttributeError when the record had no
            # "externalTrigger" object.
            external_trigger = data.get("externalTrigger") or {}
            timestamp = external_trigger.get("timestamp")
            if timestamp:
                # "2025-02-01T12:21:27.000" -> "2025-02-01"
                date_to_data[timestamp.split("T")[0]].append(data)
    # Ensure the output directory exists (previously crashed if missing).
    os.makedirs(output_dir, exist_ok=True)
    for date, data_list in date_to_data.items():
        output_file = os.path.join(output_dir, f"{date}.json")
        # Append so runs over several camera logs accumulate per day.
        with open(output_file, "a", encoding="utf-8") as f:
            for data in data_list:
                data["sn"] = sn
                f.write(json.dumps(data, ensure_ascii=False) + "\n")
        print(f"已保存文件: {output_file}")


def split_json_by_sn(daily_file_path, csv_path):
    """Group a daily JSON-lines file by gantry sn and write one CSV per sn.

    Records come in two schemas: ones containing "cameraNum" use
    picLicense/vehFlag/externalTrigger.timestamp/laneNum; the rest use
    plateNumber/vehicleType/timestamp/laneNo.  Both are flattened to the
    columns gantryid, vlp, feevehicletype, transtime, laneNo.  Output goes
    to <csv_path>/gantry/<day-stem>/<sn>.csv.

    :param daily_file_path: JSON-lines file for one day (stem names the
        output subfolder, e.g. "2025-02-01")
    :param csv_path: root directory for the generated CSV tree
    """
    file_name, _ = os.path.splitext(os.path.basename(daily_file_path))
    # Group records by their "sn" field.
    sn_to_data = defaultdict(list)
    with open(daily_file_path, "r", encoding="utf-8") as file:
        for line in file:
            line = line.strip()
            if not line:
                continue  # json.loads("") would raise on blank lines
            data = json.loads(line)
            sn = data.get("sn")
            if sn:
                sn_to_data[sn].append(data)
    sn_folder = os.path.join(csv_path, "gantry", file_name)
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(sn_folder, exist_ok=True)
    # CSV header is the same for every sn; define it once.
    headers = ["gantryid", "vlp", "feevehicletype", "transtime", "laneNo"]
    for sn, data_list in sn_to_data.items():
        csv_file_path = os.path.join(sn_folder, f"{sn}.csv")
        with open(csv_file_path, "w", encoding="utf-8", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=headers)
            writer.writeheader()
            for data in data_list:
                if "cameraNum" in data:
                    # Raw camera record schema.
                    row = {
                        "gantryid": data.get("sn"),
                        "vlp": data.get("picLicense"),
                        "feevehicletype": data.get("vehFlag"),
                        "transtime": data["externalTrigger"]["timestamp"],
                        "laneNo": data.get("laneNum"),
                    }
                else:
                    # Normalized record schema.
                    vlp = data.get("plateNumber")
                    if vlp == "车牌":
                        # Substitute the project's placeholder plate number.
                        vlp = "默A00000"
                    row = {
                        "gantryid": data.get("sn"),
                        "vlp": vlp,
                        "feevehicletype": data.get("vehicleType"),
                        "transtime": data.get("timestamp"),
                        "laneNo": data.get("laneNo"),
                    }
                writer.writerow(row)
        print(f"已保存文件: {csv_file_path}")


def merge_csv_files(folder_path, day, file1_path, file2_path):
    """Concatenate the CSVs of two adjacent gantries into one section file.

    Reads <folder_path>/<day>/<file1_path> and <folder_path>/<day>/<file2_path>,
    drops rows with fewer than two columns (stray blank/partial lines), keeps
    the first file's header, skips the second file's header, and writes the
    result to <parent-of-folder_path>/up_down/<day>/"<sn1>,<sn2>-<yyyymmdd>.csv".

    :param folder_path: root folder containing per-day subfolders of CSVs
    :param day: day string such as "2025-02-01" (also the subfolder name)
    :param file1_path: upstream gantry CSV file name, e.g. "HW1.csv"
    :param file2_path: downstream gantry CSV file name, e.g. "ZW1.csv"
    """
    up_path = os.path.join(folder_path, day, file1_path)
    down_path = os.path.join(folder_path, day, file2_path)
    up_sn = file1_path.split(".")[0]
    down_sn = file2_path.split(".")[0]  # fixed local-name typo "dowm_sn"
    day_compact = day.replace("-", "")  # "2025-02-01" -> "20250201"
    out_name = f"{up_sn},{down_sn}-{day_compact}.csv"
    output_file_path = os.path.join(os.path.dirname(folder_path), "up_down", day, out_name)
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(os.path.dirname(output_file_path), exist_ok=True)

    def _read_rows(path):
        # One place for the filtering logic the original duplicated twice:
        # keep only rows that have at least two columns.
        with open(path, "r", encoding="utf-8") as fh:
            return [row for row in csv.reader(fh) if len(row) > 1]

    file1_data = _read_rows(up_path)
    file2_data = _read_rows(down_path)
    # Keep the first file's header; skip the second file's header row.
    merged_data = file1_data + file2_data[1:]

    with open(output_file_path, "w", encoding="utf-8", newline="") as output_file:
        csv.writer(output_file).writerows(merged_data)

    print(f"文件已合并并保存为: {output_file_path}")


# f_path = "/home/gj/Download/新建文件夹/TLS/topic_kako_data_20250206-120002_20250224-120002.json"
# split_json_by_date(f_path)
#
# csv_path = r"D:\GJ\项目\事故检测\模拟数据\sn_ip_1"
# directory = r"D:\GJ\项目\事故检测\模拟数据\timestamp_all"
# if not os.path.exists(csv_path):
#     os.makedirs(csv_path)
# for file_path in get_all_files(directory, extension=".json"):
#     daily_file_path = file_path
#     split_json_by_sn(daily_file_path, csv_path)

# Mapping from full camera IP address to gantry serial number (sn).
ip_sn = {
    "10.143.187.150": "TLS_3_1",
    "10.143.187.154": "TLS_3_2",
    "10.143.187.158": "TLS_4_1",
    "10.143.187.162": "TLS_4_2",
    "10.143.187.166": "TLS_1_1",
    "10.143.187.169": "TLS_1_2",
    "10.143.187.172": "TLS_2_1",
    "10.143.187.175": "TLS_2_2"
}
# Same mapping keyed by the last IP octet only — matches the token that
# split_json_by_date_ip parses out of the raw log filename (e.g. the "175"
# in "topic_kako_data_175_...").
ip_sn_2 = {
    "150": "TLS_3_1",
    "154": "TLS_3_2",
    "158": "TLS_4_1",
    "162": "TLS_4_2",
    "166": "TLS_1_1",
    "169": "TLS_1_2",
    "172": "TLS_2_1",
    "175": "TLS_2_2"
}
# Raw per-camera capture log; the "175" filename token selects the sn via ip_sn_2.
ip_path = r'/home/gj/Download/新建文件夹/TLS/topic_kako_data_175_20250206-120002_20250224-120002.json'
# split_json_by_date_ip(ip_path, ip_sn_2)


# Ordered (upstream CSV, downstream CSV) pairs describing adjacent gantry
# sections; consumed by merge_csv_files to build per-section files.
sn_qujian = [
    ["HW7.csv", "TLS_1_1.csv"],
    ["TLS_1_1.csv", "TLS_1_2.csv"],
    ["TLS_1_2.csv", "TLS_2_1.csv"],
    ["TLS_2_1.csv", "TLS_2_2.csv"],
    ["TLS_2_2.csv", "ZW4.csv"],
    ["ZW4.csv", "ZW3.csv"],
    ["ZW3.csv", "HW3.csv"],
    ["HW1.csv", "ZW1.csv"],
    ["ZW1.csv", "ZW2.csv"],
    ["ZW2.csv", "TLS_3_1.csv"],
    ["TLS_3_1.csv", "TLS_3_2.csv"],
    ["TLS_3_2.csv", "TLS_4_1.csv"],
    ["TLS_4_1.csv", "TLS_4_2.csv"],
    ["TLS_4_2.csv", "HW6.csv"]
]

# directory = "/home/gj/Download/新建文件夹/TLS/sn_ip"
# for file_path in get_direct_subdirectories(directory):
#     print(file_path)
    # for file1_path, file2_path in sn_qujian:
    #     merge_csv_files(file_path, file1_path, file2_path)
    #     print(file1_path, file2_path)

# new_file = r'/home/gj/Download/新建文件夹/TLS/new'
# original_file = r'/home/gj/Download/新建文件夹/TLS/timestamp'
# for original_file_path, filename in get_all_files(original_file, '.json'):
#     new_file_path = os.path.join(new_file, filename)
#     get_day_data(original_file_path, new_file_path)

# def first_column_generator(filename):
#     with open(filename, newline='', encoding='utf-8') as csvfile:
#         csvreader = csv.reader(csvfile, delimiter=',')
#         for row in csvreader:
#             yield row[0] if row else None  # 如果行为空，则返回None或根据需要处理空行的情况
#
# for value in first_column_generator('/home/gj/Download/新建文件夹/sn_ip/HW1,ZW1-20250124.csv'):
#     print(value)  # 打印第一列的每个值

# path = r'D:\GJ\项目\事故检测\模拟数据\kako_data.log'
# save_path = r"D:\GJ\项目\事故检测\模拟数据\kako_data.json"
# with open(save_path, 'w', encoding='utf-8') as ff:
#     with open(path, 'r', encoding='utf-8') as f:
#         for line in f:
#             data = json.loads(line)
#             data = json.loads(data)
#             data_ = json.dumps(data, ensure_ascii=False)
#             ff.write(data_ + '\n')

# 不需要
# split_json_by_date(save_path)

# csv_path = r"D:\GJ\项目\事故检测\模拟数据\sn_ip"
# directory = r"D:\GJ\项目\事故检测\模拟数据\timestamp"
# if not os.path.exists(csv_path):
#     os.makedirs(csv_path)
# if not os.path.exists(directory):
#     os.makedirs(directory)
# for file_path in get_all_files(directory, extension=".json"):
#     daily_file_path = file_path
#     split_json_by_sn(daily_file_path, csv_path)
#
# directory = r"D:\GJ\项目\事故检测\模拟数据\sn_ip"
# for file_path in get_direct_subdirectories(directory):
#     print(file_path)
#     for file1_path, file2_path in sn_qujian:
#         merge_csv_files(file_path, file1_path, file2_path)
#         print(file1_path, file2_path)
