#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：大路网网格化仿真
@File ：utils.py
@IDE ：PyCharm
@Author ：xinyngjie
@Date ：2024/12/26 11:11
'''

import csv
import os
import shutil
from datetime import datetime
from math import *
import chardet
import pandas as pd
from 相机位置 import Format


def get_encoding(input_file):
    """
    Detect the character encoding of a file.

    Reads the whole file as bytes and runs chardet's detector over it.

    :param input_file: path of the file to inspect
    :return: detected encoding name (may be None if detection fails)
    """
    with open(input_file, 'rb') as handle:
        detection = chardet.detect(handle.read())
    return detection['encoding']


# def convert_encoding_with_backup(file_path, from_encoding, to_encoding='utf-8'):
#     """
#     将文件备份后，文件编码更改为utf-8
#     :param file_path: 待修改的文件地址
#     :param from_encoding: 非utf-8编码的编码类型
#     :param to_encoding: 修改后的编码类型，默认：utf-8
#     :return:
#     """
#     backup_file_path = file_path + '.bak'
#     try:
#         # 先备份原文件
#         shutil.copy2(file_path, backup_file_path)
#         print(f"备份文件已保存为 {backup_file_path}")
#         # 创建临时文件路径
#         temp_file_path = file_path + '.tmp'
#         try:
#             # 使用 with 语句确保文件正确关闭
#             with open(file_path, 'r', encoding=from_encoding) as infile, \
#                     open(temp_file_path, 'w', encoding=to_encoding) as outfile:
#                 for line in infile:
#                     outfile.write(line)
#             # 替换原文件（原子操作）
#             shutil.move(temp_file_path, file_path)
#             print(f"大文件已成功从 {from_encoding} 转换为 {to_encoding} 并覆盖原文件")
#         except Exception as e:
#             print(f"发生错误: {e}")
#             if os.path.exists(temp_file_path):
#                 os.remove(temp_file_path)  # 清理临时文件
#         print("编码转换完成，原文件已被覆盖")
#     except Exception as e:
#         print(f"发生错误: {e}")


def parse_time(time_str):
    """
    Parse a time string into a datetime using the project-wide Format.

    :param time_str: time string in the project Format
    :return: parsed datetime, or None when the input is missing or malformed
    """
    try:
        parsed = datetime.strptime(time_str, Format)
    except (TypeError, ValueError):
        # None / non-string input raises TypeError; a bad pattern raises
        # ValueError — both are treated as "no usable time".
        return None
    return parsed


def pre(dist_t):
    """
    Pre-process grouped data: flatten each key's list of records into
    one entry per record.

    The first record keeps the original key; every subsequent record gets
    a suffixed key of the form ``"<key>-<index>"`` so entries stay distinct.

    :param dist_t: mapping of key -> list of records
    :return: flattened mapping with one record per (possibly suffixed) key
    """
    new_dict = {}
    for key, records in dist_t.items():
        for i, record in enumerate(records):
            # Index 0 reuses the bare key; later indices are suffixed to
            # avoid collisions.
            new_key = key if i == 0 else f"{key}-{i}"
            new_dict[new_key] = record
    return new_dict


def csv_to_dict(input_file):
    """
    Parse a CSV file and group its rows by license plate ('vlp' column).

    Each row becomes a dict of its non-null columns (excluding 'vlp'),
    appended to the list kept for that row's 'vlp' value.

    :param input_file: path of the CSV file to parse
    :return: dict mapping vlp -> list of per-row dicts
    """
    dict_data = {}
    # Detect the encoding first so non-UTF-8 exports still load.
    encoding = get_encoding(input_file)
    try:
        data = pd.read_csv(input_file, encoding=encoding)
    except UnicodeDecodeError:
        print("无法读取文件")
        raise
    # One dict per row; pd.notna already rejects both NaN and None, so no
    # separate None check is needed.
    for item in data.to_dict(orient='records'):
        row = {key: value for key, value in item.items()
               if key != 'vlp' and pd.notna(value)}
        dict_data.setdefault(item['vlp'], []).append(row)
    return dict_data


def save_dict_csv(result_dict, gid_list, path):
    """
    Save records of one type to a CSV file.

    Each record becomes one row: datetime values are rendered with the
    project Format (truncated to milliseconds) and the 'vlp' column is
    restored from the (possibly '-N'-suffixed) dict key.

    :param result_dict: mapping of vlp (possibly 'vlp-N') -> record dict
        or list of record dicts
    :param gid_list: column names placed right after 'vlp' in the header
    :param path: destination CSV path (written as UTF-8)
    :return: None
    """
    def _flatten(vlp, item):
        # Build one output row: stringify datetime values and restore the
        # raw plate by stripping the '-N' disambiguation suffix.
        row = item.copy()
        for key, value in row.items():
            if isinstance(value, datetime):
                row[key] = value.strftime(Format)[:-3]
        row["vlp"] = vlp.split('-')[0]
        return row

    flattened_data = []
    for vlp, items in result_dict.items():
        if isinstance(items, list):
            flattened_data.extend(_flatten(vlp, item) for item in items)
        else:
            flattened_data.append(_flatten(vlp, items))

    # Header: 'vlp' first, then the grid ids, then any remaining keys in
    # first-seen order.
    fieldnames = ["vlp"] + gid_list
    for row in flattened_data:
        for key in row:
            if key not in fieldnames:
                fieldnames.append(key)

    with open(path, 'w', newline='', encoding='utf-8') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(flattened_data)


def save_csv(data, csv_file_path):
    """
    Write a list of row dictionaries to a CSV file.

    The header is taken from the keys of the first dictionary; an empty
    list yields a file with no header row. Failures are reported on
    stdout instead of being raised (best-effort save).

    :param data: list of dictionaries sharing the same keys
    :param csv_file_path: destination CSV path (written as UTF-8)
    :return: None
    """
    header = data[0].keys() if data else []
    try:
        with open(csv_file_path, mode='w', newline='', encoding='utf-8') as out:
            writer = csv.DictWriter(out, fieldnames=header)
            writer.writeheader()
            writer.writerows(data)
        print(f"数据已成功写入到 {csv_file_path}")
    except Exception as e:
        print(f"写入CSV文件时发生错误: {e}")


def sorted_data(input_file, output_file):
    """
    Re-order the rows of a CSV file chronologically.

    Each row is keyed by its first non-empty string cell after the first
    column, parsed with format '%Y-%m-%d %H:%M:%S.%f'. Rows whose key
    cannot be parsed become NaT and end up last (pandas default).

    :param input_file: path of the CSV file to sort
    :param output_file: path where the sorted CSV is written
    :return: None
    """
    frame = pd.read_csv(input_file, encoding=get_encoding(input_file))

    def first_time(row):
        # Scan columns after the first and return the first string-valued,
        # non-null cell as the sort key candidate.
        for cell in row[1:]:
            if pd.notna(cell) and isinstance(cell, str):
                return cell
        return None

    # Temporary key column: string -> datetime, unparsable values -> NaT.
    frame['first_non_empty_time'] = pd.to_datetime(
        frame.apply(first_time, axis=1),
        format='%Y-%m-%d %H:%M:%S.%f',
        errors='coerce')
    frame = frame.sort_values(by='first_non_empty_time')
    frame.drop(columns=['first_non_empty_time'], inplace=True)
    frame.to_csv(output_file, index=False)

    print(f"Sorted data has been saved to {output_file}")


def n_calc_time_match_point(t, c, max_diff_minutes):
    """
    Linearly score how close a planned time is to an expected time.

    The score falls linearly from 50 (exact match) to 0 as the gap between
    *t* and *c* grows; any gap larger than twice ``max_diff_minutes``
    scores 0.

    :param t: planned insertion time (datetime)
    :param c: expected time (datetime)
    :param max_diff_minutes: timedelta; twice its absolute length is the
        largest gap that still earns a score
    :return: score in [0, 50], rounded to 3 decimals (0 when out of range)
    """
    gap_seconds = abs((t - c).total_seconds())
    # The tolerated window is twice the configured maximum difference.
    window = abs(max_diff_minutes.total_seconds()) * 2.0

    if gap_seconds > window:
        return 0

    score = 50 - (gap_seconds / window) * 50
    return round(float(score), 3)


def n_calc_plate_match_point(plate1, plate2, type_):
    """
    Compute a match score between two license plate strings.

    :param plate1: first plate, string
    :param plate2: second plate, string
    :param type_: whether fuzzy matching is enabled
            type_ = 0 means strict matching: only positions whose characters
            are exactly equal earn full credit
            type_ = 1 means fuzzy matching: commonly confused characters
            (typical OCR misreads or writing errors) earn partial credit
    :return: match score; a score >= 72 is considered a successful match
    """
    if not plate1 or not plate2:
        return 0

    # Determine the minimum length of the two plates
    platelength = min(len(plate1), len(plate2))

    # Skip trivially short plates and the sentinel values used for
    # unrecognized plates ("未识别") and the default placeholder ("默A00000").
    if platelength > 4 and "未识别" not in (plate1, plate2) and "默A00000" not in (plate1, plate2):
        match_points = 0
        nonmatchnum = 0

        # Compare character by character; identical positions score 100,
        # confusable pairs score partial credit when fuzzy mode is on.
        for i in range(platelength):
            if plate1[i] == plate2[i]:
                match_points += 100
            else:
                nonmatchnum += 1
                if type_ == 1:  # Fuzzy matching
                    # 1/I/i are treated as a full match and do not count
                    # as a mismatch.
                    if (plate1[i] in '1Ii' and plate2[i] in '1Ii'):
                        match_points += 100
                        if nonmatchnum > 0:
                            nonmatchnum -= 1
                    elif (plate1[i] in '0DQ' and plate2[i] in '0DQ') or \
                            (plate1[i] in '0DU' and plate2[i] in '0DU') or \
                            (plate1[i] in '0O' and plate2[i] in '0O'):
                        match_points += 75
                    elif (plate1[i] in '2Z' and plate2[i] in '2Z'):
                        match_points += 70
                    elif (plate1[i] in '5S' and plate2[i] in '5S') or \
                            (plate1[i] in '8B' and plate2[i] in '8B'):
                        match_points += 65
                    elif (plate1[i] in 'EF' and plate2[i] in 'EF') or \
                            (plate1[i] in 'G0' and plate2[i] in 'G0') or \
                            (plate1[i] in 'G6' and plate2[i] in 'G6') or \
                            (plate1[i] in 'TY' and plate2[i] in 'TY') or \
                            (plate1[i] in '17T' and plate2[i] in '17T') or \
                            (plate1[i] in 'Z7' and plate2[i] in 'Z7') or \
                            (plate1[i] in 'VY' and plate2[i] in 'VY') or \
                            (plate1[i] in '4A' and plate2[i] in '4A') or \
                            (plate1[i] in 'HN' and plate2[i] in 'HN'):
                        match_points += 30
                    elif i == 0:
                        # Position 0 is the province character: a few
                        # visually similar province glyph pairs earn 70,
                        # any other province mismatch still earns 50.
                        if (plate1[i] in '浙湘' and plate2[i] in '浙湘') or \
                                (plate1[i] in '京琼' and plate2[i] in '京琼') or \
                                (plate1[i] in '鲁晋' and plate2[i] in '鲁晋') or \
                                (plate1[i] in 'H川' and plate2[i] in 'H川') or \
                                (plate1[i] in 'E甘' and plate2[i] in 'E甘') or \
                                (plate1[i] in 'Z云' and plate2[i] in 'Z云'):
                            match_points += 70
                        else:
                            match_points += 50
                else:
                    # Strict mode still forgives 1/I/i confusion in the
                    # mismatch count (but awards no points for it).
                    if (plate1[i] in '1Ii' and plate2[i] in '1Ii'):
                        if nonmatchnum > 0:
                            nonmatchnum -= 1

        # Penalize for more than 4 mismatches
        # NOTE(review): this returns the mismatch count itself (a small
        # value that fails the >=72 threshold) rather than 0 — confirm
        # this is intentional.
        if nonmatchnum > 4:
            return nonmatchnum
        else:
            return match_points // platelength

    return 0


def calculate_distance(jing1, wei1, jing2, wei2):
    """
    Great-circle (haversine) distance between two coordinates, in metres.

    :param jing1: longitude of the first point, degrees
    :param wei1: latitude of the first point, degrees
    :param jing2: longitude of the second point, degrees
    :param wei2: latitude of the second point, degrees
    :return: distance in metres, using a mean Earth radius of 6371.393 km
    """
    lon_a, lat_a = radians(jing1), radians(wei1)
    lon_b, lat_b = radians(jing2), radians(wei2)
    # Haversine formula.
    half_chord = sin((lat_b - lat_a) / 2) ** 2 + \
        cos(lat_a) * cos(lat_b) * sin((lon_b - lon_a) / 2) ** 2
    return 2 * asin(sqrt(half_chord)) * 6371.393 * 1000


def get_timestamp_milliseconds(t_str):
    """
    Convert an ISO 8601 timestamp string to a 13-digit millisecond timestamp.

    NOTE(review): strptime yields a naive datetime, so .timestamp() applies
    the local timezone — confirm callers expect local-time epochs.

    :param t_str: ISO 8601 timestamp string, e.g. '2024-12-26T11:11:00.000'
    :return: milliseconds since the Unix epoch, as an int
    """
    parsed = datetime.strptime(t_str, "%Y-%m-%dT%H:%M:%S.%f")
    return int(parsed.timestamp() * 1000)


def get_dis_list(kako_data=None):
    """
    Collect the downstream distances from the camera data table.

    :param kako_data: mapping of camera id -> camera record; defaults to the
        module-level KAKO_DATA_LIST. NOTE(review): KAKO_DATA_LIST is neither
        defined nor imported in this file — calling with no argument raises
        NameError unless it is injected elsewhere; confirm its origin.
    :return: list of 'dis' values from records with a non-empty 'Downstream'
    """
    if kako_data is None:
        kako_data = KAKO_DATA_LIST
    return [record['Downstream']['dis']
            for record in kako_data.values()
            if record['Downstream']]

