import json
import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist

# Mean Earth radius in km (spherical model; used by the haversine formula below).
EARTH_RADIUS = 6371

# Convert a packed degree-minute-second value to decimal degrees.
def dms_to_deg(dms):
    """Convert a DDDMMSS-packed coordinate to decimal degrees.

    The input encodes degrees*10000 + minutes*100 + seconds, e.g.
    393045 -> 39°30'45" -> 39.5125.

    Parameters
    ----------
    dms : int | float | str
        Packed coordinate; anything accepted by ``int()``.

    Returns
    -------
    float
        Decimal degrees, or ``np.nan`` when the value cannot be parsed.
    """
    try:
        dms = int(dms)
    except (ValueError, TypeError):
        # The original only caught ValueError; a non-numeric type such as
        # None raised TypeError and crashed the whole run. Degrade to NaN
        # (downstream code already treats NaN coordinates as unusable).
        print(f"无法将 {dms} 转换为整数，请检查数据。")
        return np.nan
    # NOTE(review): assumes a non-negative packed value — floor division
    # would mis-split negative DMS values. Source data appears to be
    # east/north only; confirm if western/southern coordinates can occur.
    degrees = dms // 10000
    minutes = (dms % 10000) // 100
    seconds = dms % 100
    return degrees + minutes / 60 + seconds / 3600

# Convert degrees to radians.
def deg2rad(deg):
    """Return *deg* (decimal degrees, scalar or NumPy array) in radians."""
    scaled = deg * np.pi
    return scaled / 180

# Great-circle distance between two lon/lat points (km).
def haversine_distance(lon1, lat1, lon2, lat2):
    """Haversine great-circle distance in kilometres.

    Inputs are decimal degrees; scalars or NumPy arrays are accepted
    (all operations broadcast element-wise).
    """
    rlon1, rlat1, rlon2, rlat2 = (deg2rad(v) for v in (lon1, lat1, lon2, lat2))
    half_dlat = (rlat2 - rlat1) / 2
    half_dlon = (rlon2 - rlon1) / 2
    hav = np.sin(half_dlat) ** 2 + np.cos(rlat1) * np.cos(rlat2) * np.sin(half_dlon) ** 2
    return EARTH_RADIUS * (2 * np.arcsin(np.sqrt(hav)))

# Neighbour-search radius in km; module-level so the IDW interpolation and
# the QC loop below share the same value.
search_radius = 25

# Leave-one-out inverse-distance-weighted (IDW) interpolation.
def idw_interpolation(coords, precipitations):
    """Estimate each station's value from its neighbours via IDW.

    For every station the interpolation uses only the *other* stations
    within ``search_radius`` km whose value is not the missing-data
    sentinel 999999 (leave-one-out cross validation).

    Parameters
    ----------
    coords : np.ndarray, shape (n, 2)
        Station coordinates as [lon, lat] in decimal degrees; rows may
        contain NaN for unparsable coordinates (those neighbours are
        automatically excluded by the distance filter).
    precipitations : np.ndarray, shape (n,)
        Observed precipitation per station.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        (interpolated values, standard deviation of the neighbours used),
        both length n; NaN where no valid neighbour exists.
    """
    n = len(coords)
    interpolated_values = []
    std_devs = []  # per-station std-dev of the contributing neighbours
    for i in range(n):
        current_coord = coords[i]
        other_coords = np.delete(coords, i, axis=0)
        other_precipitations = np.delete(precipitations, i)
        # haversine_distance is pure NumPy, so one vectorized call replaces
        # the per-neighbour Python loop (same values, O(n) native ops).
        distances = haversine_distance(
            current_coord[0], current_coord[1],
            other_coords[:, 0], other_coords[:, 1],
        )
        # Neighbours inside the radius with a non-missing value. NaN
        # distances compare False and are therefore excluded.
        valid_mask = (distances <= search_radius) & (other_precipitations != 999999)
        valid_distances = distances[valid_mask]
        valid_precipitations = other_precipitations[valid_mask].astype(float)

        # Power parameter p grows with local station density.
        density = np.count_nonzero(valid_mask) / (np.pi * search_radius ** 2)
        if density < 0.001:
            p = 1
        elif density < 0.01:
            p = 2
        else:
            p = 3

        if valid_distances.size == 0:
            # No usable neighbour: both outputs are NaN for this station.
            interpolated_values.append(np.nan)
            std_devs.append(np.nan)
            continue

        # Spread of the contributing neighbours (used by the QC step).
        std_devs.append(np.std(valid_precipitations))

        # 1e-8 guards against division by zero for co-located stations.
        weights = np.asarray(1 / (valid_distances ** p + 1e-8), dtype=float)

        # Any NaN would poison the weighted sum; bail out to NaN instead.
        if np.isnan(weights).any() or np.isnan(valid_precipitations).any():
            interpolated_values.append(np.nan)
            continue

        weight_sum = np.sum(weights)
        if weight_sum == 0:
            interpolated_values.append(np.nan)
            continue

        interpolated_values.append(np.sum(weights * valid_precipitations) / weight_sum)

    return np.array(interpolated_values), np.array(std_devs)

# ---- Data loading and filtering (script body) ----

# Station whitelist: only station IDs listed in stations.xlsx are processed.
stations_df = pd.read_excel('stations.xlsx')
valid_station_ids = set(stations_df['站号'])

# Raw hourly observations.
with open('data.json', 'r', encoding='utf-8') as f:
    data = json.load(f)

# Drop records whose PRE_1h equals the missing-data sentinel 999999.
valid_data = [item for item in data if float(item['PRE_1h']) != 999999]

# Optional 12-hour product filter (currently disabled):
# valid_data = [item for item in valid_data if 'PRE_12h' in item and float(item['PRE_12h']) != 999999]

# Re-verify the filter. An explicit raise replaces the original `assert`
# so the check still runs under `python -O` (asserts are stripped there).
for item in valid_data:
    if float(item['PRE_1h']) == 999999:
        raise ValueError(f"数据过滤失败，站点 {item.get('Station_Id_C', '未知')} 的 PRE_1h 降水值为 999999")
    # if float(item['PRE_12h']) == 999999:
    #     raise ValueError(f"数据过滤失败，站点 {item.get('Station_Id_C', '未知')} 的 PRE_12h 降水值为 999999")

# Keep only whitelisted stations.
valid_data = [item for item in valid_data if item.get('Station_Id_C', '') in valid_station_ids]

# Coordinates arrive as packed DMS integers; convert to decimal degrees
# (unparsable values become NaN and are excluded by the radius filter).
coords = np.array([[dms_to_deg(item['Lon']), dms_to_deg(item['Lat'])] for item in valid_data])
# PRE_1h observations as floats.
precipitations_1h = np.array([float(item['PRE_1h']) for item in valid_data])
# PRE_12h observations (disabled):
# precipitations_12h = np.array([float(item['PRE_12h']) for item in valid_data])

# Leave-one-out IDW estimate for PRE_1h.
p_estimated_1h, std_devs_1h = idw_interpolation(coords, precipitations_1h)

# Leave-one-out IDW estimate for PRE_12h (disabled):
# p_estimated_12h, std_devs_12h = idw_interpolation(coords, precipitations_12h)

# ---- QC classification for PRE_1h ----
# A station is flagged when its observation differs from the IDW estimate
# by more than 5 mm absolutely, more than 50 % relatively, AND more than
# the neighbours' standard deviation.
station_status_1h = []
abs_errors_1h = []  # absolute error |estimate - observation| per station
rel_errors_1h = []  # relative error per station (defined as 0 when the observation is 0)
for i in range(len(valid_data)):
    abs_error = abs(p_estimated_1h[i] - precipitations_1h[i])
    # NOTE(review): a zero observation forces rel_error = 0, so such
    # stations can never be flagged regardless of abs_error — confirm
    # that this is the intended QC policy.
    rel_error = abs_error / precipitations_1h[i] if precipitations_1h[i] != 0 else 0
    abs_errors_1h.append(abs_error)
    rel_errors_1h.append(rel_error)
    # A NaN estimate (no neighbours) fails every comparison, so the
    # station falls through to '正常' (normal).
    if abs_error > 5 and rel_error > 0.5 and abs_error > std_devs_1h[i]:
        # Re-collect the same neighbour set the interpolation used, to
        # compare the observation against the local maximum.
        current_coord = coords[i]
        other_coords = np.delete(coords, i, axis=0)
        other_precipitations = np.delete(precipitations_1h, i)
        distances = [haversine_distance(current_coord[0], current_coord[1], coord[0], coord[1]) for coord in other_coords]
        # Same radius and missing-value filter as idw_interpolation.
        valid_indices = [j for j, dist in enumerate(distances) if dist <= search_radius and other_precipitations[j] != 999999]
        valid_precipitations = np.array([other_precipitations[j] for j in valid_indices])
        # Observation below the neighbourhood maximum -> '异常' (abnormal);
        # otherwise '疑误' (doubtful).
        if precipitations_1h[i] < np.max(valid_precipitations):
            station_status_1h.append('异常')
        else:
            station_status_1h.append('疑误')
    else:
        station_status_1h.append('正常')

# ---- QC classification for PRE_12h (disabled; mirrors the loop above) ----
# station_status_12h = []
# abs_errors_12h = []  # 存储每个站点的绝对误差
# rel_errors_12h = []  # 存储每个站点的相对误差
# for i in range(len(valid_data)):
#     abs_error = abs(p_estimated_12h[i] - precipitations_12h[i])
#     rel_error = abs_error / precipitations_12h[i] if precipitations_12h[i] != 0 else 0
#     abs_errors_12h.append(abs_error)
#     rel_errors_12h.append(rel_error)
#     if abs_error > 5 and rel_error > 0.5 and abs_error > std_devs_12h[i]:
#         current_coord = coords[i]
#         other_coords = np.delete(coords, i, axis=0)
#         other_precipitations = np.delete(precipitations_12h, i)
#         distances = [haversine_distance(current_coord[0], current_coord[1], coord[0], coord[1]) for coord in other_coords]
#         # 这里现在可以正常使用 search_radius 变量
#         valid_indices = [j for j, dist in enumerate(distances) if dist <= search_radius and other_precipitations[j] != 999999]
#         valid_precipitations = np.array([other_precipitations[j] for j in valid_indices])
#         if precipitations_12h[i] < np.max(valid_precipitations):
#             station_status_12h.append('异常')
#         else:
#             station_status_12h.append('疑误')
#     else:
#         station_status_12h.append('正常')

# Assemble the output table. Column keys are user-facing (Chinese) and
# must match downstream consumers of the Excel file — do not rename.
result_data = {
    'Station_Name': [item.get('Station_Name', '') for item in valid_data],
    'Station_Id_C': [item.get('Station_Id_C', '') for item in valid_data],
    'Lon': [item['Lon'] for item in valid_data],
    'Lat': [item['Lat'] for item in valid_data],
    'PRE_1h': precipitations_1h,
    'p_估算': p_estimated_1h,
    '站点状态': station_status_1h,
    '标准差_1h': std_devs_1h,
    '绝对误差_1h': abs_errors_1h,
    '相对误差_1h': rel_errors_1h,
    # 'PRE_12h': precipitations_12h,
    # 'p12_估算': p_estimated_12h,
    # 'p12_站点状态': station_status_12h,
    # '标准差_12h': std_devs_12h,
    # '绝对误差_12h': abs_errors_12h,
    # '相对误差_12h': rel_errors_12h
}

# Write the results to an Excel file.
df = pd.DataFrame(result_data)
df.to_excel('result_and12.xlsx', index=False)