# # 绘制圆形
# import numpy as np
# import netCDF4 as nc
# import matplotlib.pyplot as plt
# import cartopy.crs as ccrs
# import cartopy.feature as cfeature
# import pandas as pd
# #
# # # 加载 NetCDF 数据
# # nf = nc.Dataset(r'/mnt/datastore/liudddata/result/20200104new/2020010104_predicted_2d.nc', 'r')
# # # nf = nc.Dataset(r'/mnt/datastore/liudddata/cloudsat_data/cloudsat_seasonal35_mean_2019MAM.nc', 'r')
# # cth_data = np.ma.getdata(nf.variables['cth'][:])  # 转换为普通数组，使用 cth_data 保持一致性
# # lat = nf.variables['lat'][:]
# # lon = nf.variables['lon'][:]
# # nf.close()
# #
# # # 处理无效值（假设填充值为-999或其他特殊值）
# # cth_data = np.ma.masked_invalid(cth_data)  # 自动屏蔽 NaN 和 inf
# # cth_data = np.clip(cth_data, 0, None)      # 去掉负值
# #
# # # 读取 CloudSat 经纬度信息
# # data_c = pd.read_csv("/mnt/datastore/liudddata/cloudsat_data/cloudsat_20200104time.csv")  # 替换为你的数据文件路径
# # # 将 time 列转换为 datetime 类型
# # data_c['time'] = pd.to_datetime(data_c['time'])
# #
# # # 筛选出 2020 年 1 月 1 日 4 时到 5 时的数据
# # start_time = pd.Timestamp('2020-01-01 04:00:00')
# # end_time = pd.Timestamp('2020-01-01 04:15:00')
# # filtered_data = data_c[(data_c['time'] >= start_time) & (data_c['time'] < end_time)]
# # lon_c = filtered_data['longitude']  # 使用筛选后的数据提取经纬度
# # lat_c = filtered_data['latitude']
# #
# # # 创建地图画布，使用正交投影
# # fig = plt.figure(figsize=(12, 8))
# # ax = fig.add_subplot(1, 1, 1, projection=ccrs.Orthographic(central_latitude=0, central_longitude=104.7))
# #
# # # 添加地理特征
# # ax.add_feature(cfeature.COASTLINE)
# # ax.add_feature(cfeature.BORDERS, linestyle=':')
# # ax.add_feature(cfeature.LAND, facecolor='white')
# #
# # # 添加经纬度线
# # parallels = range(-90, 91, 30)  # 每 30 度一个纬线
# # meridians = range(-180, 181, 30)  # 每 30 度一个经线
# # gl = ax.gridlines(draw_labels=True, color='gray', linestyle='--', xlocs=meridians, ylocs=parallels)
# # gl.top_labels = False
# # gl.right_labels = False
# # gl.xlabel_style = {'size': 10}
# # gl.ylabel_style = {'size': 10}
# #
# # # 使用 contourf 绘制云高分布
# # levels = np.linspace(0, cth_data.max(), 10)  # 自动设置 10 个色阶
# # contour = ax.contourf(lon, lat, cth_data,
# #                       levels=levels,
# #                       cmap='Blues',
# #                       extend='both',        # 扩展颜色条箭头
# #                       transform=ccrs.PlateCarree())
# #
# # # 转换 CloudSat 经纬度为地图坐标
# # coords = ax.projection.transform_points(ccrs.PlateCarree(), lon_c.values, lat_c.values)
# # x_c = coords[:, 0]
# # y_c = coords[:, 1]
# #
# # # 绘制 CloudSat 轨迹
# # ax.plot(x_c, y_c, color='red', linewidth=2, label='CloudSat Trajectory')  # 绘制轨迹
# #
# # # 添加颜色条
# # cbar = plt.colorbar(contour, orientation='vertical', pad=0.01, aspect=20)
# # cbar.set_label('Cloud Top Height (meters)')
# #
# # # 设置标题
# # ax.set_title('Cloud Top Height Distribution (2020-01-01 04UTC)')
# #
# # # 添加图例
# # plt.legend()
# #
# # # 保存或显示图像
# # plt.savefig('cth_20200104.png', dpi=300, bbox_inches='tight')
# # plt.show()
#
# import numpy as np
# import netCDF4 as nc
# import matplotlib.pyplot as plt
# import cartopy.crs as ccrs
# import cartopy.feature as cfeature
#
# # 加载数据
# nf = nc.Dataset(r'/mnt/datastore/liudddata/result/20200104new/2020011505_predicted_2d.nc', 'r')
# cbh_data = np.ma.getdata(nf.variables['predicted'][:])  # 转换为普通数组
# lat = nf.variables['lat'][:]
# lon = nf.variables['lon'][:]
# nf.close()
#
# # 处理无效值（假设填充值为-999或其他特殊值）
# cbh_data = np.ma.masked_invalid(cbh_data)  # 自动屏蔽NaN和inf
# cbh_data = np.clip(cbh_data, 0, None)      # 去掉负值
#
# # 读取 CloudSat 经纬度信息
# data_c = pd.read_csv("/mnt/datastore/liudddata/cloudsat_data/cloudsat_20200104time.csv")  # 替换为你的数据文件路径
# # data_c = pd.read_csv("/mnt/datastore/liudddata/cloudsat_data/merged_201902_202004_cloudsat.csv")  # 替换为你的数据文件路径
# # 将 time 列转换为 datetime 类型
# data_c['time'] = pd.to_datetime(data_c['time'])
#
# # 筛选出 2020 年 1 月 1 日 4 时到 5 时的数据
# start_time = pd.Timestamp('2020-01-15 05:00:00')
# end_time = pd.Timestamp('2020-01-15 05:15:00')
# filtered_data = data_c[(data_c['time'] >= start_time) & (data_c['time'] < end_time)]
# lon_c = filtered_data['longitude']  # 使用筛选后的数据提取经纬度
# lat_c = filtered_data['latitude']
#
# # 创建地图画布
# fig = plt.figure(figsize=(12, 8))
# ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
#
# # 添加地理特征
# ax.add_feature(cfeature.COASTLINE)
# ax.add_feature(cfeature.BORDERS, linestyle=':')
# ax.add_feature(cfeature.LAND, facecolor='lightgray')
#
# # 使用contourf绘制云高分布
# levels = np.linspace(0, cbh_data.max(), 20)  # 自动设置20个色阶
# contour = ax.contourf(lon, lat, cbh_data,
#                       levels=levels,
#                       cmap='Blues',
#                       extend='both',        # 扩展颜色条箭头
#                       transform=ccrs.PlateCarree())
#
# # 转换 CloudSat 经纬度为地图坐标
# coords = ax.projection.transform_points(ccrs.PlateCarree(), lon_c.values, lat_c.values)
# x_c = coords[:, 0]
# y_c = coords[:, 1]
#
# # 绘制 CloudSat 轨迹
# ax.plot(x_c, y_c, color='red', linewidth=2, label='CloudSat Trajectory')  # 绘制轨迹
#
# # 添加颜色条
# cbar = plt.colorbar(contour, orientation='horizontal', pad=0.05, aspect=30)
# cbar.set_label('Cloud Base Height (meters)')
#
# # 设置标题和范围
# ax.set_title('Cloud Base Height Distribution (2020-01-15 05UTC)')
# ax.set_global()  # 显示全球范围
#
# # 保存或显示图像
# plt.savefig('cbh_2020011505.png', dpi=300, bbox_inches='tight')
# plt.show()



# import numpy as np
# import matplotlib.pyplot as plt
# from datetime import datetime
# import pandas as pd
# import netCDF4 as nc
# from scipy.spatial import cKDTree
#
#
# # def read_cloudsat_data(fname):
# #     """读取CloudSat数据"""
# #     f = nc.Dataset(fname, 'r')
# #     lon = f.variables['longitude'][:]
# #     lat = f.variables['latitude'][:]
# #     elv = f.variables['elevation'][:]
# #     data = f.variables['Radar_Reflectivity'][:]
# #     height = f.variables['Height'][:]
# #     time = f.variables['time'][:]
# #     f.close()
# #     return lon, lat, elv, data, height, time
#
#
# def read_fy_data(filename):
#     """读取风云数据"""
#     cth_file = nc.Dataset(filename, 'r')
#     cth_data = cth_file.variables['cth'][:]
#     cbh_data = cth_file.variables['predicted'][:]
#     cth_file.close()
#     return cth_data , cbh_data
#
#
# def read_coordinate_data(coord_file_name):
#     """读取地理坐标数据"""
#     coord_file_open = nc.Dataset(coord_file_name, 'r')
#     lat_fy = coord_file_open.variables['lat'][:, :].T
#     lon_fy = coord_file_open.variables['lon'][:, :].T
#     lat_fy[np.isnan(lat_fy)] = -90.
#     lon_fy[np.isnan(lon_fy)] = 360.
#     coord_file_open.close()
#     return lat_fy, lon_fy
#
#
# def transform_coordinates(lat_fy, lon_fy):
#     """转换坐标为笛卡尔坐标系"""
#     # 修正经纬度顺序
#     fy_coords = np.deg2rad(np.vstack([lon_fy.ravel(), lat_fy.ravel()]).T)
#     phi = np.pi / 2 - fy_coords[:, 1]
#     theta = fy_coords[:, 0]
#     x = np.sin(phi) * np.cos(theta)
#     y = np.sin(phi) * np.sin(theta)
#     z = np.cos(phi)
#     fy_coords_cartesian = np.column_stack([x, y, z])
#     is_finite = np.isfinite(fy_coords_cartesian).all(axis=1)
#     fy_coords_cartesian_clean = fy_coords_cartesian[is_finite]
#     return fy_coords_cartesian_clean
#
#
# def match_data(lat_t, lon_t, fy_coords_cartesian_clean, lat_fy, lon_fy, cth_data,cbh_data):
#     """进行数据空间匹配"""
#     radius = 2 / 6371
#     latitudes, longitudes, cth_values,cbh_values = [], [], [],[]
#     tree = cKDTree(fy_coords_cartesian_clean)
#     for lat, lon in zip(lat_t, lon_t):
#         point = np.deg2rad(np.array([lat, lon]))
#         phi = np.pi / 2 - point[1]
#         theta = point[0]
#         point_cartesian = np.array([np.sin(phi) * np.cos(theta), np.sin(phi) * np.sin(theta), np.cos(phi)])
#         indices = tree.query_ball_point(point_cartesian, r=radius)
#         if indices:
#             for idx in indices:
#                 lat_idx, lon_idx = np.unravel_index(idx, lat_fy.shape)
#                 latitudes.append(lat_fy[lat_idx, lon_idx])
#                 longitudes.append(lon_fy[lat_idx, lon_idx])
#                 cth_value = cth_data[lat_idx, lon_idx]
#                 cbh_value = cbh_data[lat_idx, lon_idx]
#                 if np.isfinite(cth_value):
#                     cth_values.append(cth_value * 0.001)
#                 else:
#                     cth_values.append(np.nan)
#                 if np.isfinite(cbh_value):
#                     cbh_values.append(cbh_value * 0.001)
#                 else:
#                     cbh_values.append(np.nan)
#                 # if np.isfinite(cth_value,cbh_value):
#                 #     cth_values.append(cth_value * 0.001)
#                 #     cbh_value.append(cbh_value * 0.001)
#                 # else:
#                 #     cth_values.append(np.nan)
#         else:
#             latitudes.append(np.nan)
#             longitudes.append(np.nan)
#             cth_values.append(np.nan)
#             cbh_values.append(np.nan)
#     return latitudes, longitudes, cth_values,cbh_values
#
#
# if __name__ == '__main__':
#     # cloudsat_fname = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_GEOPROF/cloudsat_2020_1_4/2020015042112_73067_CS_2B-GEOPROF_GRANULE_P1_R05_E09_F00.hdf'
#     fy_filename = "/mnt/datastore/liudddata/result/20200104new/2020010104_predicted_2d.nc"
#     coord_file_name = 'FY4A_coordinates.nc'
#
#     # lon, lat, elv, data, height, time = read_cloudsat_data(cloudsat_fname)
#     cth_data,cbh_data = read_fy_data(fy_filename)
#     lat_fy, lon_fy = read_coordinate_data(coord_file_name)
#     fy_coords_cartesian_clean = transform_coordinates(lat_fy, lon_fy)
#
#     # 读取 CloudSat 经纬度信息
#     data_c = pd.read_csv("/mnt/datastore/liudddata/cloudsat_data/cloudsat_20200104time.csv")  # 替换为你的数据文件路径
#     # 将 time 列转换为 datetime 类型
#     data_c['time'] = pd.to_datetime(data_c['time'])
#
#     # 筛选出 2020 年 1 月 1 日 4 时到 5 时的数据
#     start_time = pd.Timestamp('2020-01-01 04:00:00')
#     end_time = pd.Timestamp('2020-01-01 04:15:00')
#     filtered_data = data_c[(data_c['time'] >= start_time) & (data_c['time'] < end_time)]
#     lon_t = filtered_data['longitude']  # 使用筛选后的数据提取经纬度
#     lat_t = filtered_data['latitude']
#     cloudsat_cth = filtered_data['cloudsat_cth']
#     cloudsat_cbh = filtered_data['cloudsat_cbh']
#
#     # latitudes, longitudes, cth_values,cbh_values = match_data(lat_t, lon_t, fy_coords_cartesian_clean, lat_fy, lon_fy, cth_data)
#     latitudes, longitudes, cth_values, cbh_values = match_data(lat_t, lon_t, fy_coords_cartesian_clean, lat_fy, lon_fy,
#                                                                cth_data, cbh_data)
#
#     # fy_data = {
#     #     'Latitude': latitudes,
#     #     'Longitude': longitudes,
#     #     'fy_cth': cth_values,
#     #     'fy_cbh': cbh_values
#     # }
#     # 创建字典来存储数据，包括 CloudSat 数据
#     fy_data = {
#         # 'CloudSat_Latitude': lat_t,  # CloudSat 纬度
#         # 'CloudSat_Longitude': lon_t,  # CloudSat 经度
#         'CloudSat_CTH': cloudsat_cth,  # CloudSat 云顶高度
#         'CloudSat_CBH': cloudsat_cbh,  # CloudSat 云底高度
#         'FY_Latitude': latitudes,  # 风云数据纬度
#         'FY_Longitude': longitudes,  # 风云数据经度
#         'fy_cth': cth_values,  # 风云云顶高度
#         'fy_cbh': cbh_values  # 风云云底高度
#     }
#
#     df = pd.DataFrame(fy_data)
#     # 过滤掉cloudsat_cbh列中的空值
#     df = df.dropna(subset=['fy_cbh'])
#     csv_filepath = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_010104match.csv'
#     df.to_csv(csv_filepath, index=False)

'''
Spatio-temporal matching of CloudSat observations against FY satellite data (single-file version).
'''
# import numpy as np
# import matplotlib.pyplot as plt
# from datetime import datetime
# import pandas as pd
# import netCDF4 as nc
# from scipy.spatial import cKDTree
#
#
# def read_fy_data(filename):
#     """读取风云数据"""
#     cth_file = nc.Dataset(filename, 'r')
#     cth_data = cth_file.variables['cth'][:]
#     cbh_data = cth_file.variables['predicted'][:]
#     cth_file.close()
#     return cth_data, cbh_data
#
#
# def read_coordinate_data(coord_file_name):
#     """读取地理坐标数据"""
#     coord_file_open = nc.Dataset(coord_file_name, 'r')
#     lat_fy = coord_file_open.variables['lat'][:, :].T
#     lon_fy = coord_file_open.variables['lon'][:, :].T
#     lat_fy[np.isnan(lat_fy)] = -90.
#     lon_fy[np.isnan(lon_fy)] = 360.
#     coord_file_open.close()
#     return lat_fy, lon_fy
#
#
# def transform_coordinates(lat_fy, lon_fy):
#     """转换坐标为笛卡尔坐标系"""
#     fy_coords = np.deg2rad(np.vstack([lon_fy.ravel(), lat_fy.ravel()]).T)
#     phi = np.pi / 2 - fy_coords[:, 1]
#     theta = fy_coords[:, 0]
#     x = np.sin(phi) * np.cos(theta)
#     y = np.sin(phi) * np.sin(theta)
#     z = np.cos(phi)
#     fy_coords_cartesian = np.column_stack([x, y, z])
#     is_finite = np.isfinite(fy_coords_cartesian).all(axis=1)
#     return fy_coords_cartesian[is_finite], is_finite
#
#
# def match_data(lat_t, lon_t, cloudsat_cth, cloudsat_cbh, fy_coords_cartesian_clean, lat_fy, lon_fy, cth_data, cbh_data,
#                is_finite):
#     """进行数据空间匹配"""
#     radius = 2 / 6371  # 匹配半径（2公里）
#     # 初始化存储匹配结果的列表
#     cloudsat_lats, cloudsat_lons = [], []
#     cloudsat_cths, cloudsat_cbhs = [], []
#     fy_lats, fy_lons = [], []
#     fy_cth_values, fy_cbh_values = [], []
#
#     # 重建原始索引映射
#     original_indices = np.arange(lat_fy.size)[is_finite]
#
#     # 构建KD树
#     tree = cKDTree(fy_coords_cartesian_clean)
#
#     for i, (lat, lon) in enumerate(zip(lat_t, lon_t)):
#         # 转换当前CloudSat点为笛卡尔坐标
#         point = np.deg2rad([lon, lat])  # 注意经纬度顺序
#         phi = np.pi / 2 - point[1]
#         theta = point[0]
#         point_cartesian = np.array([
#             np.sin(phi) * np.cos(theta),
#             np.sin(phi) * np.sin(theta),
#             np.cos(phi)
#         ])
#
#         # 查询匹配点
#         indices = tree.query_ball_point(point_cartesian, r=radius)
#
#         for idx in indices:
#             # 获取原始二维索引
#             original_idx = original_indices[idx]
#             lat_idx, lon_idx = np.unravel_index(original_idx, lat_fy.shape)
#
#             # 获取风云数据
#             fy_lat = lat_fy[lat_idx, lon_idx]
#             fy_lon = lon_fy[lat_idx, lon_idx]
#             fy_cth = cth_data[lat_idx, lon_idx]  if not np.isnan(cth_data[lat_idx, lon_idx]) else np.nan
#             fy_cbh = cbh_data[lat_idx, lon_idx]  if not np.isnan(cbh_data[lat_idx, lon_idx]) else np.nan
#
#             # 添加CloudSat数据（当前点的数据）
#             cloudsat_lats.append(lat)
#             cloudsat_lons.append(lon)
#             cloudsat_cths.append(cloudsat_cth[i])
#             cloudsat_cbhs.append(cloudsat_cbh[i])
#
#             # 添加风云数据
#             fy_lats.append(fy_lat)
#             fy_lons.append(fy_lon)
#             fy_cth_values.append(fy_cth)
#             fy_cbh_values.append(fy_cbh)
#
#     return (cloudsat_lats, cloudsat_lons, cloudsat_cths, cloudsat_cbhs,
#             fy_lats, fy_lons, fy_cth_values, fy_cbh_values)
#
#
# if __name__ == '__main__':
#     # 输入文件路径
#     fy_filename = "/mnt/datastore/liudddata/result/20200104new/2020011505_predicted_2d.nc"
#     coord_file_name = 'FY4A_coordinates.nc'
#     cloudsat_csv_path = "/mnt/datastore/liudddata/cloudsat_data/cloudsat_20200104time.csv"
#
#     # 读取风云数据
#     cth_data, cbh_data = read_fy_data(fy_filename)
#     lat_fy, lon_fy = read_coordinate_data(coord_file_name)
#     fy_coords_cartesian_clean, is_finite = transform_coordinates(lat_fy, lon_fy)
#
#     # 读取并处理CloudSat数据
#     data_c = pd.read_csv(cloudsat_csv_path)
#     data_c['time'] = pd.to_datetime(data_c['time'])
#     start_time = pd.Timestamp('2020-01-15 05:00:00')
#     end_time = pd.Timestamp('2020-01-15 05:15:00')
#     filtered_data = data_c[(data_c['time'] >= start_time) & (data_c['time'] < end_time)]
#
#     # 准备匹配数据
#     lat_t = filtered_data['latitude'].values
#     lon_t = filtered_data['longitude'].values
#     cloudsat_cth = filtered_data['cloudsat_cth'].values
#     cloudsat_cbh = filtered_data['cloudsat_cbh'].values
#     time_c = filtered_data['time'].values
#
#     # 执行匹配
#     matched_data = match_data(lat_t, lon_t, cloudsat_cth, cloudsat_cbh,
#                               fy_coords_cartesian_clean, lat_fy, lon_fy,
#                               cth_data, cbh_data, is_finite)
#
#     # 创建DataFrame
#     df = pd.DataFrame({
#         'CloudSat_Latitude': matched_data[0],
#         'CloudSat_Longitude': matched_data[1],
#         'CloudSat_CTH': matched_data[2],
#         'CloudSat_CBH': matched_data[3],
#         'FY_Latitude': matched_data[4],
#         'FY_Longitude': matched_data[5],
#         'FY_CTH': matched_data[6],
#         'FY_CBH': matched_data[7]
#     })
#
#     # 保存结果
#     csv_filepath = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_011504match.csv'
#     df.to_csv(csv_filepath, index=False)
#     print(f"匹配数据已保存至：{csv_filepath}")



# Spatio-temporal matching at a fixed time of day
import os
import numpy as np
import pandas as pd
import netCDF4 as nc
from scipy.spatial import cKDTree
from datetime import datetime, timedelta

# 常量定义
CLOUDSAT_CSV = "/mnt/datastore/liudddata/cloudsat_data/cloudsat_20200104time.csv"
# FY_ROOT_DIR = "/mnt/datastore/liudddata/result/200200104_05"
# FY_ROOT_DIR = "/mnt/datastore/liudddata/result/20200104_04UTC"
FY_ROOT_DIR = "/mnt/datastore/liudddata/result/20200104new"
COORD_FILE = "FY4A_coordinates.nc"
OUTPUT_CSV = "/mnt/datastore/liudddata/cloudsat_data/cloudsat_matched_2020_0102_03utc.csv"


def read_fy_data(filename):
    """读取风云数据文件"""
    with nc.Dataset(filename, 'r') as f:
        cth = f.variables['cth'][:]
        cbh = f.variables['predicted'][:]
        # 将无效值转换为NaN（假设原始数据用"--"表示无效）
        cth = np.where(cth == "--", np.nan, cth)
        cbh = np.where(cbh == "--", np.nan, cbh)
    return cth, cbh


def read_coordinate_data():
    """读取并预处理风云卫星坐标数据"""
    with nc.Dataset(COORD_FILE, 'r') as f:
        lat = f.variables['lat'][:, :].T
        lon = f.variables['lon'][:, :].T
        lat[np.isnan(lat)] = -90.0
        lon[np.isnan(lon)] = 360.0
    return lat, lon


def spherical_to_cartesian(lon, lat):
    """球坐标转笛卡尔坐标"""
    lon_rad = np.deg2rad(lon)
    lat_rad = np.deg2rad(90.0 - lat)
    x = np.sin(lat_rad) * np.cos(lon_rad)
    y = np.sin(lat_rad) * np.sin(lon_rad)
    z = np.cos(lat_rad)
    return np.column_stack([x, y, z])


def build_fy_kdtree():
    """预处理风云坐标并构建KD树"""
    lat_fy, lon_fy = read_coordinate_data()
    valid_mask = (~np.isnan(lat_fy)) & (~np.isnan(lon_fy))
    coords_cart = spherical_to_cartesian(lon_fy[valid_mask], lat_fy[valid_mask])
    return cKDTree(coords_cart), valid_mask, lat_fy, lon_fy


def match_observations(tree, valid_mask, lat_fy, lon_fy, cth_data, cbh_data,
                       cloudsat_lons, cloudsat_lats, cloudsat_times,
                       cloudsat_cth, cloudsat_cbh, radius_km=2.0):
    """执行空间匹配的核心函数"""
    # 初始化结果存储
    matches = []

    # 转换匹配半径到球面距离
    earth_radius = 6371.0  # 地球半径，单位km
    radius_rad = radius_km / earth_radius

    # 准备CloudSat点坐标
    cs_points = spherical_to_cartesian(cloudsat_lons, cloudsat_lats)

    # 批量查询KD树
    indices = tree.query_ball_point(cs_points, r=radius_rad)

    # 获取有效坐标的原始索引
    valid_indices = np.where(valid_mask)

    # 处理匹配结果
    for i, (cs_lon, cs_lat, cs_time, cs_cth, cs_cbh) in enumerate(zip(
            cloudsat_lons, cloudsat_lats, cloudsat_times, cloudsat_cth, cloudsat_cbh)):

        if not indices[i]:  # 无匹配项
            continue

        # 获取风云数据索引
        fy_linear_idx = indices[i][0]  # 取第一个匹配点
        fy_row, fy_col = valid_indices[0][fy_linear_idx], valid_indices[1][fy_linear_idx]

        # 提取风云数据
        fy_lat = lat_fy[fy_row, fy_col]
        fy_lon = lon_fy[fy_row, fy_col]
        # fy_cth = cth_data[fy_row, fy_col] * 0.001  # 转换为km
        # fy_cbh = cbh_data[fy_row, fy_col] * 0.001  # 转换为km
        fy_cth = cth_data[fy_row, fy_col]
        fy_cbh = cbh_data[fy_row, fy_col]

        # 保存匹配对
        matches.append({
            'cloudsat_time': cs_time,
            'cloudsat_lat': cs_lat,
            'cloudsat_lon': cs_lon,
            'cloudsat_cth': cs_cth,
            'cloudsat_cbh': cs_cbh,
            'fy_lat': fy_lat,
            'fy_lon': fy_lon,
            'fy_cth': fy_cth if not np.isnan(fy_cth) else None,
            'fy_cbh': fy_cbh if not np.isnan(fy_cbh) else None
        })

    return matches


def process_daily_data(start_date, end_date):
    """主处理函数"""
    # 预处理风云坐标数据
    fy_tree, valid_mask, lat_fy, lon_fy = build_fy_kdtree()

    # 加载CloudSat数据
    cloudsat_df = pd.read_csv(CLOUDSAT_CSV, parse_dates=['time'])

    # 初始化结果存储
    all_matches = []

    # 遍历日期范围
    current_date = start_date
    while current_date <= end_date:
        # 生成风云文件名
        fy_filename = os.path.join(
            FY_ROOT_DIR,
            # current_date.strftime("%Y%m%d") + "05_predicted_2d.nc"
            # current_date.strftime("%Y%m%d") + "04_predicted_2d.nc"
            current_date.strftime("%Y%m%d") + "03_predicted_2d.nc"
        )

        # 跳过不存在文件
        if not os.path.exists(fy_filename):
            current_date += timedelta(days=1)
            continue

        try:
            # 读取风云数据
            cth_data, cbh_data = read_fy_data(fy_filename)

            # 筛选当天的CloudSat数据 (05:00-05:15)
            time_mask = (
                    (cloudsat_df['time'] >= current_date.replace(hour=3)) &
                    (cloudsat_df['time'] < current_date.replace(hour=3, minute=15))
            )
            daily_data = cloudsat_df[time_mask].copy()
            if daily_data.empty:
                continue

            # 执行匹配
            matches = match_observations(
                fy_tree, valid_mask, lat_fy, lon_fy, cth_data, cbh_data,
                daily_data['longitude'].values,
                daily_data['latitude'].values,
                daily_data['time'].values,
                daily_data['cloudsat_cth'].values,
                daily_data['cloudsat_cbh'].values
            )

            # 收集结果
            all_matches.extend(matches)

            print(f"Processed {current_date.strftime('%Y-%m-%d')} with {len(matches)} matches")

        except Exception as e:
            print(f"Error processing {current_date}: {str(e)}")

        current_date += timedelta(days=1)

    # 保存最终结果
    result_df = pd.DataFrame(all_matches)
    # result_df.to_csv(OUTPUT_CSV, index=False)
    # ===== 新增过滤逻辑 =====
    # 创建有效性掩码
    valid_mask = (
            result_df['cloudsat_cbh'].notna() &
            result_df['fy_cbh'].notna()
    )

    # 应用过滤
    filtered_df = result_df[valid_mask].copy()

    # 输出过滤信息
    print(f"\n数据过滤报告:")
    print(f"总匹配记录: {len(result_df)}")
    print(f"有效记录(双CBH非空): {len(filtered_df)}")
    print(f"过滤率: {100 * (1 - len(filtered_df) / len(result_df)):.1f}%")

    # 保存结果
    filtered_df.to_csv(OUTPUT_CSV, index=False)
    # print(f"Total matched records: {len(result_df)}")
    print(f"Results saved to {OUTPUT_CSV}")

    # - 已修正为匹配全天24小时，并修复了文件名错误
    # def process_daily_data(start_date, end_date):
    #     """主处理函数 """
    #     # 预处理风云坐标数据和加载CloudSat数据
    #     fy_tree, valid_mask, lat_fy, lon_fy = build_fy_kdtree()
    #     cloudsat_df = pd.read_csv(CLOUDSAT_CSV, parse_dates=['time'])
    #
    #     all_matches = []
    #
    #     # 按天循环
    #     current_date = start_date
    #     while current_date <= end_date:
    #
    #         # 按小时循环
    #         for hour in range(24):
    #             current_time = current_date.replace(hour=hour, minute=0, second=0, microsecond=0)
    #
    #             # --- 关键修正：确保文件名格式完全正确，包含 "_mc" ---
    #             fy_filename = os.path.join(
    #                 FY_ROOT_DIR,
    #                 current_time.strftime("%Y%m%d%H") + "_predicted_2d_mc.nc"
    #             )
    #
    #             if not os.path.exists(fy_filename):
    #                 continue
    #
    #             try:
    #                 cth_data, cbh_data = read_fy_data(fy_filename)
    #
    #                 # 动态筛选CloudSat数据
    #                 start_window = current_time
    #                 end_window = current_time + timedelta(minutes=15)
    #
    #                 time_mask = (
    #                         (cloudsat_df['time'] >= start_window) &
    #                         (cloudsat_df['time'] < end_window)
    #                 )
    #                 hourly_data = cloudsat_df[time_mask].copy()
    #
    #                 if hourly_data.empty:
    #                     continue
    #
    #                 matches = match_observations(
    #                     fy_tree, valid_mask, lat_fy, lon_fy, cth_data, cbh_data,
    #                     hourly_data['longitude'].values,
    #                     hourly_data['latitude'].values,
    #                     hourly_data['time'].values,
    #                     hourly_data['cloudsat_cth'].values,
    #                     hourly_data['cloudsat_cbh'].values
    #                 )
    #
    #                 if matches:  # 只有当找到匹配时才打印，避免刷屏
    #                     all_matches.extend(matches)
    #                     print(f"Processed {current_time.strftime('%Y-%m-%d %H:00')} with {len(matches)} matches")
    #
    #             except Exception as e:
    #                 print(f"Error processing {current_time.strftime('%Y-%m-%d %H:00')}: {str(e)}")
    #
    #         current_date += timedelta(days=1)
    #
    #     # --- 结尾部分已加入“无匹配”的健壮性检查 ---
    #     result_df = pd.DataFrame(all_matches)
    #
    #     if result_df.empty:
    #         print("\n在指定的日期范围内未找到任何匹配的数据点。")
    #         print(f"程序已结束，未生成输出文件 {OUTPUT_CSV}。")
    #         return
    #
    #     print("\nFiltering matched results...")
    #     valid_mask = (
    #             result_df['cloudsat_cbh'].notna() &
    #             result_df['fy_cbh'].notna()
    #     )
    #     filtered_df = result_df[valid_mask].copy()
    #
    #     print(f"\n数据过滤报告:")
    #     print(f"总匹配记录: {len(result_df)}")
    #     print(f"有效记录(双CBH非空): {len(filtered_df)}")
    #     if len(result_df) > 0:
    #         print(f"过滤率: {100 * (1 - len(filtered_df) / len(result_df)):.1f}%")
    #
    #     filtered_df.to_csv(OUTPUT_CSV, index=False)
    #     print(f"Results saved to {OUTPUT_CSV}")


if __name__ == "__main__":
    # 设置日期范围
    start_date = datetime(2020, 1, 1)
    end_date = datetime(2020, 2, 24)

    # 执行处理
    process_daily_data(start_date, end_date)