# Match the exported CloudSat data against FY (Fengyun) output: for each day, select the 24 hourly time slots and pair them with the corresponding FY data.
import os
import numpy as np
import pandas as pd
import netCDF4 as nc
from scipy.spatial import cKDTree
from datetime import datetime, timedelta

# Constant definitions
CLOUDSAT_CSV = "/mnt/datastore/liudddata/cloudsat_data/cloudsat_20200104time.csv"  # CloudSat samples with a 'time' column
FY_ROOT_DIR = "/mnt/datastore/liudddata/result/20200104new"  # directory of hourly *_predicted_2d.nc FY files
COORD_FILE = "FY4A_coordinates.nc"  # FY4A pixel latitude/longitude grids
OUTPUT_CSV = "/mnt/datastore/liudddata/cloudsat_data/cloudsat_matched_hourly_3.csv"  # matched-record output

def read_fy_data(filename):
    """Read the FY cloud-top ('cth') and cloud-base ('predicted') fields.

    Parameters
    ----------
    filename : str
        Path to an hourly *_predicted_2d.nc FY file.

    Returns
    -------
    (np.ndarray, np.ndarray)
        Float arrays (cth, cbh) with missing samples set to NaN.
    """
    with nc.Dataset(filename, 'r') as f:
        cth = f.variables['cth'][:]
        cbh = f.variables['predicted'][:]
    # netCDF4 returns masked arrays for fill values. The original compared the
    # numeric array against the string "--", which never matches (NumPy yields
    # an all-False result for a str comparison), so fill values were silently
    # kept. Fill masked entries with NaN explicitly instead.
    cth = np.ma.filled(cth.astype(float), np.nan)
    cbh = np.ma.filled(cbh.astype(float), np.nan)
    return cth, cbh

def read_coordinate_data():
    """Read and preprocess the FY4A pixel latitude/longitude grids.

    Returns
    -------
    (np.ndarray, np.ndarray)
        2-D float arrays (lat, lon), transposed to (row, col) order.
        Off-disk pixels are left as NaN so callers (e.g. build_fy_kdtree)
        can exclude them with an isnan mask.
    """
    with nc.Dataset(COORD_FILE, 'r') as f:
        # Fill any masked (off-disk) entries with NaN instead of sentinels.
        lat = np.ma.filled(f.variables['lat'][:, :].astype(float), np.nan).T
        lon = np.ma.filled(f.variables['lon'][:, :].astype(float), np.nan).T
    # NOTE(review): the original replaced NaNs with -90.0 / 360.0, which made
    # build_fy_kdtree's isnan validity mask a no-op and inserted fake points
    # near the south pole into the KD-tree; NaNs are preserved here so the
    # downstream mask filters them out as intended.
    return lat, lon

def spherical_to_cartesian(lon, lat):
    """Convert geographic longitude/latitude (degrees) to unit vectors.

    Uses the colatitude convention: theta = 90° - lat, phi = lon, so the
    north pole maps to (0, 0, 1).

    Returns an (N, 3) array of cartesian coordinates on the unit sphere.
    """
    phi = np.deg2rad(lon)
    theta = np.deg2rad(90.0 - lat)  # colatitude
    sin_theta = np.sin(theta)
    return np.column_stack([
        sin_theta * np.cos(phi),
        sin_theta * np.sin(phi),
        np.cos(theta),
    ])

def build_fy_kdtree():
    """Build a KD-tree over the valid (non-NaN) FY pixel coordinates.

    Returns
    -------
    (cKDTree, np.ndarray, np.ndarray, np.ndarray)
        The tree (in unit-sphere cartesian space), the 2-D boolean validity
        mask used to select tree points, and the full lat/lon grids. The
        mask maps tree indices back to grid (row, col) positions.
    """
    grid_lat, grid_lon = read_coordinate_data()
    usable = ~(np.isnan(grid_lat) | np.isnan(grid_lon))
    tree = cKDTree(spherical_to_cartesian(grid_lon[usable], grid_lat[usable]))
    return tree, usable, grid_lat, grid_lon

def match_observations(tree, valid_mask, lat_fy, lon_fy, cth_data, cbh_data,
                       cloudsat_lons, cloudsat_lats, cloudsat_times,
                       cloudsat_cth, cloudsat_cbh, radius_km=2.0):
    """Spatially match each CloudSat sample to the nearest valid FY pixel.

    Parameters
    ----------
    tree : scipy.spatial.cKDTree
        Tree over the valid FY pixels, in unit-sphere cartesian coordinates.
    valid_mask : np.ndarray (2-D, bool)
        The mask used to select tree points; maps tree indices to (row, col).
    lat_fy, lon_fy, cth_data, cbh_data : np.ndarray (2-D)
        FY coordinate and retrieval grids.
    cloudsat_lons, cloudsat_lats, cloudsat_times, cloudsat_cth, cloudsat_cbh
        Aligned 1-D CloudSat sample arrays.
    radius_km : float
        Maximum accepted match distance along the Earth's surface.

    Returns
    -------
    list[dict]
        One record per matched CloudSat sample; NaN FY values become None.
    """
    matches = []
    earth_radius = 6371.0
    # Arc length in radians; for a ~2 km radius the chord/arc difference is
    # negligible, so it is used directly as the Euclidean bound on the sphere.
    radius_rad = radius_km / earth_radius
    cs_points = spherical_to_cartesian(cloudsat_lons, cloudsat_lats)
    # Take the NEAREST neighbour within the radius. The original used
    # query_ball_point and picked candidate [0], which is an arbitrary
    # (unsorted) member of the ball, not the closest pixel.
    _, nearest = tree.query(cs_points, k=1, distance_upper_bound=radius_rad)
    rows, cols = np.nonzero(valid_mask)
    n_valid = tree.n  # query() returns this index when nothing is in range

    for i, (cs_lon, cs_lat, cs_time, cs_cth, cs_cbh) in enumerate(zip(
            cloudsat_lons, cloudsat_lats, cloudsat_times, cloudsat_cth, cloudsat_cbh)):

        idx = nearest[i]
        if idx >= n_valid:
            continue  # no FY pixel within radius_km

        fy_row, fy_col = rows[idx], cols[idx]
        fy_cth = cth_data[fy_row, fy_col]
        fy_cbh = cbh_data[fy_row, fy_col]

        matches.append({
            'cloudsat_time': cs_time,
            'cloudsat_lat': cs_lat,
            'cloudsat_lon': cs_lon,
            'cloudsat_cth': cs_cth,
            'cloudsat_cbh': cs_cbh,
            'fy_lat': lat_fy[fy_row, fy_col],
            'fy_lon': lon_fy[fy_row, fy_col],
            'fy_cth': fy_cth if not np.isnan(fy_cth) else None,
            'fy_cbh': fy_cbh if not np.isnan(fy_cbh) else None
        })
    return matches

def process_hourly_data(start_datetime, end_datetime):
    """Main driver: match CloudSat samples against hourly FY files and save.

    Walks hour by hour through [start_datetime, end_datetime]; for each hour
    whose FY prediction file exists, matches the CloudSat samples from the
    first 15 minutes of that hour and accumulates the results, then writes
    the records where both CloudSat and FY cloud-base values are present.

    Parameters
    ----------
    start_datetime, end_datetime : datetime
        Inclusive hourly range to process.
    """
    fy_tree, valid_mask, lat_fy, lon_fy = build_fy_kdtree()
    cloudsat_df = pd.read_csv(CLOUDSAT_CSV, parse_dates=['time'])
    all_matches = []

    current_datetime = start_datetime
    while current_datetime <= end_datetime:
        # FY file name, e.g. 2020010100_predicted_2d.nc
        fy_filename = os.path.join(
            FY_ROOT_DIR,
            current_datetime.strftime("%Y%m%d%H") + "_predicted_2d.nc"
        )

        if not os.path.exists(fy_filename):
            current_datetime += timedelta(hours=1)
            continue

        try:
            cth_data, cbh_data = read_fy_data(fy_filename)
            # CloudSat samples from the first 15 minutes of the current hour
            time_mask = (
                (cloudsat_df['time'] >= current_datetime) &
                (cloudsat_df['time'] < current_datetime + timedelta(minutes=15))
            )
            hourly_data = cloudsat_df[time_mask].copy()

            if not hourly_data.empty:
                matches = match_observations(
                    fy_tree, valid_mask, lat_fy, lon_fy, cth_data, cbh_data,
                    hourly_data['longitude'].values,
                    hourly_data['latitude'].values,
                    hourly_data['time'].values,
                    hourly_data['cloudsat_cth'].values,
                    hourly_data['cloudsat_cbh'].values
                )
                all_matches.extend(matches)
                print(f"Processed {current_datetime.strftime('%Y-%m-%d %H:%M')}: {len(matches)} matches")

        except Exception as e:
            # best-effort per hour: log the failure and continue with the next
            print(f"Error processing {current_datetime}: {str(e)}")

        current_datetime += timedelta(hours=1)

    # Guard: with zero matches the DataFrame has no columns and indexing
    # 'cloudsat_cbh' would raise KeyError.
    if not all_matches:
        print("No matches found; nothing written.")
        return

    # Filter and save: keep rows where both cloud-base values are present
    result_df = pd.DataFrame(all_matches)
    keep_mask = (
        result_df['cloudsat_cbh'].notna() &
        result_df['fy_cbh'].notna()
    )
    filtered_df = result_df[keep_mask]
    filtered_df.to_csv(OUTPUT_CSV, index=False)
    print(f"有效记录保存至 {OUTPUT_CSV}，共 {len(filtered_df)} 条")

if __name__ == "__main__":
    # Processing time range (hour precision).
    # NOTE(review): this range is March 2020, while CLOUDSAT_CSV / FY_ROOT_DIR
    # reference 20200104 — confirm the paths match the dates before running.
    start_datetime = datetime(2020, 3, 1, 0, 0)  # start time
    end_datetime = datetime(2020, 3, 24, 23, 0)   # end time
    process_hourly_data(start_datetime, end_datetime)