import os
import re
import csv
import h5py
import numpy as np
import pandas as pd
from pyhdf import SD
import netCDF4 as nc
from datetime import datetime
from scipy.spatial import cKDTree
from multiprocessing import Pool
from read_CloudSat import reader


def load_fy_coordinates(coord_file_name):
    """Load the FY satellite geolocation grids from a NetCDF file.

    Reads the 'lat' and 'lon' variables, transposes them, and replaces NaN
    pixels (presumably off-disk locations — TODO confirm) with the sentinel
    values -90 (latitude) and 360 (longitude) so that later nearest-neighbour
    searches never select them.

    Returns the pair (lat_fy, lon_fy) as 2-D arrays.
    """
    with nc.Dataset(coord_file_name, 'r') as dataset:
        latitudes = dataset.variables['lat'][:, :].T
        longitudes = dataset.variables['lon'][:, :].T
    # Replace invalid pixels after the file is closed; arrays are in memory.
    latitudes[np.isnan(latitudes)] = -90.
    longitudes[np.isnan(longitudes)] = 360.
    return latitudes, longitudes

def _layer_props(row_cloud_mask, height_row, top_idx, base_idx):
    """Return (cth, cbh, cloud_type) for the layer bounded by top_idx..base_idx.

    Heights are taken from height_row (NaN when an index falls outside the
    profile).  cloud_type is 1 when any bin strictly between the two bounds
    has a cloud-mask value below 20 (i.e. a gap inside the layer), else 0.
    """
    n_bins = len(height_row)
    cth = height_row[top_idx] if top_idx < n_bins else np.nan
    cbh = height_row[base_idx] if base_idx < n_bins else np.nan
    if top_idx < base_idx and np.any(row_cloud_mask[top_idx + 1:base_idx] < 20):
        cloud_type = 1
    else:
        cloud_type = 0
    return cth, cbh, cloud_type


def _lowest_valid(profile):
    """Return the last value in *profile* that is neither NaN nor the -999
    fill value (presumably the bin nearest the surface — matches the original
    reversed scan), or NaN when the whole profile is invalid."""
    for value in reversed(profile):
        if not np.isnan(value) and value != -999:
            return value
    return np.nan


def process_cloudsat_file(filepath, lida_folder, era5_folder):
    """Process one CloudSat 2B-GEOPROF granule.

    Reads the granule geometry, cloud mask and radar reflectivity, plus — on
    a best-effort basis — the companion GEOPROF-LIDAR CloudFraction and
    ECMWF-AUX surface fields, and derives per-profile cloud-top/cloud-base
    heights and a layering flag.

    Returns
    -------
    tuple
        (lon_c, lat_c, cloudsat_cth, cloudsat_cbh, cloudsat_start_time,
         cloudsat_end_time, cloud_types, temp_2m, surface_pressure,
         surface_specific_humidity)
    """
    f = reader(filepath)
    lon_c, lat_c, elv = f.read_geo()
    height = f.read_sds('Height')
    cloud_mask = f.read_sds('CPR_Cloud_mask')
    cloud_radar = f.read_sds('Radar_Reflectivity')

    # Companion product files share the granule name with a different tag.
    basename = os.path.basename(filepath)
    lida_filepath = os.path.join(
        lida_folder, basename.replace('CS_2B-GEOPROF_GRANULE_P1', 'CS_2B-GEOPROF-LIDAR_GRANULE_P2'))
    era5_filepath = os.path.join(
        era5_folder, basename.replace('CS_2B-GEOPROF_GRANULE_P1', 'CS_ECMWF-AUX_GRANULE_P1'))

    # ----------------------------
    # 1. CloudFraction from the GEOPROF-LIDAR file (optional input).
    # ----------------------------
    cloud_fraction = None
    try:
        cloud_fraction = reader(lida_filepath).read_sds('CloudFraction')
    except Exception as e:
        print(f"Error reading LIDAR file {lida_filepath}: {e}")

    # ----------------------------
    # 2. Surface temperature / pressure / specific humidity from ECMWF-AUX
    #    (optional input; the NaN-filled defaults survive on failure).
    # ----------------------------
    n_profiles = len(cloud_mask)
    temp_2m = np.full(n_profiles, np.nan)
    surface_pressure = np.full(n_profiles, np.nan)
    surface_specific_humidity = np.full(n_profiles, np.nan)
    try:
        era5_reader = reader(era5_filepath)
        temp_2m = np.array(era5_reader.attach_vdata('Temperature_2m'))[:, 0]
        surface_pressure = np.array(era5_reader.attach_vdata('Surface_pressure'))[:, 0]

        sd = SD.SD(era5_filepath, SD.SDC.READ)
        try:
            specific_humidity = sd.select('Specific_humidity').get()
        finally:
            # Fix: always release the HDF handle, even when select()/get()
            # raises (the original skipped sd.end() on error, leaking it).
            sd.end()
        surface_specific_humidity = np.array([_lowest_valid(row) for row in specific_humidity])
    except Exception as e:
        print(f"Error reading ERA5 file {era5_filepath}: {e}")

    # ----------------------------
    # 3. Per-profile cloud properties.
    # ----------------------------
    time = f.read_time(datetime=True)
    cloudsat_start_time, cloudsat_end_time = time[[0, -1]]
    f.close()

    cloudsat_cth = []
    cloudsat_cbh = []
    cloud_types = []
    for i in range(n_profiles):
        row_cloud_mask = cloud_mask[i]
        row_cloud_radar = cloud_radar[i]
        # Scalar NaN fallback when no LIDAR file was available.
        row_cloud_fraction = cloud_fraction[i] if cloud_fraction is not None else np.nan

        cth = cbh = cloud_type = np.nan
        radar_condition = row_cloud_radar >= -30
        if np.any(radar_condition):
            confident = np.where(radar_condition & (row_cloud_mask >= 20))[0]
            weak = np.where(radar_condition & (row_cloud_mask < 20))[0]
            if len(confident) > 0:
                # High-confidence radar cloud: layer bounds from the cloud mask.
                cth, cbh, cloud_type = _layer_props(row_cloud_mask, height[i], confident[0], confident[-1])
            elif len(weak) > 0 and np.any(row_cloud_fraction >= 99):
                # Radar-weak profile: fall back to lidar CloudFraction bounds.
                frac_idx = np.where(row_cloud_fraction >= 99)[0]
                cth, cbh, cloud_type = _layer_props(row_cloud_mask, height[i], frac_idx[0], frac_idx[-1])
        cloudsat_cth.append(cth)
        cloudsat_cbh.append(cbh)
        cloud_types.append(cloud_type)

    return (lon_c, lat_c, np.array(cloudsat_cth), np.array(cloudsat_cbh),
            cloudsat_start_time, cloudsat_end_time, np.array(cloud_types),
            temp_2m, surface_pressure, surface_specific_humidity)


def match_cloudsat_with_fy(cloudsat_data, lat_fy, lon_fy, cth_fy_data, clm_fy_data, clt_fy_data,  ctt_fy_data, ctp_fy_data, olr_fy_data,
                           band_data, start_datetime, threshold=2):
    """Match CloudSat profiles to the nearest valid FY satellite pixel.

    Parameters
    ----------
    cloudsat_data : tuple
        (lon_c, lat_c, cloudsat_cth, cloudsat_cbh, cloud_types, temp_2m,
        surface_pressure, surface_specific_humidity) — 1-D arrays per profile.
    lat_fy, lon_fy : 2-D FY geolocation grids.
    cth_fy_data ... olr_fy_data : 2-D FY product grids aligned with lat/lon.
    band_data : dict mapping band name -> 2-D array on the same grid.
    start_datetime : FY scan start time, used as part of the dedup key.
    threshold : match radius in kilometres (default 2).

    Returns a list of per-match dicts, keeping for each FY pixel only the
    closest CloudSat profile, and discarding matches whose CTH equals CBH or
    whose FY/CloudSat cloud-top heights differ by more than 1000 m.
    """
    (lon_c, lat_c, cloudsat_cth, cloudsat_cbh, cloud_types,
     temp_2m, surface_pressure, surface_specific_humidity) = cloudsat_data
    # Approximate km -> degrees conversion (1 degree latitude ~ 111.32 km).
    threshold_deg = threshold / 111.32

    # Keep only FY pixels with a real CTH (65535 is the fill value) and
    # flagged cloudy (CLM == 0); boolean-masking the 2-D grids yields 1-D arrays.
    valid_mask = (cth_fy_data != 65535) & (clm_fy_data == 0)
    valid_lat_fy = lat_fy[valid_mask]
    valid_lon_fy = lon_fy[valid_mask]
    valid_cth_fy_data = cth_fy_data[valid_mask]
    valid_clm_fy_data = clm_fy_data[valid_mask]
    valid_clt_fy_data = clt_fy_data[valid_mask]
    valid_ctt_fy_data = ctt_fy_data[valid_mask]
    valid_ctp_fy_data = ctp_fy_data[valid_mask]
    valid_olr_fy_data = olr_fy_data[valid_mask]
    valid_band_data = {key: value[valid_mask] for key, value in band_data.items()}

    tree = cKDTree(np.c_[valid_lon_fy.ravel(), valid_lat_fy.ravel()])
    dist, idx = tree.query(np.c_[lon_c, lat_c], k=1, distance_upper_bound=threshold_deg)

    # Bug fix: derive indices, distances and CloudSat point numbers from ONE
    # shared mask.  The original used `dist != np.inf` for the first two but
    # `dist < threshold_deg` for the third, so the arrays could fall out of
    # step and zip() below would pair the wrong profiles together.
    matched = np.isfinite(dist)
    matched_indices = idx[matched]
    matched_distances = dist[matched]
    matched_cloudsat_points = np.nonzero(matched)[0]

    results = [{
        'cloudsat_idx': c_idx,
        'cloudsat_lat': lat_c[c_idx],
        'cloudsat_lon': lon_c[c_idx],
        'fy_lat': valid_lat_fy.flat[f_idx],
        'fy_lon': valid_lon_fy.flat[f_idx],
        'fy_cth': valid_cth_fy_data.flat[f_idx],
        'fy_clm': valid_clm_fy_data.flat[f_idx],
        'fy_clt': valid_clt_fy_data.flat[f_idx],
        'fy_ctt': valid_ctt_fy_data.flat[f_idx],
        'fy_ctp': valid_ctp_fy_data.flat[f_idx],
        'fy_olr': valid_olr_fy_data.flat[f_idx],
        **{key: value.flat[f_idx] for key, value in valid_band_data.items()},
        'cloudsat_cth': cloudsat_cth[c_idx],
        'cloudsat_cbh': cloudsat_cbh[c_idx],
        # NOTE: key name typo ('tpye') kept for compatibility with existing
        # CSV outputs and downstream consumers.
        'cloudsat_tpye': cloud_types[c_idx],
        'temp_2m': temp_2m[c_idx],
        'surface_pressure': surface_pressure[c_idx],
        'surface_specific_humidity': surface_specific_humidity[c_idx],
        'distance': d,
        'time': start_datetime  # scan start time is part of the dedup key
    } for c_idx, f_idx, d in zip(matched_cloudsat_points, matched_indices, matched_distances)]

    # Keep, per FY pixel (and scan time), only the closest CloudSat profile.
    closest_matches = {}
    for result in results:
        fy_key = (result['fy_lat'], result['fy_lon'], result['time'])
        if fy_key not in closest_matches or result['distance'] < closest_matches[fy_key]['distance']:
            closest_matches[fy_key] = result

    # Quality filter: reject degenerate layers (CTH == CBH) and matches whose
    # FY and CloudSat cloud-top heights disagree by more than 1000 m.
    final_results = [
        result for result in closest_matches.values()
        if result['cloudsat_cth'] != result['cloudsat_cbh'] and
           -1000 <= result['fy_cth'] - result['cloudsat_cth'] <= 1000
    ]
    return final_results


def _read_nc_var(path, var_name):
    """Read one variable's full array from a NetCDF file."""
    with nc.Dataset(path, 'r') as ds:
        return ds.variables[var_name][:]


def _load_l1_bands(l1_filepath):
    """Read and calibrate the 14 FY L1 channels into a (2748, 2748, 14) cube.

    Each channel's raw DN counts are mapped through its calibration lookup
    table; pixels with out-of-range DNs are left at 0.
    """
    img = np.zeros((2748, 2748, 14), dtype=np.float32)
    with h5py.File(l1_filepath, 'r') as h5file:
        for i in range(14):
            nom = h5file[f'NOMChannel{i + 1:02d}'][:]
            cal = h5file[f'CALChannel{i + 1:02d}'][:]
            # Valid DN range: 12-bit for most channels, 16-bit for channel 7.
            upper = 65536 if i == 6 else 4096
            valid = (nom >= 0) & (nom < upper)
            dn = nom.astype(int)
            # Bug fix: restrict to DNs inside the calibration LUT on BOTH
            # sides of the assignment.  The original filtered only the LUT
            # values, so the assignment raised a shape-mismatch ValueError
            # whenever any DN exceeded the LUT length.
            valid &= dn < len(cal)
            calibrated = np.zeros_like(nom, dtype=np.float32)
            calibrated[valid] = cal[dn[valid]]
            img[:, :, i] = calibrated
    return img


def process_files(file_info):
    """Worker: match one CloudSat granule against every overlapping FY scan.

    file_info is the tuple (cloudsat_filepath, fy_files, lat_fy, lon_fy,
    lida_folder, era5_folder).  Returns the concatenated match dicts; any
    error is logged to stdout and an empty/partial list is returned.
    """
    cloudsat_filepath, fy_files, lat_fy, lon_fy, lida_folder, era5_folder = file_info
    final_results = []
    try:
        (lon_c, lat_c, cloudsat_cth, cloudsat_cbh, cloudsat_start_time,
         cloudsat_end_time, cloudsat_types, temp_2m, surface_pressure,
         surface_specific_humidity) = process_cloudsat_file(cloudsat_filepath, lida_folder, era5_folder)

        for (start_datetime, end_datetime), filepaths in fy_files.items():
            # Skip FY scans that do not overlap the CloudSat granule in time.
            if start_datetime > cloudsat_end_time or end_datetime < cloudsat_start_time:
                continue
            # Bug fix: skip this scan when the L1 file is missing.  The
            # original set band_data = None and then indexed it, raising a
            # TypeError that aborted ALL remaining scans for this granule.
            l1_filepath = filepaths['l1']
            if not os.path.exists(l1_filepath):
                continue

            cth_data = _read_nc_var(filepaths['cth'], 'CTH')
            clm_data = _read_nc_var(filepaths['clm'], 'CLM')
            clt_data = _read_nc_var(filepaths['clt'], 'CLT')
            ctt_data = _read_nc_var(filepaths['ctt'], 'CTT')
            ctp_data = _read_nc_var(filepaths['ctp'], 'CTP')
            olr_data = _read_nc_var(filepaths['olr'], 'OLR')

            band_data = _load_l1_bands(l1_filepath)
            all_bands = {f'band{i + 1}': band_data[:, :, i] for i in range(14)}
            matched_results = match_cloudsat_with_fy(
                (lon_c, lat_c, cloudsat_cth, cloudsat_cbh, cloudsat_types,
                 temp_2m, surface_pressure, surface_specific_humidity),
                lat_fy, lon_fy, cth_data, clm_data, clt_data, ctt_data, ctp_data, olr_data,
                all_bands, start_datetime
            )
            final_results.extend(matched_results)
    except Exception as e:
        print(f"Error processing file {cloudsat_filepath}: {e}")

    return final_results


def main():
    """Build the CloudSat / FY satellite match-up dataset for one period.

    Exactly one of the dataset configurations below is active at a time; the
    commented blocks are alternative processing periods kept for convenience.
    The matched samples are written to a single CSV file.
    """
    # 2020-01, one-day test dataset (alternative configuration, disabled)
    # cloudsat_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_GEOPROF/cloudsat_20200101test'
    # lida_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_lida/cloudsat_lida20200101test'
    # era5_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_ecmwf/cloudsat_20200101_ecmwf'
    # base_cth_filepath = '/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_cth_20200101'
    # fy_clm_folder = '/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_clm_20200101'
    # fy_clt_folder = '/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_clt_20200101'
    # fy_ctt_folder = '/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_ctt_20200101'
    # fy_ctp_folder = '/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_ctp_20200101'
    # fy_olr_folder = '/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_olr_20200101'
    # fy_l1_folder = '/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_l1_20200101'
    # coord_file_name = 'FY4A_coordinates.nc'


    # 2019-02 to 2019-08, 7 months total (disabled)
    # cloudsat_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_GEOPROF/cloudsat_2019_2_8'
    # lida_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_lida/cloudsat_lida_2019_2_8'
    # era5_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_ecmwf/cloudsat_2_8_ecmwf'
    # base_cth_filepath = '/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_cth_2_8'
    # fy_clm_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_clm_2_8'
    # fy_clt_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_clt_2_8'
    # fy_ctt_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_ctt_2_8'
    # fy_ctp_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_ctp_2_8'
    # fy_olr_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_olr_2_8'
    # fy_l1_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_l1_2-8'
    # coord_file_name = 'FY4A_coordinates.nc'

    # 2019-09 to 2019-12, 4 months total (ACTIVE configuration)
    cloudsat_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_GEOPROF/cloudsat_2019_9_12'
    lida_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_lida/cloudsat_lida_2019_9_12'
    era5_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_ecmwf/cloudsat_9_12_ecmwf'
    base_cth_filepath = '/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_cth_9_12'
    fy_clm_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_clm_9_12'
    fy_clt_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_clt_9_12'
    fy_ctt_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_ctt_9_12'
    fy_ctp_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_ctp_9_12'
    fy_olr_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_olr_9_12'
    fy_l1_folder = '/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_L1_9_12'
    coord_file_name = 'FY4A_coordinates.nc'

    # 2020-01 to 2020-04, 4 months total (disabled)
    # cloudsat_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_GEOPROF/cloudsat_2020_1_4'
    # lida_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_lida/cloudsat_lida_2020_1_4'
    # era5_folder = '/mnt/datastore/liudddata/cloudsat_data/cloudsat_ecmwf/cloudsat_202001_04_ecmwf'
    # base_cth_filepath = '/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_cth_202001_04'
    # fy_clm_folder = '/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_clm_202001_04'
    # fy_clt_folder = '/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_clt_202001_04'
    # fy_ctt_folder = '/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_ctt_202001_04'
    # fy_ctp_folder = '/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_ctp_202001_04'
    # fy_olr_folder = '/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_olr_202001_04'
    # fy_l1_folder = '/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_l1_202001_04'
    # coord_file_name = 'FY4A_coordinates.nc'


    lat_fy, lon_fy = load_fy_coordinates(coord_file_name)
    final_results = []

    # Index every FY CTH granule by its (start, end) scan window and derive
    # the sibling product paths (CLM/CLT/CTT/CTP/OLR/L1) from the CTH name.
    fy_files = {}
    for fy_filename in os.listdir(base_cth_filepath):
        if not (fy_filename.endswith('.NC') or fy_filename.endswith('.HDF')):
            continue
        # NOTE(review): assumes the FY-4A naming convention where underscore-
        # separated fields 9 and 10 hold start/end timestamps — confirm.
        start_datetime, end_datetime = [datetime.strptime(x, "%Y%m%d%H%M%S") for x in fy_filename.split('_')[9:11]]
        fy_files[(start_datetime, end_datetime)] = {
            'cth': os.path.join(base_cth_filepath, fy_filename),
            'clm': os.path.join(fy_clm_folder, fy_filename.replace('L2-_CTH', 'L2-_CLM')),
            'clt': os.path.join(fy_clt_folder, fy_filename.replace('L2-_CTH', 'L2-_CLT')),
            'ctt': os.path.join(fy_ctt_folder, fy_filename.replace('L2-_CTH', 'L2-_CTT')),
            'ctp': os.path.join(fy_ctp_folder, fy_filename.replace('L2-_CTH', 'L2-_CTP')),
            'olr': os.path.join(fy_olr_folder, fy_filename.replace('L2-_CTH', 'L2-_OLR')),
            'l1': os.path.join(fy_l1_folder, fy_filename.replace('L2-_CTH', 'L1-_FDI').replace('.NC', '.HDF'))
        }

    # One work item per CloudSat granule; folders and grids are shared.
    file_info_list = [(os.path.join(cloudsat_folder, cloudsat_filename), fy_files, lat_fy, lon_fy,lida_folder,era5_folder) for cloudsat_filename in
                      os.listdir(cloudsat_folder)]

    # Fan the per-granule matching out across all CPU cores.
    with Pool() as pool:
        results = pool.map(process_files, file_info_list)

    for result in results:
        final_results.extend(result)

    # One CSV row per matched FY pixel / CloudSat profile pair.
    df = pd.DataFrame(final_results)
    csv_file_path = 'match_time2019_09_12_2025.csv'
    df.to_csv(csv_file_path, index=False)


# Entry-point guard: required because multiprocessing.Pool re-imports this
# module in child processes on spawn-based platforms.
if __name__ == '__main__':
    main()