import os
import csv
import h5py
import numpy as np
import pandas as pd
import netCDF4 as nc
from datetime import datetime
from scipy.spatial import cKDTree
from multiprocessing import Pool
from read_CloudSat import reader

def load_fy_coordinates(coord_file_name):
    """Load the FY-4A latitude/longitude geolocation grids from a netCDF file.

    Off-disk pixels read back as NaN; they are replaced with sentinel values
    (lat -90, lon 360) — presumably so they sit far from any real CloudSat
    footprint in the later nearest-neighbour search (confirm with callers).

    Parameters
    ----------
    coord_file_name : str
        Path to the coordinate netCDF file (variables 'lat' and 'lon').

    Returns
    -------
    tuple of ndarray
        (latitudes, longitudes), transposed to match the product grids.
    """
    with nc.Dataset(coord_file_name, 'r') as dataset:
        latitudes = dataset.variables['lat'][:, :].T
        longitudes = dataset.variables['lon'][:, :].T
    # Fill NaNs in place after the file is closed; the arrays are already in memory.
    latitudes[np.isnan(latitudes)] = -90.
    longitudes[np.isnan(longitudes)] = 360.
    return latitudes, longitudes

def process_cloudsat_file(filepath):
    """Extract geolocation, cloud boundaries, layering flag and time range
    from one CloudSat granule.

    A radar bin with CPR_Cloud_mask >= 20 is treated as confidently cloudy.
    Per ray: CTH is the height of the first such bin, CBH the height of the
    last (assumes Height is ordered top-of-profile first — TODO confirm),
    and the type flag is 1 when any bin between them drops below 20
    (multi-layer), 0 otherwise; cloud-free rays get NaN everywhere.

    Parameters
    ----------
    filepath : str
        Path to the CloudSat granule understood by `reader`.

    Returns
    -------
    tuple
        (lon_c, lat_c, cloudsat_cth, cloudsat_cbh,
         cloudsat_start_time, cloudsat_end_time, cloud_types)
    """
    f = reader(filepath)
    try:
        lon_c, lat_c, elv = f.read_geo()
        height = f.read_sds('Height')
        cloud_mask = f.read_sds('CPR_Cloud_mask')
        time = f.read_time(datetime=True)
    finally:
        # BUG FIX: close the granule even when one of the reads raises.
        f.close()
    cloudsat_start_time, cloudsat_end_time = time[[0, -1]]

    cloudsat_cth = []
    cloudsat_cbh = []
    cloud_types = []

    n_bins = height.shape[1]
    for i, profile in enumerate(cloud_mask):
        # Hoisted: the original evaluated `profile >= 20` three times per ray
        # (np.any, np.argmax, np.where); compute the cloudy indices once.
        cloudy_bins = np.where(profile >= 20)[0]
        if cloudy_bins.size == 0:
            cloudsat_cth.append(np.nan)
            cloudsat_cbh.append(np.nan)
            cloud_types.append(np.nan)
            continue
        top_idx = cloudy_bins[0]
        base_idx = cloudy_bins[-1]
        # Defensive bound checks kept from the original (indices come from the
        # mask row, which should share the Height row length).
        cloudsat_cth.append(height[i, top_idx] if top_idx < n_bins else np.nan)
        cloudsat_cbh.append(height[i, base_idx] if base_idx < n_bins else np.nan)
        if top_idx < base_idx:
            gap = profile[top_idx + 1:base_idx]
            # Any sub-threshold bin strictly between top and base => multi-layer.
            cloud_types.append(1 if np.any(gap < 20) else 0)
        else:
            cloud_types.append(0)

    return (lon_c, lat_c, np.array(cloudsat_cth), np.array(cloudsat_cbh),
            cloudsat_start_time, cloudsat_end_time, np.array(cloud_types))

def match_cloudsat_with_fy(cloudsat_data, lat_fy, lon_fy, cth_fy_data, clm_fy_data, clt_fy_data, crf_fy_data, ctt_fy_data, ctp_fy_data, olr_fy_data, lpwmid_fy_data, lpwlow_fy_data,
                           band_data, start_datetime, threshold=2):
    """Match CloudSat footprints with the nearest valid FY-4A pixel.

    Parameters
    ----------
    cloudsat_data : tuple
        (lon_c, lat_c, cloudsat_cth, cloudsat_cbh, cloud_types) 1-D arrays.
    lat_fy, lon_fy : ndarray
        FY-4A geolocation grids, same shape as every product grid below.
    cth_fy_data ... lpwlow_fy_data : ndarray
        FY-4A L2 product grids.
    band_data : dict
        Band name -> calibrated L1 grid, same shape as lat_fy.
    start_datetime : hashable
        FY slot start time; part of the per-pixel de-duplication key.
    threshold : float, optional
        Matching radius in kilometres (default 2).

    Returns
    -------
    list of dict
        One record per matched FY pixel (closest footprint only), with
        implausible matches filtered out.
    """
    lon_c, lat_c, cloudsat_cth, cloudsat_cbh, cloud_types = cloudsat_data
    # km -> degrees via the equatorial scale (111.32 km/deg); adequate for a
    # small radius but NOTE(review): degrades at high latitude -- confirm.
    threshold_deg = threshold / 111.32
    # Keep pixels with a real CTH (65535 = fill) and CLM == 0
    # (presumably the 'cloudy' class -- confirm the product convention).
    valid_mask = (cth_fy_data != 65535) & (clm_fy_data == 0)
    valid_lat_fy = lat_fy[valid_mask]
    valid_lon_fy = lon_fy[valid_mask]
    valid_cth_fy_data = cth_fy_data[valid_mask]
    valid_clm_fy_data = clm_fy_data[valid_mask]
    valid_clt_fy_data = clt_fy_data[valid_mask]
    valid_crf_fy_data = crf_fy_data[valid_mask]
    valid_ctt_fy_data = ctt_fy_data[valid_mask]
    valid_ctp_fy_data = ctp_fy_data[valid_mask]
    valid_olr_fy_data = olr_fy_data[valid_mask]
    valid_lpwlow_fy_data = lpwlow_fy_data[valid_mask]
    valid_lpwmid_fy_data = lpwmid_fy_data[valid_mask]
    valid_band_data = {key: value[valid_mask] for key, value in band_data.items()}

    tree = cKDTree(np.c_[valid_lon_fy.ravel(), valid_lat_fy.ravel()])
    # Unmatched query points come back with dist == inf and an out-of-range index.
    dist, idx = tree.query(np.c_[lon_c, lat_c], k=1, distance_upper_bound=threshold_deg)
    # BUG FIX: the original selected indices/distances with `dist != np.inf`
    # but footprint numbers with `dist < threshold_deg`. The two masks can
    # disagree (a distance exactly equal to the bound), which misaligns the
    # three zipped arrays and silently pairs wrong records. Use one mask.
    matched = np.isfinite(dist)
    matched_indices = idx[matched]
    matched_distances = dist[matched]
    matched_cloudsat_points = np.nonzero(matched)[0]

    results = [{
        'cloudsat_idx': c_idx,
        'cloudsat_lat': lat_c[c_idx],
        'cloudsat_lon': lon_c[c_idx],
        'fy_lat': valid_lat_fy.flat[f_idx],
        'fy_lon': valid_lon_fy.flat[f_idx],
        'fy_cth': valid_cth_fy_data.flat[f_idx],
        'fy_clm': valid_clm_fy_data.flat[f_idx],
        'fy_clt': valid_clt_fy_data.flat[f_idx],
        'fy_crf': valid_crf_fy_data.flat[f_idx],
        'fy_ctt': valid_ctt_fy_data.flat[f_idx],
        'fy_ctp': valid_ctp_fy_data.flat[f_idx],
        'fy_olr': valid_olr_fy_data.flat[f_idx],
        'fy_lpwlow': valid_lpwlow_fy_data.flat[f_idx],
        'fy_lpwmid': valid_lpwmid_fy_data.flat[f_idx],
        **{key: value.flat[f_idx] for key, value in valid_band_data.items()},
        'cloudsat_cth': cloudsat_cth[c_idx],
        'cloudsat_cbh': cloudsat_cbh[c_idx],
        # NOTE: 'tpye' typo kept deliberately -- existing output CSVs and any
        # downstream consumers use this column name.
        'cloudsat_tpye': cloud_types[c_idx],
        'distance': d,
        'time': start_datetime  # start time participates in the uniqueness key
    } for c_idx, f_idx, d in zip(matched_cloudsat_points, matched_indices, matched_distances)]

    # Per FY pixel (and time slot) keep only the closest CloudSat footprint.
    closest_matches = {}
    for result in results:
        fy_key = (result['fy_lat'], result['fy_lon'], result['time'])
        if fy_key not in closest_matches or result['distance'] < closest_matches[fy_key]['distance']:
            closest_matches[fy_key] = result

    filtered_results = list(closest_matches.values())

    # Drop degenerate clouds (CTH == CBH) and pairs whose FY vs CloudSat
    # cloud-top heights disagree by more than 1000 (same unit as CTH).
    final_results = [
        result for result in filtered_results
        if result['cloudsat_cth'] != result['cloudsat_cbh'] and
           -1000 <= result['fy_cth'] - result['cloudsat_cth'] <= 1000
    ]
    return final_results

def _read_var(path, varname):
    """Read one variable in full from a netCDF file (helper for process_files)."""
    with nc.Dataset(path, 'r') as ds:
        return ds.variables[varname][:]


def _load_l1_bands(l1_filepath):
    """Radiometrically calibrate the 14 FY-4A L1 channels from an HDF5 file.

    Each channel's raw DN is mapped through its CAL lookup table. Returns a
    (2748, 2748, 14) float32 array; pixels whose DN is outside the valid
    range (or beyond the LUT length) remain 0.
    """
    img = np.zeros((2748, 2748, 14), dtype=np.float32)
    with h5py.File(l1_filepath, 'r') as h5file:
        for i in range(14):
            nom = h5file[f'NOMChannel{str(i + 1).zfill(2)}'][:]
            cal = h5file[f'CALChannel{str(i + 1).zfill(2)}'][:]
            dn_upper = 65536 if i == 6 else 4096  # channel 7 uses a wider DN range
            # BUG FIX: also reject DNs beyond the LUT length. The original
            # filtered the LUT indices but not the destination positions, so a
            # single out-of-range DN made the fancy-index assignment a
            # length-mismatched write that raised ValueError.
            valid = (nom >= 0) & (nom < dn_upper) & (nom < len(cal))
            calibrated = np.zeros_like(nom, dtype=np.float32)
            calibrated[valid] = cal[nom[valid].astype(int)]
            img[:, :, i] = calibrated
    return img


def process_files(file_info):
    """Worker: match one CloudSat granule against every overlapping FY-4A slot.

    Parameters
    ----------
    file_info : tuple
        (cloudsat_filepath, fy_files, lat_fy, lon_fy) where fy_files maps
        (start, end) datetimes to the per-product file paths.

    Returns
    -------
    list of dict
        Matched records accumulated over all overlapping slots; empty on error.
    """
    cloudsat_filepath, fy_files, lat_fy, lon_fy = file_info
    final_results = []
    try:
        (lon_c, lat_c, cloudsat_cth, cloudsat_cbh,
         cloudsat_start_time, cloudsat_end_time, cloudsat_types) = process_cloudsat_file(cloudsat_filepath)

        for (start_datetime, end_datetime), filepaths in fy_files.items():
            # Skip FY slots that do not overlap the granule's time range.
            if start_datetime > cloudsat_end_time or end_datetime < cloudsat_start_time:
                continue

            l1_filepath = filepaths['l1']
            if not os.path.exists(l1_filepath):
                # BUG FIX: the original set band_data = None and then
                # subscripted it, raising TypeError into the broad except and
                # aborting ALL remaining slots for this granule. Skip only the
                # slot that is missing its L1 file.
                continue

            cth_data = _read_var(filepaths['cth'], 'CTH')
            clm_data = _read_var(filepaths['clm'], 'CLM')
            clt_data = _read_var(filepaths['clt'], 'CLT')
            crf_data = _read_var(filepaths['crf'], 'CFR')
            ctt_data = _read_var(filepaths['ctt'], 'CTT')
            ctp_data = _read_var(filepaths['ctp'], 'CTP')
            olr_data = _read_var(filepaths['olr'], 'OLR')
            lpwmid_data = _read_var(filepaths['lpw'], 'LPW_MID')
            lpwlow_data = _read_var(filepaths['lpw'], 'LPW_LOW')

            img = _load_l1_bands(l1_filepath)
            all_bands = {f'band{i + 1}': img[:, :, i] for i in range(14)}

            matched_results = match_cloudsat_with_fy(
                (lon_c, lat_c, cloudsat_cth, cloudsat_cbh, cloudsat_types),
                lat_fy, lon_fy, cth_data, clm_data, clt_data, crf_data, ctt_data, ctp_data, olr_data, lpwmid_data, lpwlow_data,
                all_bands, start_datetime
            )
            final_results.extend(matched_results)
    except Exception as e:
        # Worker-level guard: one bad granule must not kill the Pool run.
        print(f"Error processing file {cloudsat_filepath}: {e}")

    return final_results

def main():
    """Index the FY-4A granules, fan the CloudSat granules out over a process
    pool, and write every matched record to one CSV file."""
    cloudsat_folder = '/mnt/space2/liudd/cloudsatdate'
    base_cth_filepath = '/mnt/space2/liudd/fy_cth_2_8'
    fy_l1_folder = '/mnt/space2/liudd/fy4aL1'
    coord_file_name = 'FY4A_coordinates.nc'

    # L2 products sharing the CTH naming scheme: key -> (folder, name tag).
    l2_products = {
        'clm': ('/mnt/space2/liudd/fy_clm_2_8', 'L2-_CLM'),
        'clt': ('/mnt/space2/liudd/fy_clt_2_8', 'L2-_CLT'),
        'crf': ('/mnt/space2/liudd/fy_cfr_2_8', 'L2-_CFR'),
        'ctt': ('/mnt/space2/liudd/fy_ctt_2_8', 'L2-_CTT'),
        'ctp': ('/mnt/space2/liudd/fy_ctp_2_8', 'L2-_CTP'),
        'olr': ('/mnt/space2/liudd/fy_olr_2_8', 'L2-_OLR'),
        'lpw': ('/mnt/space2/liudd/fy_lpw_2_8', 'L2-_LPW'),
    }

    lat_fy, lon_fy = load_fy_coordinates(coord_file_name)

    # Index every FY CTH granule by its (start, end) scan window and derive
    # the sibling product paths from the CTH file name.
    fy_files = {}
    for fy_filename in os.listdir(base_cth_filepath):
        if not fy_filename.endswith(('.NC', '.HDF')):
            continue
        # Underscore-separated fields 9 and 10 carry the scan window stamps.
        start_datetime, end_datetime = [
            datetime.strptime(stamp, "%Y%m%d%H%M%S")
            for stamp in fy_filename.split('_')[9:11]]
        entry = {'cth': os.path.join(base_cth_filepath, fy_filename)}
        for key, (folder, tag) in l2_products.items():
            entry[key] = os.path.join(folder, fy_filename.replace('L2-_CTH', tag))
        entry['l1'] = os.path.join(
            fy_l1_folder,
            fy_filename.replace('L2-_CTH', 'L1-_FDI').replace('.NC', '.HDF'))
        fy_files[(start_datetime, end_datetime)] = entry

    tasks = [(os.path.join(cloudsat_folder, name), fy_files, lat_fy, lon_fy)
             for name in os.listdir(cloudsat_folder)]

    with Pool() as pool:
        per_granule = pool.map(process_files, tasks)

    final_results = [record for chunk in per_granule for record in chunk]

    df = pd.DataFrame(final_results)
    csv_file_path = 'match_time201902_08_L1_10.25.csv'
    df.to_csv(csv_file_path, index=False)

# Entry-point guard: also required because multiprocessing.Pool workers
# re-import this module and must not re-enter main().
if __name__ == '__main__':
    main()