import h5py
import numpy as np
import pandas as pd
import netCDF4 as nc
from global_land_mask import globe
import multiprocessing
from datetime import datetime
from sklearn.preprocessing import StandardScaler
import joblib
import os

# ==== Global configuration ====
# Input/output locations. All FY4A L1/L2 products below belong to the same
# full-disk scan (nominal 4000 m grid, 2020-01-01 00:00:00-00:14:59).
FILE_PATHS = {
    'fy4a': {
        # L1 radiance HDF with NOMChannelXX digital numbers and CALChannelXX lookup tables.
        'hdf': r"/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_l1_20200101/FY4A-_AGRI--_N_DISK_1047E_L1-_FDI-_MULT_NOM_20200101000000_20200101001459_4000M_V0001.HDF",
        # L2 cloud products: top height, top temperature, top pressure,
        # cloud type, outgoing longwave radiation, and cloud mask.
        'cth': r"/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_cth_20200101/FY4A-_AGRI--_N_DISK_1047E_L2-_CTH-_MULT_NOM_20200101000000_20200101001459_4000M_V0001.NC",
        'ctt': r"/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_ctt_20200101/FY4A-_AGRI--_N_DISK_1047E_L2-_CTT-_MULT_NOM_20200101000000_20200101001459_4000M_V0001.NC",
        'ctp': r"/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_ctp_20200101/FY4A-_AGRI--_N_DISK_1047E_L2-_CTP-_MULT_NOM_20200101000000_20200101001459_4000M_V0001.NC",
        'clt': r"/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_clt_20200101/FY4A-_AGRI--_N_DISK_1047E_L2-_CLT-_MULT_NOM_20200101000000_20200101001459_4000M_V0001.NC",
        'olr': r"/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_olr_20200101/FY4A-_AGRI--_N_DISK_1047E_L2-_OLR-_MULT_NOM_20200101000000_20200101001459_4000M_V0001.NC",
        'clm': r"/mnt/datastore/liudddata/fy_4Adata/FY20200101test/fy_clm_20200101/FY4A-_AGRI--_N_DISK_1047E_L2-_CLM-_MULT_NOM_20200101000000_20200101001459_4000M_V0001.NC"
    },
    # ERA5 surface variables already regridded onto the FY4A grid.
    'era5': r'/home/liudd/data_preprocessing/ERA5_surface_vars_FY4A_grid_v2.nc',
    # Static lat/lon arrays of the FY4A full-disk grid.
    'coord': '/home/liudd/data_preprocessing/FY4A_coordinates.nc',
    # Directories of pre-fitted per-column StandardScaler pickles,
    # keyed by cloud type code (fy_clt value).
    'scalers': {
        2: "/home/liudd/deeplearing/backup/scaler2/",
        3: "/home/liudd/deeplearing/backup/scaler3/",
        4: "/home/liudd/deeplearing/backup/scaler4/",
        5: "/home/liudd/deeplearing/backup/scaler5/",
        6: "/home/liudd/deeplearing/backup/scaler6/"
    }
}

# Decide whether one pixel row lies on land or on the ocean.
def is_land_row(row):
    """Return True when the row's (fy_lat, fy_lon) position is on land.

    Longitudes above 180 degrees are wrapped into the [-180, 180] range
    expected by global_land_mask before the lookup.
    """
    latitude = row['fy_lat']
    longitude = row['fy_lon']
    longitude = longitude - 360 if longitude > 180 else longitude
    return globe.is_land(latitude, longitude)

# Split one batch of pixels into land and ocean subsets.
def process_chunk(chunk):
    """Partition `chunk` into (land_df, ocean_df) using the global land mask.

    Replaces the original per-row iterrows() loop with a single boolean
    mask: faster, and — unlike ``pd.DataFrame([])`` — empty results keep
    the chunk's column names and dtypes.

    Parameters
    ----------
    chunk : pandas.DataFrame
        Pixel table containing 'fy_lat' and 'fy_lon' columns.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        Land rows and ocean rows, in that order.
    """
    if chunk.empty:
        # apply(axis=1) on an empty frame returns a DataFrame, not a Series,
        # so handle this degenerate case explicitly.
        return chunk.copy(), chunk.copy()
    land_mask = chunk.apply(is_land_row, axis=1).astype(bool)
    return chunk[land_mask], chunk[~land_mask]

# Load one FY4A scene (L1 radiances + L2 cloud products), build a pixel
# table, and split it into land and ocean subsets.
def _read_nc_var(path, var_name):
    """Read one variable fully into memory from a NetCDF file, closing the
    file afterwards (the original left six Dataset handles open)."""
    with nc.Dataset(path, 'r') as ds:
        return ds.variables[var_name][:]


def _load_calibrated_channels(hdf_path, n_channels=14, grid_size=2748):
    """Return a (grid_size, grid_size, n_channels) float32 array of
    calibrated AGRI channel data.

    Each raw digital number (NOMChannelXX) is used as an index into the
    matching calibration lookup table (CALChannelXX).  Counts outside the
    valid DN range, or beyond the table length, are left as 0.
    """
    img = np.zeros((grid_size, grid_size, n_channels), dtype=np.float32)
    with h5py.File(hdf_path, 'r') as h5file:
        for i in range(n_channels):
            nom = h5file[f'NOMChannel{i + 1:02d}'][:]
            cal = h5file[f'CALChannel{i + 1:02d}'][:]
            # Channel 7 uses a 16-bit DN range; the other channels are 12-bit.
            dn_limit = 65536 if i == 6 else 4096
            idx = nom.astype(np.int64)
            # Bug fix: bounds-check against the calibration table inside the
            # mask.  The original filtered the index array AFTER selecting
            # pixel positions, so a single DN >= len(cal) made the
            # fancy-indexed assignment raise a shape-mismatch error.
            valid = (idx >= 0) & (idx < dn_limit) & (idx < len(cal))
            calibrated = np.zeros_like(nom, dtype=np.float32)
            calibrated[valid] = cal[idx[valid]]
            img[:, :, i] = calibrated
    return img


def process_fy4a_data():
    """Build the FY4A pixel table for one scan and split it by surface type.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        (land_df, ocean_df) with lat/lon, L2 cloud products, scan start
        time, and the 14 calibrated channel values per pixel; (None, None)
        on any failure (the error is printed).
    """
    try:
        # L2 cloud products, one variable per file.
        cth_data = _read_nc_var(FILE_PATHS['fy4a']['cth'], 'CTH')
        ctt_data = _read_nc_var(FILE_PATHS['fy4a']['ctt'], 'CTT')
        ctp_data = _read_nc_var(FILE_PATHS['fy4a']['ctp'], 'CTP')
        clt_data = _read_nc_var(FILE_PATHS['fy4a']['clt'], 'CLT')
        olr_data = _read_nc_var(FILE_PATHS['fy4a']['olr'], 'OLR')
        clm_data = _read_nc_var(FILE_PATHS['fy4a']['clm'], 'CLM')

        # Static grid coordinates; transposed to match the product layout.
        with nc.Dataset(FILE_PATHS['coord'], 'r') as coord_ds:
            lat = coord_ds.variables['lat'][:, :].T
            lon = coord_ds.variables['lon'][:, :].T

        # Scan start time is the 10th underscore-separated field of the file
        # NAME.  The original split the full path (field 12), which silently
        # depends on how many underscores the parent directories contain.
        start_str = os.path.basename(FILE_PATHS['fy4a']['cth']).split('_')[9]
        start_datetime = datetime.strptime(start_str, "%Y%m%d%H%M%S")

        # Calibrated L1 channel data.
        img = _load_calibrated_channels(FILE_PATHS['fy4a']['hdf'])

        data = {
            'fy_lat': lat.flatten(),
            'fy_lon': lon.flatten(),
            'fy_cth': cth_data.flatten(),
            'fy_ctt': ctt_data.flatten(),
            'fy_ctp': ctp_data.flatten(),
            'fy_clt': clt_data.flatten(),
            'fy_olr': olr_data.flatten(),
            'fy_clm': clm_data.flatten(),
            'fy_time': start_datetime,  # scalar, broadcast to every row
        }
        for i in range(14):
            data[f'band{i + 1}'] = img[:, :, i].flatten()

        df = pd.DataFrame(data).dropna()

        # Land/ocean split in parallel, 1000-row chunks to bound per-task
        # pickling cost.  The context manager guarantees pool shutdown even
        # when a worker raises.
        batch_size = 1000
        land_data = []
        ocean_data = []
        with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
            async_results = [
                pool.apply_async(process_chunk, args=(df.iloc[start:start + batch_size],))
                for start in range(0, len(df), batch_size)
            ]
            for result in async_results:
                land_chunk, ocean_chunk = result.get()
                land_data.append(land_chunk)
                ocean_data.append(ocean_chunk)

        land_df = pd.concat(land_data, ignore_index=True)
        ocean_df = pd.concat(ocean_data, ignore_index=True)
        return land_df, ocean_df
    except Exception as e:
        print(f"处理FY4A数据时出错: {e}")
        return None, None

# Load ERA5 surface variables (already on the FY4A grid).
def process_era5_data():
    """Read ERA5 surface fields into a flat DataFrame.

    Fix over the original: the NetCDF dataset is opened with a context
    manager so the file handle is always closed, even on error.

    Returns
    -------
    pandas.DataFrame or None
        Columns: longitude, latitude, temp_2m, surface_specific_humidity,
        surface_pressure — NaN rows removed.  None on failure (the error
        is printed).
    """
    try:
        with nc.Dataset(FILE_PATHS['era5'], 'r') as ds:
            # NOTE(review): variable names ('t2m', 'sp', 'q_surface',
            # 'lon', 'lat') must match the file — adjust if the product
            # layout changes.
            temp_2m = ds['t2m'][:]
            surface_pressure = ds.variables['sp'][:]
            surface_specific_humidity = ds.variables['q_surface'][:]
            lon = ds['lon'][:]
            lat = ds['lat'][:]

        data = {
            'longitude': lon.flatten(),
            'latitude': lat.flatten(),
            'temp_2m': temp_2m.flatten(),
            'surface_specific_humidity': surface_specific_humidity.flatten(),
            'surface_pressure': surface_pressure.flatten()
        }
        df = pd.DataFrame(data)

        # Drop rows where any field is missing (e.g. masked/NaN values).
        df.dropna(inplace=True)

        return df
    except Exception as e:
        print(f"处理ERA5数据时出错: {e}")
        return None

# Merge satellite pixels with ERA5 surface variables.
def integrate_data(sat_df, era5_df):
    """Join satellite pixels with ERA5 surface variables on exact lat/lon.

    Parameters
    ----------
    sat_df : pandas.DataFrame
        Satellite pixel table (land_df or ocean_df) with 'fy_lat'/'fy_lon'.
    era5_df : pandas.DataFrame
        ERA5 table with 'longitude'/'latitude' and the surface variables.

    Returns
    -------
    pandas.DataFrame or None
        Inner-joined, NaN-free rows restricted to physically valid
        coordinates, with the redundant ERA5 coordinate columns removed;
        None when the merge fails (the error is printed).
    """
    try:
        era5_columns = ['longitude', 'latitude', 'temp_2m',
                        'surface_pressure', 'surface_specific_humidity']

        # Inner join: keep only pixels with an exact coordinate match.
        combined = sat_df.merge(
            era5_df[era5_columns],
            how='inner',
            left_on=['fy_lon', 'fy_lat'],
            right_on=['longitude', 'latitude'],
        ).dropna()

        # Discard rows outside the valid coordinate ranges.
        in_bounds = (combined['fy_lon'].between(-180, 180)
                     & combined['fy_lat'].between(-90, 90))
        combined = combined[in_bounds]

        # The ERA5 coordinate columns duplicate fy_lon/fy_lat after the join.
        return combined.drop(columns=['longitude', 'latitude'])
    except Exception as e:
        print(f"整合数据时出错: {e}")
        return None

# Load the per-column standardization models for one cloud type.
def load_scalers(cloud_type):
    """Load every fitted scaler stored for `cloud_type`.

    Scaler files are named ``<column>_scaler<cloud_type>.pkl`` inside the
    directory configured in FILE_PATHS['scalers'].

    Returns
    -------
    dict[str, object] or None
        Mapping of column name -> loaded scaler; None when no directory is
        configured or it does not exist (an empty dict means the directory
        exists but holds no matching files).
    """
    directory = FILE_PATHS['scalers'].get(cloud_type)
    if not directory or not os.path.exists(directory):
        return None

    suffix = f"_scaler{int(cloud_type)}.pkl"
    return {
        filename.replace(suffix, ""): joblib.load(os.path.join(directory, filename))
        for filename in os.listdir(directory)
        if filename.endswith(suffix)
    }

# Apply the pre-fitted standardization per cloud type.
def standardize_data(df):
    """Standardize selected features per cloud type and add cyclic coordinates.

    For each fy_clt group, loads the matching pre-fitted scalers and writes
    ``<col>_scaled`` columns; groups without scalers are skipped with a
    message.  Also adds sin/cos encodings of latitude and longitude.

    Fix over the original: each group is explicitly copied before new
    columns are assigned — groupby groups may be views of `df`, and
    mutating them triggers pandas' SettingWithCopyWarning (and potentially
    silent no-op writes).

    Parameters
    ----------
    df : pandas.DataFrame
        Merged satellite + ERA5 table with an 'fy_clt' column.

    Returns
    -------
    pandas.DataFrame or None
        Concatenation of all processed groups, or None when no group could
        be processed.
    """
    # Scalar features with a pre-fitted scaler per cloud type.
    columns_to_scale = ['fy_cth', 'fy_ctt', 'fy_ctp', 'fy_olr',
                        'temp_2m', 'surface_pressure', 'surface_specific_humidity']
    # Only the first 6 channels are standardized (scalers exist only for these).
    band_columns = [f'band{i}' for i in range(1, 7)]

    processed_dfs = []
    for cloud_type, group in df.groupby('fy_clt'):
        scalers = load_scalers(cloud_type)
        if not scalers:
            print(f"未找到云类型 {cloud_type} 对应的标准化拟合器，跳过该组数据。")
            continue

        # Work on an explicit copy before adding columns (see docstring).
        group = group.copy()

        missing_columns = []
        for col in columns_to_scale + band_columns:
            if col not in group.columns:
                print(f"警告：数据框中不存在 {col} 列，跳过该列。")
                continue
            if col not in scalers:
                missing_columns.append(col)
                continue
            # transform expects a 2-D input, hence the double brackets.
            group[f'{col}_scaled'] = scalers[col].transform(group[[col]])
        if missing_columns:
            print(f"云类型 {cloud_type} 缺失以下列的标准化拟合器：{missing_columns}")

        # Cyclic encodings so models see coordinate continuity at the
        # antimeridian and poles.
        group['fy_lat_sin'] = np.sin(np.radians(group['fy_lat']))
        group['fy_lat_cos'] = np.cos(np.radians(group['fy_lat']))
        group['fy_lon_sin'] = np.sin(np.radians(group['fy_lon']))
        group['fy_lon_cos'] = np.cos(np.radians(group['fy_lon']))

        processed_dfs.append(group)

    return pd.concat(processed_dfs) if processed_dfs else None

# ==== Main pipeline ====
def main():
    """Run the full pipeline: FY4A -> ERA5 -> merge -> standardize -> CSV.

    Each stage prints its own failure message and aborts the pipeline;
    only the ocean subset is carried forward to the merge step.
    """
    # Stage 1: satellite data, split by surface type.
    land_df, ocean_df = process_fy4a_data()
    if land_df is None or ocean_df is None:
        print("FY4A数据处理失败，未进行后续操作。")
        return

    # Stage 2: ERA5 surface variables.
    era5_df = process_era5_data()
    if era5_df is None:
        print("ERA5数据处理失败，未进行整合。")
        return

    # Stage 3: merge — ocean pixels only.
    merged_df = integrate_data(ocean_df, era5_df)
    if merged_df is None:
        print("数据融合失败")
        return

    # Stage 4: per-cloud-type standardization and export.
    standardized_df = standardize_data(merged_df)
    if standardized_df is None:
        print("标准化处理失败")
        return

    standardized_df.to_csv('2020010100standardized_ocean_data.csv', index=False)
    print("标准化数据已保存")

# Run the pipeline only when executed as a script (also required so that
# multiprocessing workers can re-import this module safely).
if __name__ == "__main__":
    main()