import xarray as xr
import h5py
import numpy as np
import pandas as pd
import netCDF4 as nc
from global_land_mask import globe
import multiprocessing
from datetime import datetime, timedelta
from sklearn.preprocessing import StandardScaler
import joblib
import os
from tqdm import tqdm  # 用于显示进度条

# ==== Global configuration ====
BASE_DATE = "20200101"  # Base date being processed (YYYYMMDD)
OUTPUT_BASE = "/mnt/datastore/liudddata/processed"  # Base path for output files

# Input-file layout. The templates are filled in by get_fy4a_paths /
# get_era5_path using {date} (YYYYMMDD), {start}/{end} (YYYYMMDDHHMMSS)
# and {hour} (0-23).
FILE_PATHS = {
    'fy4a': {
        'base_dir': "/mnt/datastore/liudddata/FY20200101test",
        'var_templates': {
            # L1 full-disk multi-channel HDF plus the L2 cloud products
            # (cloud-top height/temperature/pressure, cloud type, OLR,
            # cloud mask).
            'hdf': "fy_l1_{date}/FY4A-_AGRI--_N_DISK_1047E_L1-_FDI-_MULT_NOM_{start}_{end}_4000M_V0001.HDF",
            'cth': "fy_cth_{date}/FY4A-_AGRI--_N_DISK_1047E_L2-_CTH-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'ctt': "fy_ctt_{date}/FY4A-_AGRI--_N_DISK_1047E_L2-_CTT-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'ctp': "fy_ctp_{date}/FY4A-_AGRI--_N_DISK_1047E_L2-_CTP-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'clt': "fy_clt_{date}/FY4A-_AGRI--_N_DISK_1047E_L2-_CLT-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'olr': "fy_olr_{date}/FY4A-_AGRI--_N_DISK_1047E_L2-_OLR-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'clm': "fy_clm_{date}/FY4A-_AGRI--_N_DISK_1047E_L2-_CLM-_MULT_NOM_{start}_{end}_4000M_V0001.NC"
        }
    },
    'era5': {
        'base_dir': "/mnt/datastore/liudddata/FY20200101test/ERA5_{date}",
        'template': "ERA5_{date}{hour:02d}_surface_vars_FY4A_grid.nc"
    },
    # Static FY4A lat/lon coordinate grid.
    'coord': '/home/liudd/data_preprocessing/FY4A_coordinates.nc',
    # Per-cloud-type directories holding pre-fitted scalers, keyed by the
    # 'fy_clt' value. NOTE(review): entry 2 lacks the trailing slash the
    # others have — harmless for os.path.join, but confirm it is intended.
    'scalers': {
        2: "/home/liudd/deeplearing/train/scaler2",
        3: "/home/liudd/deeplearing/train/scaler3/",
        4: "/home/liudd/deeplearing/train/scaler4/",
        5: "/home/liudd/deeplearing/train/scaler5/",
        6: "/home/liudd/deeplearing/train/scaler6/",
        7: "/home/liudd/deeplearing/train/scaler7/"
    }
}

# ==== 时间处理函数 ====
def generate_hourly_intervals(date_str):
    """Build 24 (start, end) datetime pairs, one per hour of *date_str*.

    Each window covers the first 15 minutes after a full hour:
    HH:00:00 through HH:14:59.
    """
    day_start = datetime.strptime(date_str, "%Y%m%d")
    intervals = []
    for hour in range(24):
        window_start = day_start + timedelta(hours=hour)
        window_end = window_start + timedelta(minutes=14, seconds=59)
        intervals.append((window_start, window_end))
    return intervals

# ==== 路径生成函数 ====
def get_fy4a_paths(start_time, end_time):
    """Return a dict mapping each FY4A variable name to its full file path.

    The templates in FILE_PATHS['fy4a'] are filled with the window's
    date and start/end timestamps.
    """
    fy4a_cfg = FILE_PATHS['fy4a']
    fmt_args = {
        "date": start_time.strftime("%Y%m%d"),
        "start": start_time.strftime("%Y%m%d%H%M%S"),
        "end": end_time.strftime("%Y%m%d%H%M%S"),
    }
    return {
        var: os.path.join(fy4a_cfg['base_dir'], template.format(**fmt_args))
        for var, template in fy4a_cfg['var_templates'].items()
    }

def get_era5_path(hour):
    """Return the ERA5 surface-variable file path for *hour* of BASE_DATE."""
    era5_cfg = FILE_PATHS['era5']
    directory = era5_cfg['base_dir'].format(date=BASE_DATE)
    filename = era5_cfg['template'].format(date=BASE_DATE, hour=hour)
    return os.path.join(directory, filename)

# 判断某一行数据是海洋还是陆地
def is_land_row(row):
    lat = row['fy_lat']
    lon = row['fy_lon']
    if lon > 180:
        lon -= 360
    return globe.is_land(lat, lon)

# 分批处理数据：将数据按海洋和陆地分开
def process_chunk(chunk):
    land_rows = []
    ocean_rows = []
    for _, row in chunk.iterrows():
        if is_land_row(row):
            land_rows.append(row)
        else:
            ocean_rows.append(row)
    return pd.DataFrame(land_rows), pd.DataFrame(ocean_rows)

# ==== 处理函数改进 ====
def process_fy4a_data(start_time, end_time):
    """Load, calibrate and split FY4A data for one time window.

    Reads the L1 HDF channels plus the L2 cloud products (CTH, CTT, CTP,
    CLT, OLR, CLM), flattens everything together with the static lat/lon
    grid into one DataFrame, and splits the rows into land / ocean sets.

    Returns:
        (land_df, ocean_df) on success, or (None, None) when any input
        file is missing or reading fails.
    """
    try:
        paths = get_fy4a_paths(start_time, end_time)

        # All products must be present for this window; bail out early.
        required_files = ['hdf', 'cth', 'ctt', 'ctp', 'clt', 'olr', 'clm']
        for f in required_files:
            if not os.path.exists(paths[f]):
                print(f"文件不存在: {paths[f]}")
                return None, None

        def _read_var(path, var_name):
            # Context manager guarantees the file is closed (the original
            # code leaked every nc.Dataset handle it opened).
            with nc.Dataset(path, 'r') as ds:
                return ds.variables[var_name][:]

        cth_data = _read_var(paths['cth'], 'CTH')
        ctt_data = _read_var(paths['ctt'], 'CTT')
        ctp_data = _read_var(paths['ctp'], 'CTP')
        clt_data = _read_var(paths['clt'], 'CLT')
        olr_data = _read_var(paths['olr'], 'OLR')
        clm_data = _read_var(paths['clm'], 'CLM')

        with nc.Dataset(FILE_PATHS['coord'], 'r') as coord_ds:
            lat = coord_ds.variables['lat'][:, :].T
            lon = coord_ds.variables['lon'][:, :].T

        # Calibrate the 14 L1 channels: each raw DN value indexes into the
        # channel's calibration lookup table (CALChannelXX).
        NOMNames = [f'NOMChannel{str(i).zfill(2)}' for i in range(1, 15)]
        CALNames = [f'CALChannel{str(i).zfill(2)}' for i in range(1, 15)]
        img = np.zeros((2748, 2748, 14), dtype=np.float32)

        with h5py.File(paths['hdf'], 'r') as h5file:
            for i in range(14):
                NOMData = h5file[NOMNames[i]][:]
                CalData = h5file[CALNames[i]][:]
                # Channel 7 (index 6) uses a 16-bit DN range; others 12-bit.
                dn_limit = 65536 if i == 6 else 4096
                # BUGFIX: the LUT-bounds check must be part of the same
                # mask used for the assignment. The original filtered
                # index_values separately from `indices`, so whenever a DN
                # exceeded len(CalData) the two sides of the assignment
                # had different lengths (shape mismatch / misalignment).
                valid_mask = (NOMData >= 0) & (NOMData < dn_limit)
                valid_mask &= NOMData.astype(int) < len(CalData)
                TOARefData = np.zeros_like(NOMData, dtype=np.float32)
                TOARefData[valid_mask] = CalData[NOMData[valid_mask].astype(int)]
                img[:, :, i] = TOARefData

        # Assemble everything into one flat table.
        data = {
            'fy_lat': lat.flatten(),
            'fy_lon': lon.flatten(),
            'fy_cth': cth_data.flatten(),
            'fy_ctt': ctt_data.flatten(),
            'fy_ctp': ctp_data.flatten(),
            'fy_clt': clt_data.flatten(),
            'fy_olr': olr_data.flatten(),
            'fy_clm': clm_data.flatten(),
            'fy_time': start_time,
        }

        # One column per spectral band.
        for i in range(14):
            data[f'band{i + 1}'] = img[:, :, i].flatten()

        df = pd.DataFrame(data).dropna()

        # Reuse process_chunk instead of duplicating the land/ocean loop.
        land_df, ocean_df = process_chunk(df)
        return land_df, ocean_df
    except Exception as e:
        print(f"处理FY4A数据时出错({start_time}-{end_time}): {e}")
        return None, None

def process_era5_data(hour):
    """Load ERA5 surface variables for one hour as a flat DataFrame.

    Returns a DataFrame with longitude/latitude, 2 m temperature, surface
    specific humidity and surface pressure (NaN rows removed), or None
    when the file is missing or unreadable.
    """
    era5_path = get_era5_path(hour)
    if not os.path.exists(era5_path):
        print(f"ERA5文件不存在: {era5_path}")
        return None

    try:
        # BUGFIX: use a context manager so the dataset is always closed;
        # the original leaked the handle on every call.
        with nc.Dataset(era5_path, 'r') as ds:
            # NOTE(review): variable names ('t2m', 'sp', 'q_surface',
            # 'lon', 'lat') are assumed to match this product — confirm.
            temp_2m = ds['t2m'][:]
            surface_pressure = ds.variables['sp'][:]
            surface_specific_humidity = ds.variables['q_surface'][:]
            lon = ds['lon'][:]
            lat = ds['lat'][:]

        df = pd.DataFrame({
            'longitude': lon.flatten(),
            'latitude': lat.flatten(),
            'temp_2m': temp_2m.flatten(),
            'surface_specific_humidity': surface_specific_humidity.flatten(),
            'surface_pressure': surface_pressure.flatten()
        })

        # Drop rows with any missing value.
        df.dropna(inplace=True)

        return df
    except Exception as e:
        print(f"处理ERA5数据时出错(hour={hour}): {e}")
        return None

# 数据整合函数
def integrate_data(sat_df, era5_df):
    """
    整合卫星数据与ERA5气象数据
    输入参数：
    sat_df : 卫星数据DataFrame (land_df 或 ocean_df)
    era5_df : ERA5气象数据DataFrame
    """
    try:
        # 合并数据（基于经纬度精确匹配）
        merged_df = pd.merge(
            sat_df,
            era5_df[['longitude', 'latitude', 'temp_2m', 'surface_pressure', 'surface_specific_humidity']],
            left_on=['fy_lon', 'fy_lat'],
            right_on=['longitude', 'latitude'],
            how='inner'
        )

        # 数据清洗
        merged_df = merged_df.dropna()
        merged_df = merged_df.loc[
            (merged_df['fy_lon'].between(-180, 180)) &
            (merged_df['fy_lat'].between(-90, 90))
        ]

        # 可以选择删除多余的列（如果需要）
        merged_df = merged_df.drop(columns=['longitude', 'latitude'])

        return merged_df
    except Exception as e:
        print(f"整合数据时出错: {e}")
        return None

# 加载指定云类型的标准化模型
def load_scalers(cloud_type):
    scaler_dir = FILE_PATHS['scalers'].get(cloud_type)
    if not scaler_dir or not os.path.exists(scaler_dir):
        return None

    scalers = {}
    file_suffix = f"_scaler{int(cloud_type)}.pkl"
    for file in os.listdir(scaler_dir):
        if file.endswith(file_suffix):
            column = file.replace(file_suffix, "")
            scaler_path = os.path.join(scaler_dir, file)
            scalers[column] = joblib.load(scaler_path)
    return scalers

# 执行标准化处理
def standardize_data(df):
    """执行标准化处理"""
    # 定义需要标准化的列
    columns_to_scale = ['fy_cth', 'fy_ctt', 'fy_ctp', 'fy_olr',
                        'temp_2m', 'surface_pressure', 'surface_specific_humidity']
    band_columns = [f'band{i}' for i in range(1, 15)]

    processed_dfs = []
    for cloud_type, group in df.groupby('fy_clt'):
        scalers = load_scalers(cloud_type)
        if not scalers:
            print(f"未找到云类型 {cloud_type} 对应的标准化拟合器，跳过该组数据。")
            continue

        # 执行标准化
        missing_columns = []
        for col in columns_to_scale + band_columns:
            if col not in group.columns:
                print(f"警告：数据框中不存在 {col} 列，跳过该列。")
                continue
            if col not in scalers:
                missing_columns.append(col)
                continue
            scaler = scalers[col]
            group[f'{col}_scaled'] = scaler.transform(group[[col]])
        if missing_columns:
            print(f"云类型 {cloud_type} 缺失以下列的标准化拟合器：{missing_columns}")

        # 添加周期性特征
        group['fy_lat_sin'] = np.sin(np.radians(group['fy_lat']))
        group['fy_lat_cos'] = np.cos(np.radians(group['fy_lat']))
        group['fy_lon_sin'] = np.sin(np.radians(group['fy_lon']))
        group['fy_lon_cos'] = np.cos(np.radians(group['fy_lon']))

        processed_dfs.append(group)

    return pd.concat(processed_dfs) if processed_dfs else None

# ==== 主流程改进 ====
# def process_single_time(start_time, end_time):
#     """处理单个时间点"""
#     hour = start_time.hour
#
#     # 处理FY4A数据
#     land_df, ocean_df = process_fy4a_data(start_time, end_time)
#     if land_df is None or ocean_df is None:
#         return False
#
#     # 处理ERA5数据
#     era5_df = process_era5_data(hour)
#     if era5_df is None:
#         return False
#
#     # 数据融合（以海洋数据为例）
#     merged_df = integrate_data(ocean_df, era5_df)
#     if merged_df is None:
#         return False
#
#     # 标准化处理
#     standardized_df = standardize_data(merged_df)
#     if standardized_df is None:
#         return False
#
#     # 保存结果
#     output_dir = os.path.join(OUTPUT_BASE, start_time.strftime("%Y%m%d"))
#     os.makedirs(output_dir, exist_ok=True)
#
#     timestamp = start_time.strftime("%Y%m%d%H")
#     output_path = os.path.join(output_dir, f"{timestamp}_standardized_ocean.nc")
#
#     # 转换为NetCDF保存
#     standardized_df.to_netcdf(output_path)
#     print(f"已保存: {output_path}")
#     return True
def process_single_time(start_time, end_time):
    """Run the full pipeline for one hourly window and save the result.

    Steps: load FY4A data, load the matching ERA5 hour, merge the ocean
    subset, standardize per cloud type, and write a compressed NetCDF
    file under OUTPUT_BASE/<date>/. Returns True on success.
    """
    # Each stage returns None on failure; stop as soon as one does.
    land_df, ocean_df = process_fy4a_data(start_time, end_time)
    if land_df is None or ocean_df is None:
        return False

    era5_df = process_era5_data(start_time.hour)
    if era5_df is None:
        return False

    # Only the ocean subset is merged and saved here.
    merged_df = integrate_data(ocean_df, era5_df)
    if merged_df is None:
        return False

    standardized_df = standardize_data(merged_df)
    if standardized_df is None:
        return False

    # Build the output path: <OUTPUT_BASE>/<YYYYMMDD>/<YYYYMMDDHH>_standardized_ocean.nc
    output_dir = os.path.join(OUTPUT_BASE, start_time.strftime("%Y%m%d"))
    os.makedirs(output_dir, exist_ok=True)
    stamp = start_time.strftime("%Y%m%d%H")
    output_path = os.path.join(output_dir, f"{stamp}_standardized_ocean.nc")

    try:
        # Convert to an xarray Dataset for NetCDF output.
        ds = xr.Dataset.from_dataframe(standardized_df)

        # Annotate the coordinate variables.
        ds['fy_lat'].attrs = {'units': 'degrees_north'}
        ds['fy_lon'].attrs = {'units': 'degrees_east'}

        # Light zlib compression for every data variable.
        compression = {name: {'zlib': True, 'complevel': 1} for name in ds.data_vars}

        ds.to_netcdf(output_path, encoding=compression)
        print(f"已保存: {output_path}")
        return True
    except Exception as e:
        print(f"保存文件失败: {str(e)}")
        return False

def main():
    """Process all 24 hourly windows of BASE_DATE in parallel."""
    time_intervals = generate_hourly_intervals(BASE_DATE)

    # Fan the windows out to a process pool (tune worker count to CPU).
    with multiprocessing.Pool(processes=8) as pool:
        pending = [
            pool.apply_async(process_single_time, interval)
            for interval in time_intervals
        ]

        # Collect results while showing a progress bar.
        success_count = 0
        with tqdm(total=len(pending)) as pbar:
            for task in pending:
                if task.get():
                    success_count += 1
                pbar.update(1)

        print(f"处理完成，成功处理{success_count}/{len(time_intervals)}个时间点")


# Entry point guard: required so multiprocessing workers that re-import
# this module do not re-launch the pipeline.
if __name__ == "__main__":
    main()