# Final version

import os
import xarray as xr
import h5py
import numpy as np
import pandas as pd
import netCDF4 as nc
from global_land_mask import globe
import multiprocessing
from datetime import datetime, timedelta
from sklearn.preprocessing import StandardScaler
import joblib
import torch
import torch.nn as nn
from tqdm import tqdm
import logging

# Configure logging for the whole pipeline (timestamped INFO-level messages).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# ==== Global configuration ====
# Processing window, inclusive, as 'YYYYMMDD' strings.
START_DATE = "20200101"
END_DATE = "20200430"
# START_DATE = "20190901"
# END_DATE = "20191231"
# START_DATE = "20190201"
# END_DATE = "20190831"
# Directory for the final results (saved hourly; the timestamp is encoded in
# each output file name).
RESULT_BASE = "/mnt/datastore/liudddata/result/20200104new"
# RESULT_BASE = "/mnt/datastore/liudddata/result/20190212new"

CONFIG = {
    'model_dir': '/home/liudd/deeplearing/train/',   # trained per-cloud-type model weights (*.pth)
    'scaler_dir': '/home/liudd/deeplearing/train/',  # target (y) scalers used for inverse transform
    'cloud_types': [2, 3, 4, 5, 6, 7],               # fy_clt classes that have a retrieval model
}

# File-system layout of all inputs: FY4A L1/L2 products, ERA5 surface files,
# the fixed FY4A coordinate grid, and the per-cloud-type feature scalers.
FILE_PATHS = {
    'fy4a': {
        # One directory per FY4A product type for the active processing window.
        'base_dirs': {
            'hdf': "/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_l1_202001_04",
            'cth': "/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_cth_202001_04",
            'ctt': "/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_ctt_202001_04",
            'ctp': "/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_ctp_202001_04",
            'clt': "/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_clt_202001_04",
            'olr': "/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_olr_202001_04",
            'clm': "/mnt/datastore/liudddata/fy_4Adata/FY202001_04/fy_clm_202001_04"
            # 'hdf': "/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_L1_9_12",
            # 'cth': "/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_cth_9_12",
            # 'ctt': "/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_ctt_9_12",
            # 'ctp': "/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_ctp_9_12",
            # 'clt': "/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_clt_9_12",
            # 'olr': "/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_olr_9_12",
            # 'clm': "/mnt/datastore/liudddata/fy_4Adata/FY201909_12/fy_clm_9_12"
            # 'hdf': "/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_l1_2_8",
            # 'cth': "/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_cth_2_8",
            # 'ctt': "/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_ctt_2_8",
            # 'ctp': "/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_ctp_2_8",
            # 'clt': "/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_clt_2_8",
            # 'olr': "/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_olr_2_8",
            # 'clm': "/mnt/datastore/liudddata/fy_4Adata/FY201902_08/fy_clm_2_8"
        },
        # File-name templates; {start}/{end} are 'YYYYmmddHHMMSS' timestamps.
        'var_templates': {
            'hdf': "FY4A-_AGRI--_N_DISK_1047E_L1-_FDI-_MULT_NOM_{start}_{end}_4000M_V0001.HDF",
            'cth': "FY4A-_AGRI--_N_DISK_1047E_L2-_CTH-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'ctt': "FY4A-_AGRI--_N_DISK_1047E_L2-_CTT-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'ctp': "FY4A-_AGRI--_N_DISK_1047E_L2-_CTP-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'clt': "FY4A-_AGRI--_N_DISK_1047E_L2-_CLT-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'olr': "FY4A-_AGRI--_N_DISK_1047E_L2-_OLR-_MULT_NOM_{start}_{end}_4000M_V0001.NC",
            'clm': "FY4A-_AGRI--_N_DISK_1047E_L2-_CLM-_MULT_NOM_{start}_{end}_4000M_V0001.NC"
        }
    },
    'era5': {
        'base_dir': "/mnt/datastore/liudddata/ERA5_output/202001_04",
        # 'base_dir': "/mnt/datastore/liudddata/ERA5_output/20190912",
        # 'base_dir': "/mnt/datastore/liudddata/ERA5_output/201902_08",
        # {date} is 'YYYYMMDD', {hour} is zero-padded 0-23.
        'template': "ERA5_{date}{hour:02d}_surface_vars_FY4A_grid.nc"
    },
    # Static lat/lon grid of the FY4A full disk.
    'coord': '/home/liudd/data_preprocessing/FY4A_coordinates.nc',
    # Directories holding the fitted per-feature scalers, keyed by cloud type.
    'scalers': {
        2: "/home/liudd/deeplearing/train/scaler2",
        3: "/home/liudd/deeplearing/train/scaler3/",
        4: "/home/liudd/deeplearing/train/scaler4/",
        5: "/home/liudd/deeplearing/train/scaler5/",
        6: "/home/liudd/deeplearing/train/scaler6/",
        7: "/home/liudd/deeplearing/train/scaler7/"
    }
}

# ==== Time helpers ====
def generate_hourly_intervals(date_str):
    """Return the 24 hourly windows of *date_str* as (start, end) tuples.

    Each window runs from the top of the hour to 14 minutes 59 seconds
    past it.
    """
    day_start = datetime.strptime(date_str, "%Y%m%d")
    windows = []
    for h in range(24):
        win_start = day_start + timedelta(hours=h)
        win_end = win_start + timedelta(minutes=14, seconds=59)
        windows.append((win_start, win_end))
    return windows

def date_range(start_date_str, end_date_str):
    """Return every date from start to end (inclusive) as 'YYYYMMDD' strings."""
    first = datetime.strptime(start_date_str, "%Y%m%d")
    last = datetime.strptime(end_date_str, "%Y%m%d")
    n_days = (last - first).days
    # Empty when end precedes start, matching a simple while-loop walk.
    return [(first + timedelta(days=k)).strftime("%Y%m%d")
            for k in range(n_days + 1)]

def get_fy4a_paths(start_time, end_time):
    """Build the full path of every FY4A product file for one scan window.

    Returns a dict keyed by product short name ('hdf', 'cth', ...) using the
    directories and file-name templates configured in FILE_PATHS.
    """
    fmt = "%Y%m%d%H%M%S"
    stamps = {"start": start_time.strftime(fmt), "end": end_time.strftime(fmt)}
    dirs = FILE_PATHS['fy4a']['base_dirs']
    return {
        var: os.path.join(dirs[var], template.format(**stamps))
        for var, template in FILE_PATHS['fy4a']['var_templates'].items()
    }

def get_era5_path(hour, base_date):
    """Build the path of the hourly ERA5 surface file for *base_date*/*hour*."""
    cfg = FILE_PATHS['era5']
    filename = cfg['template'].format(date=base_date, hour=hour)
    return os.path.join(cfg['base_dir'], filename)

# ==== 数据处理函数 ====
def process_fy4a_data(start_time, end_time):
    """
    Load one FY4A time slice (14 L1 bands + L2 cloud products), flatten it to
    a pixel table, and keep ocean pixels only.

    Parameters
    ----------
    start_time, end_time : datetime
        Scan window used to build the FY4A file names.

    Returns
    -------
    tuple
        (ocean_df, lat_2d, lon_2d) on success, or (None, None, None) when any
        input file is missing or reading fails.  ocean_df keeps the original
        row/col grid indices so the 2D field can be rebuilt later.
    """
    try:
        paths = get_fy4a_paths(start_time, end_time)
        # All seven inputs must exist for this hour, otherwise skip the slice.
        for key in ('hdf', 'cth', 'ctt', 'ctp', 'clt', 'olr', 'clm'):
            if not os.path.exists(paths[key]):
                logging.error(f"文件不存在: {paths[key]}")
                return None, None, None

        # Read the fixed coordinate grid once.  (The original opened this file
        # twice and never closed the first handle.)
        with nc.Dataset(FILE_PATHS['coord'], 'r') as coord_ds:
            lat_2d = coord_ds.variables['lat'][:].T  # (2748, 2748)
            lon_2d = coord_ds.variables['lon'][:].T
        rows, cols = np.indices(lat_2d.shape)  # row/col index of every pixel

        def _read_l2(key, varname):
            # Read one L2 product and close its dataset promptly (the
            # original leaked all six handles).
            with nc.Dataset(paths[key], 'r') as ds:
                return ds.variables[varname][:]

        cth_data = _read_l2('cth', 'CTH')
        ctt_data = _read_l2('ctt', 'CTT')
        ctp_data = _read_l2('ctp', 'CTP')
        clt_data = _read_l2('clt', 'CLT')
        olr_data = _read_l2('olr', 'OLR')
        clm_data = _read_l2('clm', 'CLM')

        # Calibrate the 14 L1 bands: raw counts index into the per-band
        # calibration lookup table.
        img = np.zeros((2748, 2748, 14), dtype=np.float32)
        with h5py.File(paths['hdf'], 'r') as h5file:
            for i in range(14):
                nom = h5file[f'NOMChannel{i + 1:02d}'][:]
                cal = h5file[f'CALChannel{i + 1:02d}'][:]
                # Band 7 (i == 6) carries 16-bit counts; the rest are 12-bit.
                upper = 65536 if i == 6 else 4096
                idx = nom.astype(np.int64)
                # Fold the LUT-bounds test into the validity mask.  The
                # original filtered out-of-range indices AFTER selecting the
                # positions, so a single bad count made the fancy-index
                # assignment shape-mismatch and dropped the whole slice.
                valid = (nom >= 0) & (nom < upper) & (idx < len(cal))
                band = np.zeros_like(nom, dtype=np.float32)
                band[valid] = cal[idx[valid]]
                img[:, :, i] = band

        # Flatten everything into one pixel table.
        data = {
            'row': rows.flatten().astype(int),  # grid indices for 2D rebuild
            'col': cols.flatten().astype(int),
            'fy_lat': lat_2d.flatten(),
            'fy_lon': lon_2d.flatten(),
            'fy_cth': cth_data.flatten(),
            'fy_ctt': ctt_data.flatten(),
            'fy_ctp': ctp_data.flatten(),
            'fy_clt': clt_data.flatten(),  # raw cloud type, drives the per-type models
            'fy_olr': olr_data.flatten(),
            'fy_clm': clm_data.flatten(),
            'fy_time': start_time,
        }
        for i in range(14):
            data[f'band{i + 1}'] = img[:, :, i].flatten()
        df = pd.DataFrame(data).dropna()

        # Vectorised land/sea split; global_land_mask wants lon in [-180, 180].
        lon_adj = np.where(df['fy_lon'] > 180, df['fy_lon'] - 360, df['fy_lon'])
        is_land = globe.is_land(df['fy_lat'].values, lon_adj)

        ocean_df = df[~is_land].copy()
        return ocean_df, lat_2d, lon_2d
    except Exception as e:
        logging.error(f"处理FY4A数据失败: {e}")
        return None, None, None



# ==== 2D grid reconstruction ====
def reconstruct_2d_grid(predicted_df, lat_2d, lon_2d):
    """
    Scatter per-pixel predictions back onto the full FY4A grid.

    Cells without a prediction stay NaN.  Returns an xarray.Dataset holding
    the prediction together with the clt/clm/cth fields and the lat/lon grids.
    """
    shape = lat_2d.shape
    # One NaN-filled plane per output field.
    planes = {name: np.full(shape, np.nan, dtype=np.float32)
              for name in ('predicted', 'clt', 'clm', 'cth')}

    if not predicted_df.empty:
        r = predicted_df['row'].values.astype(int)
        c = predicted_df['col'].values.astype(int)
        source_cols = {'predicted': 'Predicted', 'clt': 'fy_clt',
                       'clm': 'fy_clm', 'cth': 'fy_cth'}
        for name, col in source_cols.items():
            planes[name][r, c] = predicted_df[col].values

    data_vars = {name: (("y", "x"), plane) for name, plane in planes.items()}
    data_vars['lat'] = (("y", "x"), lat_2d)
    data_vars['lon'] = (("y", "x"), lon_2d)
    return xr.Dataset(
        data_vars,
        coords={"y": np.arange(shape[0]), "x": np.arange(shape[1])},
    )
def process_era5_data(hour, base_date):
    """
    Read one hourly ERA5 surface file into a flat DataFrame.

    Returns None when the file is missing or unreadable; otherwise a frame
    with longitude/latitude plus the three surface variables, NaN rows dropped.
    """
    era5_path = get_era5_path(hour, base_date)
    if not os.path.exists(era5_path):
        logging.error(f"ERA5文件不存在: {era5_path}")
        return None
    try:
        with nc.Dataset(era5_path, 'r') as ds:
            columns = {
                'longitude': ds.variables['lon'][:].flatten(),
                'latitude': ds.variables['lat'][:].flatten(),
                'temp_2m': ds.variables['temp_2m'][:].flatten(),
                'surface_pressure': ds.variables['surface_pressure'][:].flatten(),
                'surface_specific_humidity':
                    ds.variables['surface_specific_humidity'][:].flatten(),
            }
        return pd.DataFrame(columns).dropna()
    except Exception as e:
        logging.error(f"处理ERA5数据时出错(hour={hour}): {e}")
        return None

def integrate_data(sat_df, era5_df):
    """
    Join satellite pixels with ERA5 surface variables on exact lat/lon match.

    Returns the inner-joined frame with NaN rows dropped, restricted to valid
    lon/lat ranges, and the redundant ERA5 coordinate columns removed; None
    on any error.
    """
    try:
        era5_cols = ['longitude', 'latitude', 'temp_2m',
                     'surface_pressure', 'surface_specific_humidity']
        combined = sat_df.merge(
            era5_df[era5_cols],
            how='inner',
            left_on=['fy_lon', 'fy_lat'],
            right_on=['longitude', 'latitude'],
        )
        combined = combined.dropna()
        # Keep only geographically plausible coordinates.
        in_bounds = (combined['fy_lon'].between(-180, 180)
                     & combined['fy_lat'].between(-90, 90))
        combined = combined[in_bounds]
        return combined.drop(columns=['longitude', 'latitude'])
    except Exception as e:
        logging.error(f"整合数据时出错: {e}")
        return None
#

def load_scalers(cloud_type):
    """
    Load every per-feature scaler saved for one cloud type.

    Returns a dict mapping feature name -> fitted scaler, or None when the
    scaler directory is not configured or does not exist.  Scaler files are
    named '<feature>_scaler<cloud_type>.pkl'.
    """
    scaler_dir = FILE_PATHS['scalers'].get(cloud_type)
    if not scaler_dir or not os.path.exists(scaler_dir):
        return None
    suffix = f"_scaler{int(cloud_type)}.pkl"
    return {
        fname.replace(suffix, ""): joblib.load(os.path.join(scaler_dir, fname))
        for fname in os.listdir(scaler_dir)
        if fname.endswith(suffix)
    }

def standardize_data(df):
    """
    Standardize the merged table per cloud type.

    For each fy_clt group the saved scalers are applied to the physical and
    band columns (producing '<col>_scaled' columns) and cyclic lat/lon sin/cos
    features are appended.  Groups without any scalers are skipped with a
    warning.  Returns the concatenated result, or None when nothing could be
    processed.
    """
    scale_cols = ['fy_cth', 'fy_ctt', 'fy_ctp', 'fy_olr',
                  'temp_2m', 'surface_pressure', 'surface_specific_humidity']
    scale_cols = scale_cols + [f'band{i}' for i in range(1, 15)]
    pieces = []
    # Group on the raw cloud type so each group gets its own scalers.
    for cloud_type, group in df.groupby('fy_clt'):
        scalers = load_scalers(cloud_type)
        if not scalers:
            logging.warning(f"未找到云类型 {cloud_type} 对应的标准化拟合器，跳过该组数据。")
            continue
        no_scaler = []
        for col in scale_cols:
            if col not in group.columns:
                logging.warning(f"数据框中不存在 {col} 列，跳过该列。")
            elif col not in scalers:
                no_scaler.append(col)
            else:
                group[f'{col}_scaled'] = scalers[col].transform(group[[col]])
        if no_scaler:
            logging.warning(f"云类型 {cloud_type} 缺失以下列的标准化拟合器：{no_scaler}")
        # Cyclic encodings of latitude/longitude.
        rad_lat = np.radians(group['fy_lat'])
        rad_lon = np.radians(group['fy_lon'])
        group['fy_lat_sin'] = np.sin(rad_lat)
        group['fy_lat_cos'] = np.cos(rad_lat)
        group['fy_lon_sin'] = np.sin(rad_lon)
        group['fy_lon_cos'] = np.cos(rad_lon)
        pieces.append(group)
    return pd.concat(pieces) if pieces else None

# ==== Model definition ====
class CNN_DNN(nn.Module):
    """1-D CNN feature extractor followed by a three-layer MLP regressor.

    Input: (batch, 1, input_size); output: (batch, 1).
    Attribute names must stay as-is so saved state_dicts keep loading.
    """

    def __init__(self, input_size):
        super().__init__()
        # Three conv stages, each doubling channels; padding=1 with k=3
        # keeps the sequence length equal to input_size.
        self.conv1 = nn.Conv1d(1, 32, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm1d(32)
        self.conv2 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm1d(64)
        self.conv3 = nn.Conv1d(64, 128, kernel_size=3, padding=1)
        self.bn3 = nn.BatchNorm1d(128)
        self.fc1 = nn.Linear(128 * input_size, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 1)
        self.relu = nn.LeakyReLU(0.01)
        self.dropout = nn.Dropout(p=0.4)

    def forward(self, x):
        # Convolutional stack: conv -> batchnorm -> LeakyReLU, three times.
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            x = self.relu(bn(conv(x)))
        flat = x.view(x.size(0), -1)
        hidden = self.dropout(self.relu(self.fc1(flat)))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)

def predict_for_cloud_type(cloud_type, df, device):
    """
    Run the per-cloud-type model on all rows of one cloud type.

    Uses the standardized ('*_scaled') and cyclic features as model input,
    performs batched inference, and inverse-transforms the output with the
    saved target scaler.  Returns the subset DataFrame with a new 'Predicted'
    column, or None when inputs/models are missing or any step fails.
    """
    try:
        subset = df[df['fy_clt'] == cloud_type].copy()
        if subset.empty:
            return None
        # Feature order must match training: physics, cyclic coords,
        # 14 bands, then ERA5 surface variables.
        feature_cols = (
            ['fy_cth_scaled', 'fy_ctt_scaled', 'fy_ctp_scaled', 'fy_olr_scaled',
             'fy_lat_sin', 'fy_lat_cos', 'fy_lon_sin', 'fy_lon_cos']
            + [f'band{i}_scaled' for i in range(1, 15)]
            + ['temp_2m_scaled', 'surface_pressure_scaled',
               'surface_specific_humidity_scaled']
        )
        absent = [f for f in feature_cols if f not in subset.columns]
        if absent:
            logging.warning(f"云类型 {cloud_type} 缺失特征: {absent}")
            return None
        inputs = torch.tensor(subset[feature_cols].values, dtype=torch.float32)

        model_path = os.path.join(CONFIG['model_dir'],
                                  f'trained_model_ocean{cloud_type}.1.pth')
        if not os.path.exists(model_path):
            logging.error(f"模型不存在: {model_path}")
            return None
        net = CNN_DNN(inputs.shape[1]).to(device)
        net.load_state_dict(torch.load(model_path, map_location=device))
        net.eval()

        # Batched inference; unsqueeze adds the channel dim the CNN expects.
        chunk_size = 1024
        chunks = []
        with torch.no_grad():
            for start in range(0, len(inputs), chunk_size):
                batch = inputs[start:start + chunk_size].unsqueeze(1).to(device)
                chunks.append(net(batch).cpu().numpy().flatten())
        raw_pred = np.concatenate(chunks)

        scaler_y_path = os.path.join(CONFIG['scaler_dir'],
                                     f'scaler{cloud_type}_y.pkl')
        if not os.path.exists(scaler_y_path):
            logging.error(f"标准化器不存在: {scaler_y_path}")
            return None
        scaler_y = joblib.load(scaler_y_path)
        subset['Predicted'] = scaler_y.inverse_transform(raw_pred.reshape(-1, 1))
        return subset
    except Exception as e:
        logging.error(f"云类型 {cloud_type} 预测失败: {str(e)}")
        return None

# ==== Single-slice processing: merge, standardize, predict, and save ====
def process_single_time(start_time, end_time):
    """
    Run the full retrieval chain for one hourly slice: read FY4A + ERA5,
    merge them, standardize, predict per cloud type, rebuild the 2D grid and
    write it to RESULT_BASE as a compressed NetCDF file.

    Returns True when a result file was written, False when the slice was
    skipped for any reason (missing files, no matching data, no predictions).
    """
    # FY4A ocean pixels plus the full coordinate grid for the later rebuild.
    ocean_df, lat_2d, lon_2d = process_fy4a_data(start_time, end_time)
    if ocean_df is None or ocean_df.empty:
        return False

    # Matching hourly ERA5 surface fields.
    era5_df = process_era5_data(start_time.hour, start_time.strftime("%Y%m%d"))
    if era5_df is None:
        return False

    merged_df = integrate_data(ocean_df, era5_df)
    if merged_df is None or merged_df.empty:
        return False

    # Keep only pixels with cloud-mask value 0.
    merged_df = merged_df[merged_df['fy_clm'] == 0]
    if merged_df.empty:
        logging.warning("无满足clm=0的数据，跳过该时段")
        return False

    standardized_df = standardize_data(merged_df)
    if standardized_df is None or standardized_df.empty:
        return False

    # Per-cloud-type inference.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    predicted_dfs = []
    for cloud_type in CONFIG['cloud_types']:
        pred_df = predict_for_cloud_type(cloud_type, standardized_df, device)
        if pred_df is not None and not pred_df.empty:
            predicted_dfs.append(pred_df)
    if not predicted_dfs:
        logging.warning("没有预测结果")
        return False

    final_df = pd.concat(predicted_dfs, ignore_index=True)
    # Only the columns needed for the 2D rebuild and later analysis.
    final_df = final_df[['row', 'col', 'fy_lat', 'fy_lon', 'Predicted',
                         'fy_clt', 'fy_clm', 'fy_cth']]

    output_ds = reconstruct_2d_grid(final_df, lat_2d, lon_2d)

    # Variable metadata.  missing_value is the actual NaN float (the original
    # wrote the literal string 'np.nan', which is useless to NetCDF readers).
    output_ds['predicted'].attrs = {
        'long_name': 'Predicted_Value',
        'units': 'm',
        'missing_value': np.nan,
    }
    output_ds['clt'].attrs = {
        'long_name': 'fy_clt',
        'units': '1',
        'missing_value': np.nan,
    }
    output_ds['clm'].attrs = {
        'long_name': 'fy_clm',
        'units': '1',
        'valid_range': [0, 3],
        'missing_value': np.nan,
    }
    output_ds['cth'].attrs = {
        'long_name': 'fy_cth',
        'units': 'm',
        'missing_value': np.nan,
    }
    output_ds['lat'].attrs = {'units': 'degrees_north'}
    output_ds['lon'].attrs = {'units': 'degrees_east'}

    # Ensure the output directory exists (the original crashed on a fresh
    # RESULT_BASE), then write with light compression; 'cth' is now
    # compressed like the other variables instead of being left out.
    os.makedirs(RESULT_BASE, exist_ok=True)
    timestamp = start_time.strftime("%Y%m%d%H")
    output_path = os.path.join(RESULT_BASE, f"{timestamp}_predicted_2d.nc")
    comp = {'zlib': True, 'complevel': 1}
    output_ds.to_netcdf(
        output_path,
        encoding={'predicted': dict(comp),
                  'clt': dict(comp),
                  'clm': dict(comp),
                  'cth': dict(comp)}
    )
    logging.info(f"2D结果已保存: {output_path}")
    return True


# ==== Entry point ====
def main():
    """Fan every hourly interval in the configured date range out to a
    process pool and report how many slices were processed successfully."""
    dates = date_range(START_DATE, END_DATE)
    n_total = sum(len(generate_hourly_intervals(d)) for d in dates)
    logging.info(f"共需处理 {len(dates)} 天，共 {n_total} 个时段。")

    pool = multiprocessing.Pool(processes=8)  # tune to the CPU core count
    pending = [
        pool.apply_async(process_single_time, (t0, t1))
        for d in dates
        for t0, t1 in generate_hourly_intervals(d)
    ]
    pool.close()

    ok = 0
    with tqdm(total=len(pending)) as bar:
        for task in pending:
            if task.get():
                ok += 1
            bar.update(1)
    pool.join()
    logging.info(f"处理完成，成功处理 {ok}/{len(pending)} 个时段。")

if __name__ == "__main__":
    main()
