import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import joblib
import os

# Load the merged land dataset (absolute path; NOTE(review): consider making this configurable).
data_path = '/home/liudd/03_land_inversion/01data/land_merged_201902_202004_2025.csv'
df = pd.read_csv(data_path, low_memory=False)

# Keep only rows where FY cloud-top height lies above the CloudSat cloud-base height.
df = df[df['fy_cth'] - df['cloudsat_cbh'] > 0]
filtered_data_size = len(df)
# NOTE(review): the message says "total before filtering", but this count is taken
# AFTER the cth > cbh filter above — the wording may be misleading.
print(f'筛选前的数据总量为：{filtered_data_size}')

# Keep water clouds only (fy_clt == 6).
# NOTE(review): the original comment here said "supercooled clouds" while the
# print below says "water clouds" — confirm what class code 6 actually denotes.
df = df[df['fy_clt'] == 6]
filtered_data_size = len(df)
print(f'水云的数据量为：{filtered_data_size}')

# Keep single-layer clouds (type code 0).
# NOTE(review): 'cloudsat_tpye' looks like a typo for 'cloudsat_type'; it
# presumably matches the CSV header as-is — verify before renaming.
df = df[df['cloudsat_tpye'] == 0]
filtered_data_size = len(df)
print(f'单层水云的数据总量为：{filtered_data_size}')
# Replace the '--' missing-value placeholder with NaN across the whole frame.
df.replace('--', np.nan, inplace=True)

# Coerce fy_olr to numeric; unparseable values become NaN.
df['fy_olr'] = pd.to_numeric(df['fy_olr'], errors='coerce')

# Impute fy_olr NaNs with the column median.
# NOTE(review): other columns may still hold NaN after the '--' replacement
# above; StandardScaler.fit_transform raises on NaN — confirm those columns
# are clean in the source CSV.
df['fy_olr'] = df['fy_olr'].fillna(df['fy_olr'].median())  # median imputation
# Split into train / validation / test: 15% test, then 20% of the remainder as
# validation — roughly 68% / 17% / 15% overall, reproducible via random_state.
train_df, test_df = train_test_split(df, test_size=0.15, random_state=42)
train_df, val_df = train_test_split(train_df, test_size=0.2, random_state=42)

# Module-level registry of per-column StandardScalers, fitted on the training set only.
scalers = {}
scaler_dir = "/home/liudd/03_land_inversion/02scaler/scaler5"

def fit_scalers(train_data, columns):
    """Fit one StandardScaler per column on the training set.

    For every name in *columns*, a fresh scaler is fitted on that column of
    *train_data*, the standardized values are written to a new
    ``'<column>_scaled'`` column in place, and the fitted scaler is recorded
    in the module-level ``scalers`` dict for later reuse on val/test data.
    """
    for col in columns:
        col_scaler = StandardScaler()
        scaled_values = col_scaler.fit_transform(train_data[[col]])
        train_data[f'{col}_scaled'] = scaled_values
        # Keep the fitted scaler so transform_data can apply the same statistics.
        scalers[col] = col_scaler

def transform_data(data, columns):
    """Standardize *data* in place using scalers fitted on the training set.

    Each column named in *columns* is transformed with the corresponding
    entry of the module-level ``scalers`` dict (populated by ``fit_scalers``),
    and the result is stored in a new ``'<column>_scaled'`` column.
    Raises ``KeyError`` if a column was never fitted.
    """
    for col in columns:
        data[f'{col}_scaled'] = scalers[col].transform(data[[col]])



def map_lat_lon(data):
    """Add periodic (sin/cos) encodings of latitude and longitude in place.

    Creates four new columns on *data*: ``fy_lat_sin``, ``fy_lat_cos``,
    ``fy_lon_sin`` and ``fy_lon_cos``, computed from the degree-valued
    ``fy_lat`` / ``fy_lon`` columns.
    """
    for coord in ('fy_lat', 'fy_lon'):
        angle_rad = np.radians(data[coord])
        data[f'{coord}_sin'] = np.sin(angle_rad)
        data[f'{coord}_cos'] = np.cos(angle_rad)

# Columns to standardize with per-column StandardScalers.
columns_to_scale = ['fy_cth', 'fy_ctt', 'fy_ctp', 'fy_olr','temp_2m','surface_pressure','surface_specific_humidity']
# NOTE(review): range(1, 15) covers band1..band14, but the original comment
# claimed only Band1-Band6 are scaled directly — confirm the intended range.
band_columns_direct = [f'band{i}' for i in range(1, 15)]  # bands standardized directly
# band_columns_brightness = [f'band{i}' for i in range(7, 15)]  # Band7-Band14 brightness temperature

# Fit scalers on the training set only, then add periodic lat/lon features.
fit_scalers(train_df, columns_to_scale + band_columns_direct)
# compute_brightness_temperature(train_df, band_columns_brightness)
# compute_gray_values(train_df, band_columns_direct + band_columns_brightness)
map_lat_lon(train_df)

# Transform the validation and test sets with the training-set statistics
# (no refitting, to avoid data leakage).
transform_data(val_df, columns_to_scale + band_columns_direct)
# compute_brightness_temperature(val_df, band_columns_brightness)
# compute_gray_values(val_df, band_columns_direct + band_columns_brightness)
map_lat_lon(val_df)

transform_data(test_df, columns_to_scale + band_columns_direct)
# compute_brightness_temperature(test_df, band_columns_brightness)
# compute_gray_values(test_df, band_columns_direct + band_columns_brightness)
map_lat_lon(test_df)

# Persist the three splits as CSV.
# NOTE(review): 'standaerd' in the validation/test filenames looks like a typo
# for 'standard' — downstream readers may depend on the misspelled paths, so
# they are left unchanged here.
train_df.to_csv('/home/liudd/03_land_inversion/01data/standard_train_ocean6.csv', index=False)
val_df.to_csv('/home/liudd/03_land_inversion/01data/standaerd_validation_ocean6.csv', index=False)
test_df.to_csv('/home/liudd/03_land_inversion/01data/standaerd_test_ocean6.csv', index=False)


# Ensure the scaler output directory exists.
os.makedirs(scaler_dir, exist_ok=True)

# Persist each fitted scaler so inference code can reapply identical scaling.
for column, scaler in scalers.items():
    scaler_path = os.path.join(scaler_dir, f"{column}_scaler6.pkl")
    joblib.dump(scaler, scaler_path)
    print(f"Scaler for {column} saved to {scaler_path}")

print("数据处理完成并保存至文件。")

