import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
import joblib
import os

# Directory of fitted standardization scalers for each supported cloud type (2-6).
scaler_dirs = {
    cloud_type: f"/home/liudd/deeplearing/backup/scaler{cloud_type}/"
    for cloud_type in range(2, 7)
}

# Load the new (unseen) data that will be standardized with the
# previously fitted scalers.
new_data_path = '2020010104ocean_data.csv'
new_df = pd.read_csv(new_data_path, low_memory=False)

# Preprocessing: '--' marks missing values in the CSV; map it to NaN.
new_df.replace('--', np.nan, inplace=True)

# Columns standardized via the per-cloud-type fitted scalers.
columns_to_scale = ['fy_cth', 'fy_ctt', 'fy_ctp', 'fy_olr', 'temp_2m', 'surface_pressure', 'surface_specific_humidity']
band_columns_direct = [f'band{i}' for i in range(1, 7)]  # band1-band6 are standardized directly as well

def transform_data(data, columns, scalers):
    """Standardize columns of *data* in place using pre-fitted scalers.

    For each name in *columns* that exists in *data* and has a fitted
    scaler in *scalers*, a new ``<column>_scaled`` column is written with
    the transformed values. Columns absent from *data* are skipped with a
    warning; columns without a scaler are collected and reported once.

    Args:
        data: DataFrame to modify in place.
        columns: iterable of column names to standardize.
        scalers: mapping of column name -> fitted scaler object exposing
            ``transform`` (e.g. sklearn StandardScaler).
    """
    missing_columns = []
    for column in columns:
        if column not in data.columns:
            print(f"警告：数据框中不存在 {column} 列，跳过该列。")
            continue
        # Single lookup instead of `in` check followed by indexing.
        scaler = scalers.get(column)
        if scaler is None:
            missing_columns.append(column)
            continue
        data[f'{column}_scaled'] = scaler.transform(data[[column]])
    if missing_columns:
        # Guard the diagnostic itself: the original `data['fy_clt'].iloc[0]`
        # raises on an empty frame or when 'fy_clt' is absent.
        if 'fy_clt' in data.columns and len(data):
            cloud_type = data['fy_clt'].iloc[0]
        else:
            cloud_type = '未知'
        print(f"云类型 {cloud_type} 缺失以下列的标准化拟合器：{missing_columns}")

def map_lat_lon(data):
    """Encode latitude and longitude as periodic sin/cos features in place.

    Adds ``fy_lat_sin``/``fy_lat_cos`` and ``fy_lon_sin``/``fy_lon_cos``
    columns computed from the degree-valued ``fy_lat``/``fy_lon`` columns.
    """
    for coord in ('fy_lat', 'fy_lon'):
        angle = np.radians(data[coord])
        data[f'{coord}_sin'] = np.sin(angle)
        data[f'{coord}_cos'] = np.cos(angle)

# Process the data grouped by cloud type: each group is standardized with
# the scalers that were fitted for that cloud type during training.
grouped = new_df.groupby('fy_clt')
result_dfs = []

for cloud_type, group in grouped:
    if cloud_type not in scaler_dirs:
        print(f"未找到云类型 {cloud_type} 对应的标准化拟合器目录，跳过该组数据。")
        continue
    scaler_dir = scaler_dirs[cloud_type]
    # isdir, not exists: a plain file at this path would pass the original
    # check and then crash os.listdir below.
    if not os.path.isdir(scaler_dir):
        print(f"云类型 {cloud_type} 对应的标准化拟合器目录 {scaler_dir} 不存在，跳过该组数据。")
        continue
    # Load this cloud type's fitted scalers; files are named
    # "<column>_scaler<cloud_type>.pkl".
    scalers = {}
    file_suffix = f"_scaler{int(cloud_type)}.pkl"
    for file in os.listdir(scaler_dir):
        if file.endswith(file_suffix):
            # Strip only the trailing suffix. The original str.replace would
            # remove every occurrence of the suffix inside the filename.
            column = file[:-len(file_suffix)]
            scaler_path = os.path.join(scaler_dir, file)
            scalers[column] = joblib.load(scaler_path)
    print(f"云类型 {cloud_type} 加载的标准化拟合器键：{list(scalers.keys())}")

    # Standardize the feature columns, then add periodic lat/lon features.
    transform_data(group, columns_to_scale + band_columns_direct, scalers)
    map_lat_lon(group)
    result_dfs.append(group)

# Merge every processed group and persist the combined result.
if not result_dfs:
    print("没有有效数据进行处理。")
else:
    final_df = pd.concat(result_dfs, ignore_index=True)
    # Write the standardized data for downstream model inference.
    final_df.to_csv('standard_new_ocean_data.csv', index=False)
    print("新数据处理完成并保存至文件。")