import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import matplotlib
matplotlib.use('TkAgg')  # interactive GUI backend (alternative: 'Qt5Agg'); NOTE(review): pyplot is imported above — verify the backend switch still takes effect on the installed matplotlib version


# 配置路径
train_folder = 'E:\\code\\611\\轻量化\\飞行数据\\☆公开☆仿真-训练集\\训练集'
test_folder = 'E:\\code\\611\\轻量化\\飞行数据\\☆公开☆仿真-测试集\\测试集01'
processed_train_folder = 'E:\\code\\611\\轻量化\\飞行数据\\processed_train-new'
processed_test_folder = 'E:\\code\\611\\轻量化\\飞行数据\\processed_test-new'
os.makedirs(processed_train_folder, exist_ok=True)
os.makedirs(processed_test_folder, exist_ok=True)

# Load every Excel workbook found in a folder, in sorted filename order.
def load_all_data(folder):
    """Read all ``.xlsx`` files in *folder*.

    Returns a tuple ``(frames, names)``: the DataFrames (with surrounding
    whitespace stripped from column labels) and the matching file names,
    both ordered by file name.
    """
    frames, names = [], []
    for entry in sorted(os.listdir(folder)):
        if not entry.endswith('.xlsx'):
            continue
        frame = pd.read_excel(os.path.join(folder, entry), header=0)
        frame.columns = frame.columns.str.strip()
        frames.append(frame)
        names.append(entry)
    return frames, names

# Load the raw training and test frames (and their source file names).
train_dfs, train_filenames = load_all_data(train_folder)
test_dfs, test_filenames = load_all_data(test_folder)

# Drop fault-flagged rows and L0- columns from every frame.
def preprocess_data(dfs):
    """Clean each DataFrame in *dfs*.

    Keeps only rows where the 'F0-' column equals 0 (dropping that column
    afterwards, when present) and removes every column whose name starts
    with 'L0-'. Returns a new list of cleaned frames.
    """
    cleaned = []
    for frame in dfs:
        if 'F0-' in frame.columns:
            frame = frame[frame['F0-'] == 0].drop(columns=['F0-'])
        l0_cols = [c for c in frame.columns if c.startswith('L0-')]
        cleaned.append(frame.drop(columns=l0_cols))
    return cleaned

# Clean both splits (drop fault rows and L0- columns).
train_dfs = preprocess_data(train_dfs)
test_dfs = preprocess_data(test_dfs)

# Correlation analysis — computed on the first training frame only.
corr_matrix = train_dfs[0].corr()
target_col = 'L2-ALF'

# Keep the variables whose absolute correlation with the target exceeds 0.5,
# excluding the target itself (its self-correlation is always 1).
target_corr = corr_matrix[target_col]
selected_features = target_corr[target_corr.abs() > 0.5].index.tolist()
selected_features.remove(target_col)
print(f"筛选出的相关变量: {selected_features}")

# Heatmap of pairwise correlations among the selected features and the target.
plt.figure(figsize=(10, 8))
sns.heatmap(corr_matrix.loc[selected_features + [target_col], selected_features + [target_col]],
            annot=True, cmap='coolwarm', vmin=-1, vmax=1)
# Bug fix: the title previously hard-coded "L1-NX", but the plotted target
# is target_col ('L2-ALF'); build it from the variable instead.
plt.title(f"Correlation Heatmap ({target_col} & Selected Features)")
plt.show()

# Keep only the selected feature columns plus the target column.
train_dfs = [df[selected_features + [target_col]] for df in train_dfs]
test_dfs = [df[selected_features + [target_col]] for df in test_dfs]

# Linearly interpolate runs of zeros no longer than max_gap samples.
def interpolate_small_gaps(df, max_gap=10):
    """Replace short runs of zeros with linearly interpolated values.

    A "gap" is a run of consecutive zeros in a column. Runs of at most
    *max_gap* samples are treated as missing readings and filled by linear
    interpolation (edges fall back to the nearest valid value); longer runs
    are assumed to be genuine zeros and left untouched.

    Note: mutates *df* in place and also returns it.
    """
    for col in df.columns:
        is_zero = df[col] == 0
        # Label each run of consecutive equal flags, then measure run length.
        run_id = (is_zero != is_zero.shift()).cumsum()
        run_len = is_zero.groupby(run_id).transform('size')
        # Bug fix: only runs whose TOTAL length is <= max_gap are filled.
        # The original masked the first max_gap zeros of longer runs too,
        # interpolating part of a long run and leaving the rest at zero.
        small_gap = is_zero & (run_len <= max_gap)
        # .bfill()/.ffill() replace the deprecated fillna(method=...) calls
        # (removed in pandas 3.0).
        df[col] = df[col].mask(small_gap).interpolate().bfill().ffill()
    return df

# Fill short zero gaps in every frame (mutates each frame in place).
train_dfs = [interpolate_small_gaps(df) for df in train_dfs]
test_dfs = [interpolate_small_gaps(df) for df in test_dfs]

# Min-max normalization.
# Bug fix: the original called fit_transform inside the per-file loop, which
# refit the scalers on EVERY training file — each train file was scaled by
# its own min/max, and the test set was transformed with only the last
# file's parameters. Fit once on the concatenated training data instead, so
# all files (train and test) share the same scaling parameters.
scaler_X = MinMaxScaler()
scaler_y = MinMaxScaler()

_all_train = pd.concat(train_dfs, ignore_index=True)
scaler_X.fit(_all_train[selected_features])
scaler_y.fit(_all_train[[target_col]])

for df in train_dfs:
    df[selected_features] = scaler_X.transform(df[selected_features])
    df[target_col] = scaler_y.transform(df[[target_col]])

for df in test_dfs:
    df[selected_features] = scaler_X.transform(df[selected_features])
    df[target_col] = scaler_y.transform(df[[target_col]])

# Persist each preprocessed frame as a NumPy .npy array, numbered from 1.
# Fix: the original zipped in the source filenames but never used them;
# enumerate(..., start=1) produces the same output paths without the
# unused variable.
for i, df in enumerate(train_dfs, start=1):
    save_path = os.path.join(processed_train_folder, f"train_{i}.npy")
    np.save(save_path, df.to_numpy())
    print(f"训练集已保存: {save_path}")

for i, df in enumerate(test_dfs, start=1):
    save_path = os.path.join(processed_test_folder, f"test_{i}.npy")
    np.save(save_path, df.to_numpy())
    print(f"测试集已保存: {save_path}")

print("处理完成")
