import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.svm import SVR
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, Bidirectional
from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler
from sklearn.ensemble import IsolationForest
from sklearn.decomposition import PCA

# 1. Load the raw CSV, index it by date, and resample to 8-day maxima.
raw = pd.read_csv('D:/Users/YIKAI/Desktop/PCA_Filtered_Data/WSA/sorted_data.csv')
raw['date'] = pd.to_datetime(raw['date'])
raw.set_index('date', inplace=True)
data_8d = raw.resample('8D').max()

# Fill gaps introduced by resampling with cubic-spline interpolation.
data_8d = data_8d.interpolate(method='spline', order=3)

# 2. Outlier removal: Isolation Forest marks ~10% of GPP_U95_f rows as
#    anomalies (prediction -1); keep only the inliers (prediction 1).
iso_forest = IsolationForest(contamination=0.1, random_state=42)
inlier_mask = iso_forest.fit_predict(data_8d[['GPP_U95_f']])
data_8d = data_8d[inlier_mask == 1]

# 3. Calendar-derived periodic features from the DatetimeIndex.
idx = data_8d.index
data_8d['month'] = idx.month
data_8d['day_of_year'] = idx.dayofyear
data_8d['week'] = idx.isocalendar().week
data_8d['season'] = idx.month % 12 // 3 + 1  # 1=DJF, 2=MAM, 3=JJA, 4=SON

# Drop the non-numeric site identifier before modelling.
data_8d = data_8d.drop(columns=['site_id'])

# 4. One-hot encode the PC1 and PC2 columns.
# NOTE(review): PC1/PC2 look like continuous PCA scores (the file imports PCA
# and reads from a "PCA_Filtered_Data" directory). One-hot encoding a
# continuous column turns every distinct float into its own dummy column,
# which explodes the feature space and cannot generalize to unseen values —
# confirm these columns are genuinely categorical; otherwise pass them
# through unencoded (or bin them first).
encoder = OneHotEncoder(sparse_output=False)
pc_encoded = encoder.fit_transform(data_8d[['PC1', 'PC2']])
pc_encoded_df = pd.DataFrame(pc_encoded, index=data_8d.index, columns=encoder.get_feature_names_out(['PC1', 'PC2']))
data_8d = pd.concat([data_8d.drop(columns=['PC1', 'PC2']), pc_encoded_df], axis=1)

# 5. Standardize the target, calendar features and encoded columns to zero
# mean / unit variance.
# NOTE(review): the target GPP_U95_f is scaled together with the features, so
# every RMSE reported below is in standardized units, not physical GPP units.
# The scaler is also fit before the CV split, so test folds share its
# statistics — confirm this leakage is acceptable.
scaler = StandardScaler()
numeric_columns = ['GPP_U95_f', 'month', 'day_of_year', 'week', 'season'] + list(pc_encoded_df.columns)
data_8d[numeric_columns] = scaler.fit_transform(data_8d[numeric_columns])
data_8d = data_8d.dropna()

# 6. Savitzky-Golay smoothing of the (already standardized) target series.
# window_length=7 requires at least 7 rows remaining after the dropna above.
data_8d['GPP_U95_f'] = savgol_filter(data_8d['GPP_U95_f'], window_length=7, polyorder=3)

# Column position of the target, used to pull labels out of the raw value
# matrix when building sequences below.
gpp_index = data_8d.columns.get_loc('GPP_U95_f')

# 7. Build supervised sequences from the value matrix.
def create_sequences(data, seq_length, target_index):
    """Slice a 2-D value matrix into overlapping windows and next-step targets.

    Args:
        data: 2-D ndarray of shape (n_rows, n_features).
        seq_length: number of consecutive rows per input window.
        target_index: column index of the target variable.

    Returns:
        (sequences, targets) as float32 ndarrays: sequences has shape
        (n_rows - seq_length, seq_length, n_features); each target is the
        target-column value of the row immediately after its window.
    """
    n_windows = len(data) - seq_length
    windows = [data[start:start + seq_length] for start in range(n_windows)]
    labels = [data[start + seq_length, target_index] for start in range(n_windows)]
    return np.array(windows, dtype=np.float32), np.array(labels, dtype=np.float32)

seq_length = 18  # window length chosen to fit the available record size
sequences, targets = create_sequences(data_8d.values, seq_length, gpp_index)

# 8. 10-fold cross-validation.
# NOTE(review): shuffle=True on overlapping time-series windows lets
# near-duplicate windows land in both train and test folds, which can make
# scores optimistic — confirm a shuffled (rather than time-ordered) split
# is intended here.
kf = KFold(n_splits=10, shuffle=True, random_state=42)

# Per-model accumulators: fold-level metrics plus pooled test predictions.
_metric_keys = ('train_r2', 'test_r2', 'train_rmse', 'test_rmse', 'y_test', 'y_test_pred')
results = {name: {key: [] for key in _metric_keys} for name in ('LSTM', 'BiLSTM', 'SVR')}

# Model factory functions.
def create_lstm_model(input_shape):
    """Build and compile a two-layer stacked LSTM regressor.

    Args:
        input_shape: (timesteps, n_features) of each input sequence.

    Returns:
        A compiled Keras Sequential model (Adam optimizer, MSE loss).
    """
    model = Sequential([
        LSTM(200, return_sequences=True, input_shape=input_shape),
        Dropout(0.3),
        LSTM(100),
        Dropout(0.3),
        Dense(1),
    ])
    model.compile(optimizer='adam', loss='mse')
    return model

def create_bilstm_model(input_shape):
    """Build and compile a two-layer bidirectional LSTM regressor.

    Args:
        input_shape: (timesteps, n_features) of each input sequence.

    Returns:
        A compiled Keras Sequential model (Adam optimizer, MSE loss).
    """
    model = Sequential([
        Bidirectional(LSTM(200, return_sequences=True, input_shape=input_shape)),
        Dropout(0.3),
        Bidirectional(LSTM(100)),
        Dropout(0.3),
        Dense(1),
    ])
    model.compile(optimizer='adam', loss='mse')
    return model

def scheduler(epoch, lr):
    """Learning-rate schedule: hold the rate for the first 100 epochs, then
    decay it by 10% per epoch.

    NOTE(review): training below runs exactly epochs=100 (epoch indices
    0-99), so the decay branch never fires — confirm the cutoff is intended.
    """
    return lr if epoch < 100 else lr * 0.9

# Train and evaluate the three models on every CV fold.
for train_index, test_index in kf.split(sequences):
    X_train, X_test = sequences[train_index], sequences[test_index]
    y_train, y_test = targets[train_index], targets[test_index]

    # Ensure float dtype. Defensive only: create_sequences already returns
    # float32, so these four lines are no-op copies.
    X_train = np.array(X_train, dtype=np.float32)
    X_test = np.array(X_test, dtype=np.float32)
    y_train = np.array(y_train, dtype=np.float32)
    y_test = np.array(y_test, dtype=np.float32)

    # LSTM model.
    # NOTE(review): the test fold doubles as the early-stopping validation
    # set, so the reported test metrics are not strictly held out — confirm
    # this is acceptable for the study design.
    lstm_model = create_lstm_model((seq_length, X_train.shape[2]))
    lstm_model.fit(X_train, y_train, epochs=100, batch_size=64, validation_data=(X_test, y_test), callbacks=[EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True), LearningRateScheduler(scheduler)], verbose=0)
    # Keras predict() returns a 2-D (n, 1) array; extend() therefore appends
    # one length-1 array per sample to the pooled prediction list.
    y_train_pred_lstm = lstm_model.predict(X_train)
    y_test_pred_lstm = lstm_model.predict(X_test)
    results['LSTM']['train_r2'].append(r2_score(y_train, y_train_pred_lstm))
    results['LSTM']['test_r2'].append(r2_score(y_test, y_test_pred_lstm))
    results['LSTM']['train_rmse'].append(np.sqrt(mean_squared_error(y_train, y_train_pred_lstm)))
    results['LSTM']['test_rmse'].append(np.sqrt(mean_squared_error(y_test, y_test_pred_lstm)))
    results['LSTM']['y_test'].extend(y_test)
    results['LSTM']['y_test_pred'].extend(y_test_pred_lstm)

    # BiLSTM model (same training protocol as the LSTM above).
    bilstm_model = create_bilstm_model((seq_length, X_train.shape[2]))
    bilstm_model.fit(X_train, y_train, epochs=100, batch_size=64, validation_data=(X_test, y_test), callbacks=[EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True), LearningRateScheduler(scheduler)], verbose=0)
    y_train_pred_bilstm = bilstm_model.predict(X_train)
    y_test_pred_bilstm = bilstm_model.predict(X_test)
    results['BiLSTM']['train_r2'].append(r2_score(y_train, y_train_pred_bilstm))
    results['BiLSTM']['test_r2'].append(r2_score(y_test, y_test_pred_bilstm))
    results['BiLSTM']['train_rmse'].append(np.sqrt(mean_squared_error(y_train, y_train_pred_bilstm)))
    results['BiLSTM']['test_rmse'].append(np.sqrt(mean_squared_error(y_test, y_test_pred_bilstm)))
    results['BiLSTM']['y_test'].extend(y_test)
    results['BiLSTM']['y_test_pred'].extend(y_test_pred_bilstm)

    # SVR model: flatten each (seq_length, n_features) window into one row.
    X_train_flat = X_train.reshape(X_train.shape[0], -1)
    X_test_flat = X_test.reshape(X_test.shape[0], -1)

    # Reduce the flattened windows with PCA (fit on the train fold only,
    # applied to the test fold) before fitting the SVR.
    pca = PCA(n_components=min(10, X_train_flat.shape[1]))
    X_train_pca = pca.fit_transform(X_train_flat)
    X_test_pca = pca.transform(X_test_flat)

    svr_model = SVR(kernel='rbf', C=1.0, epsilon=0.2)
    svr_model.fit(X_train_pca, y_train)
    y_train_pred_svr = svr_model.predict(X_train_pca)
    y_test_pred_svr = svr_model.predict(X_test_pca)
    results['SVR']['train_r2'].append(r2_score(y_train, y_train_pred_svr))
    results['SVR']['test_r2'].append(r2_score(y_test, y_test_pred_svr))
    results['SVR']['train_rmse'].append(np.sqrt(mean_squared_error(y_train, y_train_pred_svr)))
    results['SVR']['test_rmse'].append(np.sqrt(mean_squared_error(y_test, y_test_pred_svr)))
    results['SVR']['y_test'].extend(y_test)
    results['SVR']['y_test_pred'].extend(y_test_pred_svr)

# Average the per-fold metrics; the pooled prediction lists are excluded.
avg_results = {}
for model_name, metrics in results.items():
    avg_results[model_name] = {
        metric: np.mean(values)
        for metric, values in metrics.items()
        if metric not in ('y_test', 'y_test_pred')
    }

# Report the cross-validated test scores.
for model_name, metrics in avg_results.items():
    print(f"{model_name} 平均测试 R²: {metrics['test_r2']:.4f}, 平均测试 RMSE: {metrics['test_rmse']:.4f}")

# Observed-vs-predicted scatter plots for the three models.
# The three per-model stanzas were identical except for the model name, so
# they are collapsed into one loop; the rendered figure is unchanged.
fig, axs = plt.subplots(1, 3, figsize=(18, 6))
axs = axs.flatten()

for ax, model_name in zip(axs, ('LSTM', 'BiLSTM', 'SVR')):
    y_obs = results[model_name]['y_test']
    y_pred = results[model_name]['y_test_pred']
    ax.scatter(y_obs, y_pred, color='purple')
    # 1:1 reference line spanning the observed range.
    lo, hi = min(y_obs), max(y_obs)
    ax.plot([lo, hi], [lo, hi], 'k--', lw=2)
    ax.set_title(f'{model_name}\nRMSE={avg_results[model_name]["test_rmse"]:.2f} R²={avg_results[model_name]["test_r2"]:.2f}')
    ax.set_xlabel('GPP (observed)')
    ax.set_ylabel('GPP (predicted)')

plt.tight_layout()
plt.show()