import os.path
import numpy as np
import pandas as pda
from sklearn.preprocessing import MinMaxScaler
import pickle
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from keras.models import Model
from keras.layers import Input, TimeDistributed
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import Callback
from sklearn.model_selection import GridSearchCV
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
import matplotlib.pyplot as plt
from keras.layers import Dropout, Dense, GRU,LSTM,Bidirectional
from keras.regularizers import l2
from keras.optimizers import Adam, RMSprop
from keras.models import load_model

# Part 1: configure input/output paths and assemble the training data.

# Folder and file holding the raw minute-level data.
readfile_folder = r"F:\dataset\BQW_min\data_min"
readfile_name = "A1_6month.csv"
readfile_path = os.path.join(readfile_folder, readfile_name)
# Output folder for the reshaped training CSV, model and other artifacts.
writefile_folder = r'F:\dataset\test\LSTM\BiLSTM'
trainfile_name = "Train_GeneratorWindingTemperature1.csv"
trainfile_path = os.path.join(writefile_folder, trainfile_name)

# Keras HDF5 checkpoint path (written by ModelCheckpoint below).
model_name = 'GeneratorWindingTemperature1_Model.h5'
model_path = os.path.join(writefile_folder, model_name)

# Load the raw data and report its size.
df_ori = pda.read_csv(readfile_path)
print("Data Shape--", df_ori.shape)

# Keep only the timestamp column, the prediction target and the
# top-ranked influencing-factor columns.
time_name = 'rectime'      # timestamp column
target_name = 'V71'        # prediction target column
need_name = ['V74', 'V73', 'V72', 'V76']  # feature columns
print(need_name)

# Window parameters.
N_past_value = 60   # look-back window length (timesteps fed to the model)
Pre_size = 60       # forecast horizon (timesteps predicted ahead)
Lstm_input_size = len(need_name) + 1  # 目标值 + 特征数 (target + feature count)

# Positional indices of the timestamp, target and feature columns.
# NOTE(review): need_index follows df_ori's column order, not the order
# listed in need_name — confirm this ordering is intended.
time_index = [i for i, col in enumerate(df_ori.columns) if col == time_name]
target_index = [i for i, col in enumerate(df_ori.columns) if col == target_name]
need_index = [i for i, col in enumerate(df_ori.columns) if col in need_name]

T = df_ori.iloc[:, time_index]
X = df_ori.iloc[:, need_index]
y = df_ori.iloc[:, target_index]

# Concatenate into the training layout: time + target + features,
# then persist it so the next stage can re-read it with date parsing.
df_train = pda.concat([T, y, X], axis=1)
print(df_train.head())
df_train.to_csv(trainfile_path, index=False)

# Re-read the assembled CSV with the timestamp parsed and used as the index,
# so only the numeric columns (target + features) remain as data.
df = pda.read_csv(trainfile_path, parse_dates=["rectime"], index_col=[0])

n = len(df)

# Chronological split ratios.
train_ratio = 0.7
val_ratio = 0.15
test_ratio = 0.15

# Split points; the test split absorbs rounding so all rows are covered.
train_size = int(n * train_ratio)
val_size = int(n * val_ratio)
test_size = n - train_size - val_size

# Split in time order (no shuffling — this is a time series).
df_for_training = df.iloc[:train_size]
df_for_validation = df.iloc[train_size:train_size + val_size]
df_for_testing = df.iloc[-test_size:]

# Report the split sizes.
print(f"训练集: {train_size}条 ({train_ratio * 100:.1f}%)")
print(f"验证集: {val_size}条 ({val_ratio * 100:.1f}%)")
print(f"测试集: {test_size}条 ({test_ratio * 100:.1f}%)")

# Scale all columns to [0, 1]; the scaler is fitted on the training split
# only, so validation/test statistics do not leak into it.
scaler = MinMaxScaler(feature_range=(0, 1))
df_for_training_scaled = scaler.fit_transform(df_for_training)
df_for_testing_scaled = scaler.transform(df_for_testing)
df_for_validation_scaled = scaler.transform(df_for_validation)

# Persist the fitted scaler so predictions can be inverse-transformed later.
scaler_path = os.path.join(writefile_folder, 'GeneratorWindingTemperature1_scaler.pkl')
with open(scaler_path, 'wb') as f:
    pickle.dump(scaler, f)

# Slice the scaled series into supervised (X, Y) windows.
def createXY(dataset, n_past, Pre_size):
    """Build look-back / horizon windows from a scaled 2-D array.

    Each X sample is the preceding ``n_past`` rows (all columns); each Y
    sample is the next ``Pre_size`` values of column 0 (the target).
    Returns arrays shaped (samples, n_past, n_features) and
    (samples, Pre_size).
    """
    starts = range(n_past, len(dataset) - Pre_size)
    windows = [dataset[s - n_past:s] for s in starts]
    horizons = [dataset[s:s + Pre_size, 0] for s in starts]
    return np.array(windows), np.array(horizons)

# Build windowed train/test/validation tensors.
trainX, trainY = createXY(df_for_training_scaled, N_past_value, Pre_size)
testX, testY = createXY(df_for_testing_scaled, N_past_value, Pre_size)
valX, valY = createXY(df_for_validation_scaled, N_past_value, Pre_size)

# Shape check: X is (samples, N_past_value, Lstm_input_size),
# Y is (samples, Pre_size).
print("trainX Shape-- ", trainX.shape)
print("trainY Shape-- ", trainY.shape)
print("testX Shape-- ", testX.shape)
print("testY Shape-- ", testY.shape)

# Part 2: model construction.
def build_bilstm_model(optimizer='adam', units=50, dropout_rate=0.3, l2_reg=1e-3):
    """Build and compile a two-layer bidirectional-LSTM regressor.

    Maps a (N_past_value, Lstm_input_size) window to one scalar per input
    timestep via a TimeDistributed dense head; compiled with MSE loss.
    """
    layer_in = Input(shape=(N_past_value, Lstm_input_size))

    # First bidirectional LSTM (full sequence out) + dropout.
    hidden = Bidirectional(LSTM(units=units,
                                return_sequences=True,
                                kernel_regularizer=l2(l2_reg)))(layer_in)
    hidden = Dropout(dropout_rate)(hidden)

    # Second, narrower bidirectional LSTM + dropout.
    hidden = Bidirectional(LSTM(units=units // 2,
                                return_sequences=True,
                                kernel_regularizer=l2(l2_reg)))(hidden)
    hidden = Dropout(dropout_rate)(hidden)

    # Per-timestep scalar output.
    # NOTE(review): the output sequence length equals N_past_value while the
    # training targets have length Pre_size; the two only line up because
    # both are 60 — confirm this coupling is intended.
    layer_out = TimeDistributed(Dense(1))(hidden)

    net = Model(layer_in, layer_out)

    # Only 'adam' is recognized explicitly; anything else falls back to RMSprop.
    opt = Adam(learning_rate=0.001) if optimizer == 'adam' else RMSprop(learning_rate=0.001)

    net.compile(loss='mse', optimizer=opt)
    return net

# Hyperparameter grid searched by GridSearchCV below; 'batch_size' is
# consumed by KerasRegressor.fit, the rest are forwarded to
# build_bilstm_model as keyword arguments.
param_grid = {
    'units': [32, 64],
    'batch_size': [128, 256],
    'dropout_rate': [0.2, 0.3],
    'l2_reg': [1e-4, 1e-3]
}

# Learning-rate schedule: after epoch 20, decay by 10% on every 5th epoch.
def lr_scheduler(epoch, lr):
    """Return the learning rate to use for *epoch* given the current *lr*."""
    decay_due = epoch > 20 and epoch % 5 == 0
    return lr * 0.9 if decay_due else lr

# Custom callback: print train/validation loss at the end of every epoch.
class PrintEpochProgress(Callback):
    """Log per-epoch training progress.

    Fix: the original formatted ``loss``/``val_loss`` with ``:.6f`` even when
    the key was absent (``logs.get`` returns None), which raises TypeError on
    any epoch without those metrics. Missing values now print as 'n/a'.
    """

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        train_loss = logs.get('loss')
        val_loss = logs.get('val_loss')
        # Guard against missing metrics before applying the float format spec.
        train_txt = f"{train_loss:.6f}" if train_loss is not None else "n/a"
        val_txt = f"{val_loss:.6f}" if val_loss is not None else "n/a"
        print(f"Epoch {epoch + 1}: Train Loss: {train_txt}, Val Loss: {val_txt}")

# Training callbacks: LR schedule, early stopping on validation loss, and
# checkpointing the best-so-far model to model_path.
lr_callback = LearningRateScheduler(lr_scheduler)
early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=1)  # increased patience
checkpoint = ModelCheckpoint(model_path,
                             monitor='val_loss',
                             save_best_only=True,
                             mode='min',
                             verbose=1)
custom_print = PrintEpochProgress()

# Wrap the Keras builder for scikit-learn compatibility.
# NOTE(review): keras.wrappers.scikit_learn was removed in newer Keras
# releases — this script requires an older Keras/TF stack.
model = KerasRegressor(build_fn=build_bilstm_model, epochs=50, verbose=0)

# Grid search over param_grid with 2-fold CV, run sequentially (n_jobs=1).
grid = GridSearchCV(estimator=model,
                    param_grid=param_grid,
                    cv=2,
                    n_jobs=1,
                    verbose=2)

# Fit; the extra kwargs (validation_data, callbacks) are forwarded to Keras fit.
# NOTE(review): the single ModelCheckpoint tracks the best val_loss across
# *all* grid candidates and folds, so model_path ends up holding the single
# best run overall — confirm that is the intent.
print("开始训练模型...")
grid_result = grid.fit(trainX, trainY,
                       validation_data=(valX, valY),
                       callbacks=[early_stop, checkpoint, lr_callback, custom_print])

# Report the best cross-validation score and parameter combination.
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
best_params = grid_result.best_params_

# Plot the loss curves of the refit best estimator.
# NOTE(review): relies on the refit model exposing its History object via
# .model.history — confirm for the installed Keras version.
plt.figure(figsize=(12, 6))
plt.plot(grid_result.best_estimator_.model.history.history['loss'], label='Train Loss')
plt.plot(grid_result.best_estimator_.model.history.history['val_loss'], label='Validation Loss')
plt.title('Model Training Progress')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.savefig(os.path.join(writefile_folder, 'training_curve.png'))
plt.close()

# Reload the checkpointed model (lowest val_loss seen during the search).
best_model = load_model(model_path)

# Predict on the test windows.
print("进行测试集预测...")
prediction = best_model.predict(testX)
print("prediction shape:", prediction.shape)

# Corrected inverse-scaling helper for the target column.
def inverse_scale_values(values_array, scaler, n_features):
    """Undo MinMax scaling for target-column values.

    values_array: scaled target values (any shape; flattened internally).
    scaler: fitted scaler — only its ``inverse_transform`` is used.
    n_features: width of the data the scaler was fitted on.

    The values are placed in column 0 of a zero-padded 2-D array so the
    scaler can be applied, and the unscaled first column is returned.
    """
    flat = np.asarray(values_array).reshape(-1)
    # Pad with zeros so the array matches the scaler's expected width;
    # only column 0 (the target position) carries real data.
    padded = np.zeros((flat.shape[0], n_features))
    padded[:, 0] = flat
    restored = scaler.inverse_transform(padded)
    return restored[:, 0]

# Inverse-scale predictions and ground truth back to physical units.
# Both come back flattened to 1-D of length samples * horizon.
pred = inverse_scale_values(prediction, scaler, Lstm_input_size)
original = inverse_scale_values(testY, scaler, Lstm_input_size)

# Sanity-check that both flattened arrays align.
# NOTE(review): alignment holds only because the model's output sequence
# length (N_past_value) equals the target horizon (Pre_size) — confirm.
print(f"Original shape: {original.shape}, Pred shape: {pred.shape}")

# Evaluation metrics over the flattened series.
test_r2 = r2_score(original, pred)
test_mae = mean_absolute_error(original, pred)
test_mse = mean_squared_error(original, pred)
test_rmse = np.sqrt(test_mse)

# Plot actual vs. predicted values on the test set.
plt.figure(figsize=(12, 6))
plt.plot(original, label='Actual Values', alpha=0.7)
plt.plot(pred, label='Predicted Values', alpha=0.7)
plt.title('Test Set Prediction Comparison')
plt.xlabel('Time Steps')
plt.ylabel('Value')
plt.legend()
plt.savefig(os.path.join(writefile_folder, 'test_pred_comparison.png'))
plt.close()

# Print the forecast metrics for the Pre_size-step horizon.
print(f"\n未来{Pre_size}步预测指标:")
print(f"R²: {test_r2:.4f}")
print(f"MSE: {test_mse:.4f}")
print(f"MAE: {test_mae:.4f}")
print(f"RMSE: {test_rmse:.4f}")

# Persist actual/predicted pairs for later analysis.
results_df = pda.DataFrame({
    'Actual': original,
    'Predicted': pred
})
results_path = os.path.join(writefile_folder, 'prediction_results.csv')
results_df.to_csv(results_path, index=False)
print(f"预测结果已保存至: {results_path}")

