import os.path
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import pickle
from sklearn.metrics import r2_score, mean_squared_error
from keras.losses import mean_absolute_error
from keras.models import Model
from keras.layers import Input, TimeDistributed
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import Callback
from sklearn.model_selection import GridSearchCV
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
import matplotlib.pyplot as plt
from keras.layers import Conv1D, Dropout, Dense,LSTM,GRU
from keras.regularizers import l2
from keras.optimizers import Adam, RMSprop
from keras.models import Sequential
# Part 1: configure input/output paths and preprocess the data.
# Input file: the raw CSV to read and process.
readfile_folder = r"F:\dataset\BQW_min\data_min"
readfile_name = "A1_2month.csv"
readfile_path = os.path.join(readfile_folder, readfile_name)
# Output folder for all results (processed CSV, model, scaler, plots).
writefile_folder = r'F:\dataset\test\LSTM'  #####
# Output file: the processed training CSV written below.
trainfile_name = "Train_GeneratorWindingTemperature1.csv"  #####
trainfile_path = os.path.join(writefile_folder, trainfile_name)

# Model file name (written by ModelCheckpoint further down).
model_name = 'GeneratorWindingTemperature1_LSTM.h5'  #####
model_path = os.path.join(writefile_folder, model_name)

# Load the raw data and report its size.
df_ori = pd.read_csv(readfile_path)  #
print("Data Shape--", df_ori.shape)
# Keep only the timestamp column, the prediction target and the
# top-ranked influencing-factor columns.
time_name = 'rectime'
target_name = 'V71'
need_name = ['V74', 'V73','V72','V76']
print(need_name)

# Tunable parameters: N_past_value is how many past time steps each training
# window looks back over, Pre_size is how many future steps are predicted,
# Lstm_input_size is the number of features fed to the LSTM
# (influencing factors + the target itself).
N_past_value = 60
Pre_size = 60
Lstm_input_size = len(need_name) + 1

# The remainder usually needs no changes, but can be adapted as required.
# Locate the column indices of the timestamp, the target and the factors.
time_index = [i for i, col in enumerate(df_ori.columns) if col == time_name]
print(time_index)
target_index = [i for i, col in enumerate(df_ori.columns) if col == target_name]
print(target_index)
need_index = [i for i, col in enumerate(df_ori.columns) if col in need_name]
print(need_index)
T = df_ori.iloc[:, time_index]
X = df_ori.iloc[:, need_index]
y = df_ori.iloc[:, target_index]

# Assemble the training frame in the order: time + target + factors.
df_train = pd.concat([T, y, X], axis=1)
print(df_train.head())
# df_train.to_excel(trainfile_path, index=False, engine='xlsxwriter')

# NOTE(review): trainfile_path already ends in '.csv', so the replace() here
# is a no-op left over from an earlier Excel-based version.
df_train.to_csv(trainfile_path.replace('.xlsx', '.csv'), index=False)
# Re-read the saved file so 'rectime' is parsed as a datetime index.
df = pd.read_csv(trainfile_path, parse_dates=["rectime"], index_col=[0])

n = len(df)

# Split ratios (adjust to the amount of data available).
train_ratio = 0.7
val_ratio = 0.15
test_ratio = 0.15

# Compute the split points (integer truncation keeps the counts consistent).
train_size = int(n * train_ratio)
val_size = int(n * val_ratio)
test_size = n - train_size - val_size  # remainder becomes the test set

# Split chronologically (assumes df is already sorted by time ascending).
df_for_training = df.iloc[:train_size]  # first 70% -> training set
df_for_validation = df.iloc[train_size:train_size + val_size]  # middle 15% -> validation set
df_for_testing = df.iloc[-test_size:]  # last 15% -> test set

# Report the resulting split sizes.
print(f"训练集: {train_size}条 ({train_ratio * 100:.1f}%)")
print(f"验证集: {val_size}条 ({val_ratio * 100:.1f}%)")
print(f"测试集: {test_size}条 ({test_ratio * 100:.1f}%)")
# Scale every column into [0, 1]; fit on the training set only, so the
# validation/test sets are transformed with the training min/max.
scaler = MinMaxScaler(feature_range=(0, 1))
df_for_training_scaled = scaler.fit_transform(df_for_training)  # fit learns min/max, transform applies the scaling
df_for_testing_scaled = scaler.transform(df_for_testing)  # reuse the training min/max
df_for_validation_scaled = scaler.transform(df_for_validation)

# Persist the fitted scaler for use at inference time.
scaler_path = os.path.join(writefile_folder, 'GeneratorWindingTemperature1_LSTM.pkl')  #####
with open(scaler_path, 'wb') as f:
    pickle.dump(scaler, f)


# Split a scaled 2-D dataset into model inputs X and targets Y: each X sample
# is a window of the n_past preceding rows (all columns) and each Y sample is
# the next Pre_size values of column 0 (the prediction target).
def createXY(dataset, n_past, Pre_size):
    """Build rolling (window, horizon) pairs from a 2-D array.

    Returns (X, Y) where X has shape (samples, n_past, n_features) and
    Y has shape (samples, Pre_size).
    """
    last_start = len(dataset) - Pre_size
    windows = [dataset[start - n_past:start, :] for start in range(n_past, last_start)]
    horizons = [dataset[start:start + Pre_size, 0] for start in range(n_past, last_start)]
    return np.array(windows), np.array(horizons)


# Build windowed (X, Y) pairs for each split.
trainX, trainY = createXY(df_for_training_scaled, N_past_value, Pre_size)
testX, testY = createXY(df_for_testing_scaled, N_past_value, Pre_size)
valX, valY = createXY(df_for_validation_scaled, N_past_value, Pre_size)

# Sanity-check the shapes of the windowed data and a sample window.
print("trainX Shape-- ", trainX.shape)
print("trainY Shape-- ", trainY.shape)
print("testX Shape-- ", testX.shape)
print("testY Shape-- ", testY.shape)
print("trainX[0]-- \n", trainX[0])
print("\ntrainY[0]-- ", trainY[0])


# Part 2: build and tune the model.


def build_lstm_model(optimizer='adam', units=50, dropout_rate=0.3, l2_reg=1e-3,
                     learning_rate=0.001):
    """Build and compile a stacked two-layer LSTM sequence model.

    Parameters
    ----------
    optimizer : 'adam' selects Adam; any other value selects RMSprop.
    units : units in the first LSTM layer (second layer uses units // 2).
    dropout_rate : dropout applied after each LSTM layer.
    l2_reg : L2 kernel-regularization strength for both LSTM layers.
    learning_rate : optimizer learning rate; defaults to the previously
        hard-coded 0.001, so existing callers are unaffected.

    Returns
    -------
    A compiled keras Model taking (N_past_value, Lstm_input_size) inputs and
    producing a per-time-step scalar, i.e. output shape (N_past_value, 1).

    NOTE(review): the output carries N_past_value time steps while the
    targets (trainY) carry Pre_size steps; this only lines up because
    N_past_value == Pre_size (60) in this script — confirm before changing
    either constant independently.
    """
    # Input layer: one window of scaled features.
    inputs = Input(shape=(N_past_value, Lstm_input_size))

    # First LSTM layer (returns the full sequence so layers can stack).
    x = LSTM(units=units,
             return_sequences=True,
             kernel_regularizer=l2(l2_reg))(inputs)
    x = Dropout(dropout_rate)(x)

    # Second, narrower LSTM layer.
    x = LSTM(units=units // 2,
             return_sequences=True,
             kernel_regularizer=l2(l2_reg))(x)
    x = Dropout(dropout_rate)(x)

    # Per-time-step linear readout.
    outputs = TimeDistributed(Dense(1))(x)

    model = Model(inputs, outputs)

    # Learning rate is now parameterized instead of hard-coded in each branch.
    if optimizer == 'adam':
        opt = Adam(learning_rate=learning_rate)
    else:
        opt = RMSprop(learning_rate=learning_rate)

    model.compile(loss='mse', optimizer=opt)
    return model


# Hyperparameter grid searched by GridSearchCV below; keys must match the
# keyword arguments of build_lstm_model (plus fit's batch_size / epochs).
param_grid = {
    'units': [32, 64, 128],
    'batch_size': [32, 64],
    'epochs': [100],
    'dropout_rate': [0.2, 0.3],
    'l2_reg': [1e-3, 1e-4]
}





# Learning-rate schedule: after epoch 20, shrink the rate by 10% on every
# epoch divisible by 5; otherwise keep it unchanged.
def lr_scheduler(epoch, lr):
    """Return the learning rate to use for the given (0-based) epoch."""
    should_decay = epoch > 20 and epoch % 5 == 0
    return lr * 0.9 if should_decay else lr


# Custom callback: print the train/validation loss at the end of every epoch.
class PrintEpochProgress(Callback):
    """Prints per-epoch train and validation loss.

    Robustness fix: the original applied ':.6f' formatting unconditionally,
    which raises TypeError whenever 'loss' or 'val_loss' is absent from
    `logs` (e.g. training without validation data). Missing values now
    print as 'n/a'.
    """

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}

        def _fmt(value):
            # Format only real numbers; logs.get() may return None.
            return f"{value:.6f}" if value is not None else "n/a"

        train_loss = _fmt(logs.get('loss'))
        val_loss = _fmt(logs.get('val_loss'))
        print(f"Epoch {epoch + 1}: Train Loss: {train_loss}, Val Loss: {val_loss}")


# Assemble the training callbacks.
lr_callback = LearningRateScheduler(lr_scheduler)
# NOTE(review): patience=2 is very aggressive for 100-epoch runs — confirm.
early_stop = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
# Keep only the checkpoint with the lowest validation loss.
checkpoint = ModelCheckpoint(model_path,
                             monitor='val_loss',
                             save_best_only=True,
                             mode='min',
                             verbose=1)
custom_print = PrintEpochProgress()


# Earlier, wider search grid kept for reference:
# param_grid = {
#     'optimizer': ['adam', 'rmsprop'],
#     'filters': [32, 64],
#     'kernel_size':[3, 5],
#     'batch_size': [128, 64],
#     'epochs': [100],
#     'dropout_rate': [0.1,0.2, 0.3],
#     'l2_reg': [1e-3, 1e-4, 1e-2]
# }

# Wrap the model builder so scikit-learn can drive it.
model = KerasRegressor(build_fn=build_lstm_model, verbose=0)

# Configure the grid search (2-fold CV over param_grid).
grid = GridSearchCV(estimator=model,
                    param_grid=param_grid,
                    cv=2,
                    n_jobs=1,
                    verbose=2)

# Fit with all callbacks; extra kwargs are forwarded to keras fit().
grid_result = grid.fit(trainX, trainY,
                       validation_data=(valX, valY),
                       callbacks=[early_stop, checkpoint, lr_callback, custom_print])
# Report the best score and hyperparameters.
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
best_params = grid_result.best_params_

# 可视化训练过程
plt.figure(figsize=(12, 6))
plt.plot(grid_result.best_estimator_.model.history.history['loss'], label='Train Loss')
plt.plot(grid_result.best_estimator_.model.history.history['val_loss'], label='Validation Loss')
plt.title('Model Training Progress')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.savefig(os.path.join(writefile_folder, 'training_curve.png'))
# plt.show()

# 加载最佳模型
from keras.models import load_model

best_model = load_model(model_path)

# 测试集预测

prediction = best_model.predict(testX)
print("prediction shape:", prediction.shape)


# Invert the MinMax scaling of predictions (handles the 3-D model output).
def inverse_scale_pred(pred_array, n_features=None, fitted_scaler=None):
    """Map scaled predictions of the target column back to original units.

    The scaler was fitted on all n_features columns, so the single predicted
    column is tiled across every column before inverse_transform, and the
    target (column 0) is extracted from the result.

    Parameters
    ----------
    pred_array : ndarray of any shape (e.g. (samples, Pre_size, 1));
        flattened to one value per row.
    n_features : number of columns the scaler was fitted on; defaults to the
        module-level Lstm_input_size (the same value the original used, but
        resolved at call time instead of at definition time).
    fitted_scaler : object exposing inverse_transform; defaults to the
        module-level `scaler`, but can be injected for reuse and testing.

    Returns
    -------
    1-D ndarray of inverse-scaled target values, one per prediction.
    """
    if n_features is None:
        n_features = Lstm_input_size
    if fitted_scaler is None:
        fitted_scaler = scaler
    flat = pred_array.reshape(-1, 1)
    tiled = np.repeat(flat, n_features, axis=-1)
    return fitted_scaler.inverse_transform(tiled)[:, 0]


pred = inverse_scale_pred(prediction)

# Inverse-scale the ground truth the same way: tile the single target column
# across all feature columns, inverse_transform, and take column 0.
original = testY.reshape(-1, 1)
original_copies = np.repeat(original, Lstm_input_size, axis=-1)
original = scaler.inverse_transform(original_copies)[:, 0]
# Shape check: both arrays are flattened to samples * Pre_size values.
print(f"Original shape: {original.shape}, Pred shape: {pred.shape}")
# Evaluation metrics (original and pred have equal length here).
test_r2 = r2_score(original, pred)
# BUGFIX: the mean_absolute_error in scope is keras.losses.mean_absolute_error
# (imported at the top), which returns a tensor and breaks the ':.4f'
# formatting below — compute the scalar MAE with numpy instead.
test_mae = float(np.mean(np.abs(np.asarray(original) - np.asarray(pred))))
test_mse = mean_squared_error(original, pred)
test_rmse = np.sqrt(test_mse)
# Plot predicted vs. actual values on the test set.
plt.figure(figsize=(12, 6))
plt.plot(original, label='Actual Values')
plt.plot(pred, label='Predicted Values')
plt.title('Test Set Prediction Comparison')
plt.xlabel('Time Steps')
plt.ylabel('Value')
plt.legend()
plt.savefig(os.path.join(writefile_folder, 'test_pred_comparison.png'))
# plt.show()

# BUGFIX: the header hard-coded "30 steps" while the horizon is Pre_size (60).
print(f"\n未来{Pre_size}步预测指标:")
print(f"R²: {test_r2:.4f}")
print(f"MSE: {test_mse:.4f}")
print(f"MAE: {test_mae:.4f}")
print(f"RMSE: {test_rmse:.4f}")
