import os.path
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import pickle
from sklearn.metrics import r2_score, mean_squared_error
from keras.losses import mean_absolute_error
from keras.models import Model
from keras.layers import Input, TimeDistributed
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import Callback
from sklearn.model_selection import GridSearchCV
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
import matplotlib.pyplot as plt
from keras.layers import Conv1D, Dropout, Dense,BatchNormalization
from keras.regularizers import l2
from keras.optimizers import Adam, RMSprop

# Part 1: paths for input/output files and data preparation settings
# Input folder/file: raw data to read and process
readfile_folder = r"F:\dataset\BQW_min\data_min"
readfile_name = "A1_2month.csv"
readfile_path = os.path.join(readfile_folder, readfile_name)
# Output folder used to save all results
writefile_folder = r'F:\dataset\test'  #####
# Output file: the assembled training data
trainfile_name = "Train_GeneratorWindingTemperature1.csv"  #####
trainfile_path = os.path.join(writefile_folder, trainfile_name)

# Model file name
model_name = 'GeneratorWindingTemperature1_Model.h5'  #####
model_path = os.path.join(writefile_folder, model_name)

# Load the data and check its size
df_ori = pd.read_csv(readfile_path)  #
print("Data Shape--", df_ori.shape)
# Keep only the timestamp column, the prediction target and the top influencing factors
time_name = 'rectime'
target_name = 'V71'
need_name = ['V47', 'V77']
print(need_name)

# Tunable parameters: N_past_value = how many past steps the model sees when
# predicting; Pre_size = how many future steps to predict;
# Lstm_input_size = number of input features (influencing factors + target)
N_past_value = 60
Pre_size = 60
Lstm_input_size = len(need_name) + 1

# The following rarely needs changes, but can be adapted if necessary.
# Locate the column indices of the timestamp, the target and the factor columns.
time_index = [i for i, col in enumerate(df_ori.columns) if col == time_name]
print(time_index)
target_index = [i for i, col in enumerate(df_ori.columns) if col == target_name]
print(target_index)
need_index = [i for i, col in enumerate(df_ori.columns) if col in need_name]
print(need_index)
T = df_ori.iloc[:, time_index]
X = df_ori.iloc[:, need_index]
y = df_ori.iloc[:, target_index]

# Assemble the training frame in the layout: time + target + factors
df_train = pd.concat([T, y, X], axis=1)
print(df_train.head())

# trainfile_path already ends in '.csv', so write it directly; the previous
# `.replace('.xlsx', '.csv')` was a no-op left over from an Excel export.
df_train.to_csv(trainfile_path, index=False)
# Re-read the assembled file so 'rectime' is parsed as datetime and used as the index
df = pd.read_csv(trainfile_path, parse_dates=["rectime"], index_col=[0])

n = len(df)

# Split ratios (tune to the amount of data available)
train_ratio = 0.7
val_ratio = 0.15
test_ratio = 0.15

# Compute split points (int truncation; remainder is assigned to the test set)
train_size = int(n * train_ratio)
val_size = int(n * val_ratio)
test_size = n - train_size - val_size  # remainder goes to the test set

# Chronological split (assumes df is already sorted by time ascending)
df_for_training = df.iloc[:train_size]  # first 70% for training
df_for_validation = df.iloc[train_size:train_size + val_size]  # next 15% for validation
df_for_testing = df.iloc[-test_size:]  # final 15% for testing

# Report the split sizes
print(f"训练集: {train_size}条 ({train_ratio * 100:.1f}%)")
print(f"验证集: {val_size}条 ({val_ratio * 100:.1f}%)")
print(f"测试集: {test_size}条 ({test_ratio * 100:.1f}%)")
# Scale every column into [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))
df_for_training_scaled = scaler.fit_transform(df_for_training)  # fit learns min/max on the training set only
df_for_testing_scaled = scaler.transform(df_for_testing)  # reuse training min/max to avoid leakage
df_for_validation_scaled = scaler.transform(df_for_validation)

# Persist the scaler so predictions can be inverse-transformed later
scaler_path = os.path.join(writefile_folder, 'GeneratorWindingTemperature1_scaler.pkl')  #####
with open(scaler_path, 'wb') as f:
    pickle.dump(scaler, f)


# Slice a scaled 2-D dataset into supervised (X, Y) windows. Each X sample
# holds the previous n_past rows (all columns); the matching Y sample holds
# the next Pre_size values of column 0 (the prediction target).
def createXY(dataset, n_past, Pre_size):
    """Return (X, Y) numpy arrays of shapes (m, n_past, n_cols) and (m, Pre_size)."""
    windows_x, windows_y = [], []
    last_start = len(dataset) - Pre_size
    for end in range(n_past, last_start):
        windows_x.append(dataset[end - n_past:end, :])
        windows_y.append(dataset[end:end + Pre_size, 0])
    return np.array(windows_x), np.array(windows_y)


trainX, trainY = createXY(df_for_training_scaled, N_past_value, Pre_size)
testX, testY = createXY(df_for_testing_scaled, N_past_value, Pre_size)
valX, valY = createXY(df_for_validation_scaled, N_past_value, Pre_size)

# Sanity-check the windowed data:
# X arrays are (samples, N_past_value, Lstm_input_size), Y arrays are (samples, Pre_size)
print("trainX Shape-- ", trainX.shape)
print("trainY Shape-- ", trainY.shape)
print("testX Shape-- ", testX.shape)
print("testY Shape-- ", testY.shape)
print("trainX[0]-- \n", trainX[0])
print("\ntrainY[0]-- ", trainY[0])


# Part 2: model construction
# NOTE: the triple-quoted string below is an earlier three-conv-layer variant
# of build_lightweight_seq2seq, kept for reference. It is a bare string
# expression with no runtime effect (dead code) and could be removed.

'''def build_lightweight_seq2seq(optimizer='adam', filters=16, kernel_size=3,
                              dropout_rate=0.3, l2_reg=1e-3):
    # 输入层
    inputs = Input(shape=(N_past_value, Lstm_input_size))

    # 增强的TCN结构，添加更多正则化
    x = Conv1D(filters=filters,
               kernel_size=kernel_size,
               padding='causal',
               activation='relu',
               kernel_regularizer=l2(l2_reg))(inputs)
    x = BatchNormalization()(x)  # 添加批归一化
    x = Dropout(dropout_rate)(x)

    # 第二层卷积
    x = Conv1D(filters=filters * 2,
               kernel_size=kernel_size,
               padding='causal',
               activation='relu',
               kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = Dropout(dropout_rate)(x)

    # 第三层卷积（可选，根据数据复杂度）
    x = Conv1D(filters=filters,
               kernel_size=kernel_size,
               padding='causal',
               activation='relu',
               kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = Dropout(dropout_rate * 0.8)(x)  # 稍微降低dropout率

    # 残差连接
    if filters != Lstm_input_size:
        residual = Conv1D(filters, kernel_size=1, padding='same')(inputs)
    else:
        residual = inputs
    x = x + residual

    # 全局平均池化替代全连接层，减少参数
    # x = GlobalAveragePooling1D()(x)

    # 输出层
    outputs = TimeDistributed(Dense(1))(x)

    model = Model(inputs, outputs)'''


def build_lightweight_seq2seq(optimizer='adam', filters=32, kernel_size=3,
                         dropout_rate=0.4, l2_reg=1e-3, learning_rate=0.001):
    """Build and compile a lightweight TCN-style sequence model.

    Args:
        optimizer: 'adam' selects Adam; any other value falls back to RMSprop.
        filters: base number of Conv1D filters (the second layer uses 2x).
        kernel_size: Conv1D kernel width.
        dropout_rate: dropout applied after each conv block.
        l2_reg: L2 weight-decay coefficient for the conv kernels.
        learning_rate: learning rate for the optimizer.

    Returns:
        A compiled Keras Model mapping (N_past_value, Lstm_input_size) inputs
        to a sequence output of shape (N_past_value, 1).
        NOTE(review): the output length equals N_past_value; it only matches
        the training target length because Pre_size == N_past_value (60) —
        confirm this is intended if either constant changes.
    """
    inputs = Input(shape=(N_past_value, Lstm_input_size))

    # TCN-style stack: causal convolutions with batch normalization and dropout
    x = Conv1D(filters=filters,
               kernel_size=kernel_size,
               padding='causal',
               activation='relu',
               kernel_regularizer=l2(l2_reg))(inputs)
    x = BatchNormalization()(x)
    x = Dropout(dropout_rate)(x)

    # Second convolution block doubles the channel count
    x = Conv1D(filters=filters * 2,
               kernel_size=kernel_size,
               padding='causal',
               activation='relu',
               kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization()(x)
    x = Dropout(dropout_rate)(x)

    # Residual connection from the input. The main branch ends with
    # filters * 2 channels, so the shortcut must be projected to the same
    # width unless the input already has filters * 2 channels. (The original
    # code compared `filters` to Lstm_input_size, which would leave an
    # un-projected shortcut with mismatched channels in the identity branch.)
    if filters * 2 != Lstm_input_size:
        residual = Conv1D(filters * 2, kernel_size=1, padding='same')(inputs)
    else:
        residual = inputs
    x = x + residual

    # Per-timestep linear readout: one scalar prediction per time step
    outputs = TimeDistributed(Dense(1))(x)

    model = Model(inputs, outputs)

    # Build the optimizer with the requested (low) learning rate
    if optimizer == 'adam':
        opt = Adam(learning_rate=learning_rate)
    else:
        opt = RMSprop(learning_rate=learning_rate)

    model.compile(loss='mse', optimizer=opt, metrics=['mae'])
    return model

# Learning-rate schedule: hold the initial rate, then decay gently, then
# decay faster. (An earlier variant decayed 10% every 5 epochs after
# epoch 20.)
def lr_scheduler(epoch, lr):
    """Return the learning rate to use for *epoch*, given the current *lr*."""
    if epoch >= 30:
        return lr * 0.9   # late phase: stronger decay
    if epoch >= 10:
        return lr * 0.95  # middle phase: gradual decay
    return lr             # first 10 epochs: keep the initial rate

# Custom callback that prints the train/validation loss after every epoch.
class PrintEpochProgress(Callback):
    """Print per-epoch training and validation loss."""

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        train_loss = logs.get('loss')
        val_loss = logs.get('val_loss')
        # Guard against missing values (e.g. an epoch without validation):
        # formatting None with ':.6f' would raise TypeError.
        train_str = f"{train_loss:.6f}" if train_loss is not None else "n/a"
        val_str = f"{val_loss:.6f}" if val_loss is not None else "n/a"
        print(f"Epoch {epoch + 1}: Train Loss: {train_str}, Val Loss: {val_str}")


lr_callback = LearningRateScheduler(lr_scheduler)
early_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
checkpoint = ModelCheckpoint(model_path,
                             monitor='val_loss',
                             save_best_only=True,
                             mode='min',
                             verbose=1)
custom_print = PrintEpochProgress()

# Reduced hyper-parameter grid for the search (the commented-out grid below
# is the larger original search space)
param_grid = {
  #  'optimizer': ['adam'],
    'filters': [32, 48],
    'batch_size': [512, 256],
    'epochs': [50],
    'dropout_rate': [0.3, 0.4],
    'l2_reg': [1e-3, 1e-4]
}

# param_grid = {
#     'optimizer': ['adam', 'rmsprop'],
#     'filters': [32, 64],
#     'kernel_size':[3, 5],
#     'batch_size': [128, 64],
#     'epochs': [100],
#     'dropout_rate': [0.1,0.2, 0.3],
#     'l2_reg': [1e-3, 1e-4, 1e-2]
# }

# 创建模型实例
model = KerasRegressor(build_fn=build_lightweight_seq2seq, verbose=0)

# 执行网格搜索
grid = GridSearchCV(estimator=model,
                    param_grid=param_grid,
                    cv=2,
                    n_jobs=1,
                    verbose=2)

# 训练模型（包含所有回调）
grid_result = grid.fit(trainX, trainY,
                       validation_data=(valX, valY),
                       callbacks=[early_stop, checkpoint, lr_callback, custom_print])
# 输出最佳参数
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
best_params = grid_result.best_params_

# Visualize the training history of the best estimator.
# NOTE(review): this history comes from the estimator's most recent fit
# (the refit after the search) — confirm it is the run you want to plot.
plt.figure(figsize=(12, 6))
plt.plot(grid_result.best_estimator_.model.history.history['loss'], label='Train Loss')
plt.plot(grid_result.best_estimator_.model.history.history['val_loss'], label='Validation Loss')
plt.title('Model Training Progress')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.savefig(os.path.join(writefile_folder, 'training_curve.png'))
# plt.show()

# Reload the best checkpointed model from disk (written by ModelCheckpoint)
from keras.models import load_model

best_model = load_model(model_path)

# Predict on the test windows; the model emits one value per time step,
# so the output is presumably (samples, N_past_value, 1) — see shape print

prediction = best_model.predict(testX)
print("prediction shape:", prediction.shape)


# Undo the MinMax scaling on a 3-D model output (samples, steps, 1).
def inverse_scale_pred(pred_array, n_features=Lstm_input_size):
    """Return the flattened predictions in the original target scale.

    The scaler was fit on n_features columns, so the flattened predictions
    are tiled across every column before inverse_transform; only column 0
    (the target column) of the result is kept.
    """
    flat = pred_array.reshape(-1, 1)
    tiled = np.repeat(flat, n_features, axis=-1)
    return scaler.inverse_transform(tiled)[:, 0]


pred = inverse_scale_pred(prediction)

# Inverse-scale the ground truth the same way: flatten every target window,
# tile to the scaler's feature count, inverse-transform, keep column 0.
original = testY.reshape(-1, 1)
original_copies = np.repeat(original, Lstm_input_size, axis=-1)
original = scaler.inverse_transform(original_copies)[:, 0]
# Shape sanity check — both series should be (samples * Pre_size,)
print(f"Original shape: {original.shape}, Pred shape: {pred.shape}")
# Evaluation metrics (original and pred now have equal length).
test_r2 = r2_score(original, pred)
# Compute MAE with plain numpy: the `mean_absolute_error` imported from
# keras.losses (which shadows any sklearn metric) returns a tensor, not a
# float, and would break the ':.4f' formatting below.
test_mae = np.mean(np.abs(original - pred))
test_mse = mean_squared_error(original, pred)
test_rmse = np.sqrt(test_mse)
# Plot predicted vs actual values on the test set
plt.figure(figsize=(12, 6))
plt.plot(original, label='Actual Values')
plt.plot(pred, label='Predicted Values')
plt.title('Test Set Prediction Comparison')
plt.xlabel('Time Steps')
plt.ylabel('Value')
plt.legend()
plt.savefig(os.path.join(writefile_folder, 'test_pred_comparison.png'))
# plt.show()

# Report the metrics. The horizon is Pre_size steps; the previous message
# hard-coded "30" even though Pre_size is 60.
print(f"\n未来{Pre_size}步预测指标:")
print(f"R²: {test_r2:.4f}")
print(f"MSE: {test_mse:.4f}")
print(f"MAE: {test_mae:.4f}")
print(f"RMSE: {test_rmse:.4f}")
