import os.path
import numpy as np
import pandas as pda
from sklearn.preprocessing import MinMaxScaler
import pickle
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
from keras.models import Model
from keras.layers import Input, TimeDistributed
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import Callback
from sklearn.model_selection import GridSearchCV
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
import matplotlib.pyplot as plt
from keras.layers import Dropout, Dense, GRU,LayerNormalization,Concatenate,Embedding,MultiHeadAttention
from keras.regularizers import l2
from keras.optimizers import Adam, RMSprop
from keras.models import load_model
import tensorflow as tf
from keras import layers
import math
# Part 1: paths for the input data and output artifacts, plus data preparation.
readfile_folder = r"F:\dataset\BQW_min\data_min"
readfile_name = "A1_2month.csv"
readfile_path = os.path.join(readfile_folder, readfile_name)
writefile_folder = r"F:\dataset\test\transformer"
trainfile_name = "Train_GeneratorWindingTemperature1.csv"
trainfile_path = os.path.join(writefile_folder, trainfile_name)

# Model checkpoint file (Keras HDF5 format).
model_name = 'GeneratorWindingTemperature1_Model.h5'
model_path = os.path.join(writefile_folder, model_name)

# Load the raw data and report its size.
df_ori = pda.read_csv(readfile_path)
print("Data Shape--", df_ori.shape)

# Keep only the timestamp column, the prediction target, and the
# top-ranked influencing-factor columns.
time_name = 'rectime'                      # timestamp column
target_name = 'V71'                        # prediction target column
need_name = ['V74', 'V73', 'V72', 'V76']   # feature columns to keep
print(need_name)

# Window parameters.
N_past_value = 60                     # past time steps fed to the model
Pre_size = 60                         # future steps predicted in one shot
Lstm_input_size = len(need_name) + 1  # target value + number of features

# Column indices of the timestamp, the target, and the feature columns.
# NOTE(review): need_index follows the DataFrame's column order, not the
# order of need_name — confirm the two agree if ordering matters downstream.
time_index = [i for i, col in enumerate(df_ori.columns) if col == time_name]
target_index = [i for i, col in enumerate(df_ori.columns) if col == target_name]
need_index = [i for i, col in enumerate(df_ori.columns) if col in need_name]

T = df_ori.iloc[:, time_index]
X = df_ori.iloc[:, need_index]
y = df_ori.iloc[:, target_index]

# Assemble the training table as: time + target + features, so after the
# time column is consumed as the index, column 0 is always the target.
df_train = pda.concat([T, y, X], axis=1)
print(df_train.head())
df_train.to_csv(trainfile_path, index=False)

# Re-read the assembled table, parsing timestamps and using them as the index.
df = pda.read_csv(trainfile_path, parse_dates=["rectime"], index_col=[0])

n = len(df)

# Chronological split ratios.
train_ratio = 0.7
val_ratio = 0.15
test_ratio = 0.15

# Split points (test gets the remainder so the three parts cover all n rows).
train_size = int(n * train_ratio)
val_size = int(n * val_ratio)
test_size = n - train_size - val_size

# Split in time order (no shuffling) to avoid look-ahead leakage.
df_for_training = df.iloc[:train_size]
df_for_validation = df.iloc[train_size:train_size + val_size]
df_for_testing = df.iloc[-test_size:]

# Report the split sizes.
print(f"训练集: {train_size}条 ({train_ratio * 100:.1f}%)")
print(f"验证集: {val_size}条 ({val_ratio * 100:.1f}%)")
print(f"测试集: {test_size}条 ({test_ratio * 100:.1f}%)")

# Fit the scaler on the training split only, then apply it to test/val,
# so no statistics from the future leak into training.
scaler = MinMaxScaler(feature_range=(0, 1))
df_for_training_scaled = scaler.fit_transform(df_for_training)
df_for_testing_scaled = scaler.transform(df_for_testing)
df_for_validation_scaled = scaler.transform(df_for_validation)

# Persist the scaler so predictions can be un-scaled later (and at inference time).
scaler_path = os.path.join(writefile_folder, 'GeneratorWindingTemperature1_scaler.pkl')
with open(scaler_path, 'wb') as f:
    pickle.dump(scaler, f)

# Split the scaled data into model inputs X and targets Y.
def createXY(dataset, n_past, Pre_size):
    """
    Slice a scaled 2-D array into sliding windows for multi-step forecasting.

    Args:
        dataset: 2-D array of shape (time_steps, n_features); column 0 is
            the prediction target.
        n_past: number of past time steps in each input window.
        Pre_size: number of future target steps to predict per sample.

    Returns:
        (X, Y) as numpy arrays: X has shape (samples, n_past, n_features),
        Y has shape (samples, Pre_size) taken from the target column.
    """
    dataX = []
    dataY = []
    # Upper bound is len - Pre_size + 1: the original `len - Pre_size`
    # was an off-by-one that silently dropped the last complete window.
    for i in range(n_past, len(dataset) - Pre_size + 1):
        dataX.append(dataset[i - n_past:i, :])     # all feature columns
        dataY.append(dataset[i:i + Pre_size, 0])   # future target values
    return np.array(dataX), np.array(dataY)

# Build sliding-window datasets for train / test / validation.
trainX, trainY = createXY(df_for_training_scaled, N_past_value, Pre_size)
testX, testY = createXY(df_for_testing_scaled, N_past_value, Pre_size)
valX, valY = createXY(df_for_validation_scaled, N_past_value, Pre_size)

# Sanity-check the resulting tensor shapes.
print("trainX Shape-- ", trainX.shape)
print("trainY Shape-- ", trainY.shape)
print("testX Shape-- ", testX.shape)
print("testY Shape-- ", testY.shape)

# Part 2: model construction.
def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0):
    """
    One post-norm Transformer encoder block.

    Self-attention and a position-wise feed-forward network, each followed
    by a residual connection and layer normalization. Output width matches
    the input's last dimension.
    """
    # Self-attention sub-layer.
    attn = layers.MultiHeadAttention(
        key_dim=head_size, num_heads=num_heads, dropout=dropout
    )(inputs, inputs)
    attn = layers.Dropout(dropout)(attn)
    normed = layers.LayerNormalization(epsilon=1e-6)(inputs + attn)

    # Feed-forward sub-layer, projected back to the input width.
    hidden = layers.Dense(ff_dim, activation="relu")(normed)
    projected = layers.Dense(inputs.shape[-1])(hidden)
    projected = layers.Dropout(dropout)(projected)

    # Second residual connection + layer norm.
    return layers.LayerNormalization(epsilon=1e-6)(normed + projected)


def build_transformer_model(
        input_shape,
        head_size=32,
        num_heads=4,
        ff_dim=128,
        num_transformer_blocks=3,
        mlp_units=(128,),
        dropout=0.1,
        mlp_dropout=0.1
):
    """
    Build a Transformer model with learned position embeddings.

    Args:
        input_shape: (seq_len, n_features) of each input sample.
        head_size: attention key dimension per head.
        num_heads: number of attention heads.
        ff_dim: hidden width of the encoder feed-forward layers.
        num_transformer_blocks: number of stacked encoder blocks.
        mlp_units: iterable of hidden widths for the regression head.
            Default changed from the mutable list [128] to the tuple (128,)
            to avoid the shared-mutable-default pitfall; callers may still
            pass a list.
        dropout: dropout rate inside the encoder blocks.
        mlp_dropout: dropout rate in the regression head.

    Returns:
        An uncompiled tf.keras.Model emitting Pre_size outputs
        (Pre_size is a module-level constant).
    """
    inputs = tf.keras.Input(shape=input_shape)
    x = inputs

    # Learnable position encoding added to the raw inputs.
    positions = tf.range(start=0, limit=input_shape[0], delta=1)
    position_embedding = layers.Embedding(
        input_dim=input_shape[0], output_dim=input_shape[1]
    )(positions)
    x = x + position_embedding

    # Stacked Transformer encoder blocks.
    for _ in range(num_transformer_blocks):
        x = transformer_encoder(x, head_size, num_heads, ff_dim, dropout)

    # NOTE(review): with data_format="channels_first" this layer averages
    # over the LAST axis; since x is (batch, time, features) that pools the
    # feature axis, leaving one value per time step — confirm intended.
    x = layers.GlobalAveragePooling1D(data_format="channels_first")(x)

    # MLP regression head.
    for dim in mlp_units:
        x = layers.Dense(dim, activation="relu")(x)
        x = layers.Dropout(mlp_dropout)(x)

    # Output layer: all Pre_size future steps at once.
    outputs = layers.Dense(Pre_size)(x)

    return tf.keras.Model(inputs, outputs)


def build_time_series_transformer(optimizer='adam', head_size=32, num_heads=4,
                                  ff_dim=128, num_blocks=3, dropout_rate=0.1):
    """
    Build and compile a pre-norm Transformer encoder for multi-step forecasting.

    Args:
        optimizer: 'adam' (default) selects Adam; any other value selects RMSprop.
        head_size: per-head attention key dimension; model width is head_size * 2.
        num_heads: number of attention heads.
        ff_dim: hidden width of the position-wise feed-forward layers.
        num_blocks: number of stacked encoder blocks.
        dropout_rate: dropout used in attention, FFN and residual paths.

    Returns:
        A compiled keras Model mapping (N_past_value, Lstm_input_size) inputs
        to Pre_size predicted target values (module-level constants).
    """
    d_model = head_size * 2
    inputs = Input(shape=(N_past_value, Lstm_input_size))

    # 1. Project the raw features up to the model width.
    x = Dense(d_model)(inputs)

    # 2. Learnable position encoding. Adding a (1, seq, d_model) tensor
    #    broadcasts over the batch axis, so the original tf.tile to the
    #    batch size was redundant and has been removed.
    positions = tf.range(start=0, limit=N_past_value, delta=1)
    position_embedding = Embedding(input_dim=N_past_value, output_dim=d_model)(positions)
    x = x + tf.expand_dims(position_embedding, 0)

    # 3. Stacked pre-norm Transformer encoder blocks.
    for _ in range(num_blocks):
        # Layer norm before attention (pre-norm).
        x_norm = LayerNormalization(epsilon=1e-6)(x)
        attention_output = MultiHeadAttention(
            num_heads=num_heads,
            key_dim=head_size,
            dropout=dropout_rate
        )(x_norm, x_norm)
        # Residual connection.
        x = x + Dropout(dropout_rate)(attention_output)

        # Feed-forward sub-layer, also pre-norm, projected back to d_model.
        x_norm = LayerNormalization(epsilon=1e-6)(x)
        ff_output = Dense(ff_dim, activation="relu")(x_norm)
        ff_output = Dropout(dropout_rate)(ff_output)
        ff_output = Dense(d_model)(ff_output)
        ff_output = Dropout(dropout_rate)(ff_output)
        # Residual connection.
        x = x + ff_output

    # 4. Readout: combine the last time step with attention pooling.
    last_timestep = x[:, -1, :]

    # BUG FIX: the original used Dense(1, activation="softmax"), which applies
    # softmax over the size-1 feature axis — every weight is exactly 1.0, so
    # the "attention pooling" degenerated to an unweighted sum over time.
    # Normalize the scores across the TIME axis instead.
    attention_scores = Dense(1)(x)                                  # (batch, seq, 1)
    attention_weights = tf.nn.softmax(attention_scores, axis=1)     # softmax over time
    context_vector = tf.reduce_sum(x * attention_weights, axis=1)   # (batch, d_model)

    combined = Concatenate()([last_timestep, context_vector])

    # 5. Linear head emitting all Pre_size future steps at once.
    outputs = Dense(Pre_size, activation="linear")(combined)

    model = Model(inputs, outputs)

    if optimizer == 'adam':
        opt = Adam(learning_rate=0.0005)
    else:
        opt = RMSprop(learning_rate=0.0005)

    model.compile(loss='mse', optimizer=opt, metrics=['mae'])
    return model

# Hyper-parameter grid passed to GridSearchCV; the model-builder keyword
# arguments (head_size, num_heads, ...) are routed through KerasRegressor.
param_grid = {
    'head_size': [32, 64],
    'num_heads': [4, 8],
    'ff_dim': [128, 256],
    'num_blocks': [2, 3],
    'batch_size': [32, 64],  # Transformers often train better with smaller batches
  #  'epochs': [100, 150],
    'dropout_rate': [0.1, 0.2]
}

# Learning-rate schedule used by LearningRateScheduler.
def lr_scheduler(epoch, lr):
    """Return the rate for `epoch`: decay by 10% on every 5th epoch after epoch 20."""
    due_for_decay = epoch > 20 and epoch % 5 == 0
    return lr * 0.9 if due_for_decay else lr

# Custom callback: print train/val loss at the end of every epoch.
class PrintEpochProgress(Callback):
    """Prints training and validation loss after each epoch."""

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        train_loss = logs.get('loss')
        val_loss = logs.get('val_loss')
        # Guard: formatting None with ':.6f' raises TypeError, which would
        # abort training from inside the callback when a key is missing
        # (e.g. no validation data on a given fit).
        if train_loss is None or val_loss is None:
            print(f"Epoch {epoch + 1}: logs incomplete: {logs}")
            return
        print(f"Epoch {epoch + 1}: Train Loss: {train_loss:.6f}, Val Loss: {val_loss:.6f}")

lr_callback = LearningRateScheduler(lr_scheduler)
early_stop = EarlyStopping(monitor='val_loss', patience=5, verbose=1)  # larger patience
# NOTE(review): this single checkpoint path is shared by every grid-search
# candidate and CV fold, so the saved file holds the best epoch of the LAST
# fit performed — not necessarily the best hyper-parameter combination.
checkpoint = ModelCheckpoint(model_path,
                             monitor='val_loss',
                             save_best_only=True,
                             mode='min',
                             verbose=1)
custom_print = PrintEpochProgress()

# Wrap the builder so sklearn's GridSearchCV can drive Keras training.
model = KerasRegressor(build_fn=build_time_series_transformer, epochs=50, verbose=0)

# Run the grid search (2-fold CV, single process so Keras sessions don't clash).
grid = GridSearchCV(estimator=model,
                    param_grid=param_grid,
                    cv=2,
                    n_jobs=1,
                    verbose=2)

# Train: fit kwargs (validation_data, callbacks) are forwarded to every
# underlying Keras fit call.
print("开始训练模型...")
grid_result = grid.fit(trainX, trainY,
                       validation_data=(valX, valY),
                       callbacks=[early_stop, checkpoint, lr_callback, custom_print])

# Report the best cross-validation score and its parameters.
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
best_params = grid_result.best_params_

# Plot the loss history of the refit best estimator.
# NOTE(review): `.model.history` relies on the deprecated keras sklearn
# wrapper keeping the last History object around — verify it exists after refit.
plt.figure(figsize=(12, 6))
plt.plot(grid_result.best_estimator_.model.history.history['loss'], label='Train Loss')
plt.plot(grid_result.best_estimator_.model.history.history['val_loss'], label='Validation Loss')
plt.title('Model Training Progress')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.savefig(os.path.join(writefile_folder, 'training_curve.png'))
plt.close()

# Load the checkpointed model (best val_loss seen by ModelCheckpoint).
best_model = load_model(model_path)

# Predict on the test set; output shape is (samples, Pre_size).
print("进行测试集预测...")
prediction = best_model.predict(testX)
print("prediction shape:", prediction.shape)

# Inverse-scaling helper for values that live in the target (first) column.
def inverse_scale_values(values_array, scaler, n_features):
    """
    Undo MinMax scaling for target-column values.

    values_array: array of scaled values (any shape; flattened internally)
    scaler: fitted scaler exposing inverse_transform over n_features columns
    n_features: number of columns the scaler was fitted on

    Returns a 1-D array of un-scaled values, one per element of values_array.
    """
    # Flatten, then pad into a full-width array with the values in column 0
    # (the target position), so the scaler's column-wise parameters line up.
    flat = values_array.reshape(-1)
    padded = np.zeros((flat.shape[0], n_features))
    padded[:, 0] = flat
    restored = scaler.inverse_transform(padded)
    # Only the target column is meaningful; the rest were zero padding.
    return restored[:, 0]

# Un-scale predictions and ground truth back to physical units.
# Both arrays are flattened to 1-D of length samples * Pre_size.
pred = inverse_scale_values(prediction, scaler, Lstm_input_size)
original = inverse_scale_values(testY, scaler, Lstm_input_size)

# Shape sanity check before computing metrics.
print(f"Original shape: {original.shape}, Pred shape: {pred.shape}")

# Evaluation metrics aggregated over all predicted steps.
test_r2 = r2_score(original, pred)
test_mae = mean_absolute_error(original, pred)
test_mse = mean_squared_error(original, pred)
test_rmse = np.sqrt(test_mse)

# Plot actual vs. predicted values (flattened over all windows and steps,
# so consecutive windows overlap in time).
plt.figure(figsize=(12, 6))
plt.plot(original, label='Actual Values', alpha=0.7)
plt.plot(pred, label='Predicted Values', alpha=0.7)
plt.title('Test Set Prediction Comparison')
plt.xlabel('Time Steps')
plt.ylabel('Value')
plt.legend()
plt.savefig(os.path.join(writefile_folder, 'test_pred_comparison.png'))
plt.close()

print(f"\n未来{Pre_size}步预测指标:")
print(f"R²: {test_r2:.4f}")
print(f"MSE: {test_mse:.4f}")
print(f"MAE: {test_mae:.4f}")
print(f"RMSE: {test_rmse:.4f}")

# Save the paired actual/predicted series to CSV for later inspection.
results_df = pda.DataFrame({
    'Actual': original,
    'Predicted': pred
})
results_path = os.path.join(writefile_folder, 'prediction_results.csv')
results_df.to_csv(results_path, index=False)
print(f"预测结果已保存至: {results_path}")