import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, GRU, Dense, Dropout, BatchNormalization
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.regularizers import l2
import os
import matplotlib.pyplot as plt
import joblib

# Load the raw photovoltaic dataset.
data = pd.read_excel('datasets/guangfu2019.xlsx')

# Convert the timestamp column to Unix epoch seconds so it can be
# treated as a plain numeric feature.
data['时间'] = pd.to_datetime(data['时间']).astype('int64') // 10**9

# Separate the regression target (actual generated power) from the features.
y = data['实际发电功率(mw)'].to_numpy().reshape(-1, 1)
X = data.drop('实际发电功率(mw)', axis=1).to_numpy()

# Chronological 80/20 split — no shuffling, so the test set is strictly
# later in time than the training set.
train_size = int(0.8 * len(X))
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]

# Min-max scaling; the scalers are fitted on the training split only to
# avoid leaking test-set statistics.
scaler_x = MinMaxScaler()
scaler_y = MinMaxScaler()

X_train_scaled = scaler_x.fit_transform(X_train)
X_test_scaled = scaler_x.transform(X_test)
y_train_scaled = scaler_y.fit_transform(y_train)
y_test_scaled = scaler_y.transform(y_test)

# 创建时间序列数据
def create_sequences(features, targets, seq_length):
    """Build sliding-window samples for a sequence model.

    Each sample pairs a window of ``seq_length`` consecutive feature rows
    with the target value immediately following that window.

    Args:
        features: 2-D array-like of shape (n_samples, n_features).
        targets: array-like of shape (n_samples, ...) aligned row-for-row
            with ``features``.
        seq_length: number of consecutive time steps per input window.

    Returns:
        Tuple ``(X_seq, y_seq)`` with shapes
        ``(n_samples - seq_length, seq_length, n_features)`` and
        ``(n_samples - seq_length, ...)``. When there are not enough rows
        to form a single window, correctly-shaped empty arrays are
        returned (instead of 1-D empties), so callers that read
        ``X_seq.shape[2]`` do not crash.
    """
    features = np.asarray(features)
    targets = np.asarray(targets)
    n_windows = len(features) - seq_length
    if n_windows <= 0:
        # Preserve the rank of the outputs so downstream shape indexing works.
        return (np.empty((0, seq_length) + features.shape[1:]),
                np.empty((0,) + targets.shape[1:]))
    X_seq = np.array([features[i:i + seq_length] for i in range(n_windows)])
    y_seq = np.array([targets[i + seq_length] for i in range(n_windows)])
    return X_seq, y_seq

SEQ_LENGTH = 24  # use 24 time steps (~2 hours) of history to predict the next point
X_train_seq, y_train_seq = create_sequences(X_train_scaled, y_train_scaled, SEQ_LENGTH)
X_test_seq, y_test_seq = create_sequences(X_test_scaled, y_test_scaled, SEQ_LENGTH)

# CNN front end (local pattern extraction) feeding a stacked GRU regressor.
# L2 regularization, batch norm, and dropout are used throughout to curb
# overfitting.
model = Sequential()
model.add(Conv1D(128, 3, activation='relu',
                 input_shape=(SEQ_LENGTH, X_train_seq.shape[2]),
                 kernel_regularizer=l2(0.001)))
model.add(BatchNormalization())
model.add(MaxPooling1D(2))
model.add(Dropout(0.3))
model.add(Conv1D(64, 2, activation='relu', kernel_regularizer=l2(0.001)))
model.add(BatchNormalization())
model.add(GRU(128, return_sequences=True, kernel_regularizer=l2(0.001)))
model.add(Dropout(0.2))
model.add(GRU(64))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1))

model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# Create the output directory BEFORE training: ModelCheckpoint writes
# 'model/best_model.h5' during fit(), which would fail mid-training if
# the directory were only created afterwards (as it originally was).
os.makedirs('model', exist_ok=True)

# Callbacks: stop early once validation loss plateaus (restoring the best
# weights) and checkpoint the best model seen so far.
callbacks = [
    EarlyStopping(monitor='val_loss', patience=15, restore_best_weights=True),
    ModelCheckpoint('model/best_model.h5', save_best_only=True)
]

# Train; the last 20% of the training sequences serves as the validation set.
history = model.fit(
    X_train_seq, y_train_seq,
    epochs=100,
    batch_size=64,
    validation_split=0.2,
    callbacks=callbacks,
    verbose=1
)

# Persist the final model and both scalers so inference can reproduce the
# exact same normalization.
model.save('model/cnn_gru_model.h5')
joblib.dump(scaler_x, 'model/scaler_x.pkl')
joblib.dump(scaler_y, 'model/scaler_y.pkl')

# Plot training vs. validation loss curves and save the figure.
plt.figure(figsize=(12, 6))
for key, label in (('loss', 'Training Loss'), ('val_loss', 'Validation Loss')):
    plt.plot(history.history[key], label=label)
plt.title('Training and Validation Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.savefig('model/training_loss.png')
plt.show()

# Evaluate on the held-out test sequences (loss = MSE, metric = MAE).
test_loss = model.evaluate(X_test_seq, y_test_seq)
print(f'Test Loss: {test_loss[0]:.4f}, Test MAE: {test_loss[1]:.4f}')

# Predict on the test set and map both predictions and ground truth back
# to the original power scale.
y_pred = scaler_y.inverse_transform(model.predict(X_test_seq))
y_true = scaler_y.inverse_transform(y_test_seq)

# Overlay the first 500 true vs. predicted values for a visual check.
plt.figure(figsize=(16, 8))
for series, label in ((y_true[:500], 'True Values'), (y_pred[:500], 'Predictions')):
    plt.plot(series, label=label, alpha=0.7)
plt.title('Test Set Prediction Comparison')
plt.ylabel('Actual Power (mw)')
plt.xlabel('Time Steps')
plt.legend()
plt.grid(True)
plt.savefig('model/prediction_comparison_test.png')
plt.show()