import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

# Use the SimHei font so the Chinese characters in plot labels/titles render,
# and keep the minus sign displayable under a non-ASCII font.
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Hyper-parameter / path configuration for the GRU forecaster.
class Config:
    data_path = 'data/train.xlsx'
    timestep = 30           # look-back window: the previous 30 days feed each prediction
    batch_size = 256        # mini-batch size
    feature_size = 1        # features per time step (daily wind speed only)
    hidden_size = 256       # GRU hidden state width
    output_size = 1         # single-output task: predict the next day's wind speed
    num_layers = 1          # number of stacked GRU layers
    epochs = 10             # training epochs
    best_loss = 0           # placeholder for tracking the best loss
    learning_rate = 0.00005
    model_name = 'gru'
    save_path = f'data/{model_name}.pth'  # where the best checkpoint would be stored

config = Config()

# 1. Load the time series (first 10k rows only).
df = pd.read_excel(config.data_path, index_col=0)
df = df.head(10000)
data = df.values

# 2. Split BEFORE scaling: 70% train, 30% test.
train_size = int(len(data) * 0.7)

# 3. Fit the scaler on the training portion only, then apply the same
# transform to the test portion. Fitting on the full series (as before)
# leaks the test set's min/max statistics into the training data.
scaler = MinMaxScaler(feature_range=(0, 1))
train_data = scaler.fit_transform(data[:train_size])
test_data = scaler.transform(data[train_size:])

# 4. Build a supervised dataset from the series.
def create_dataset(data, timestep):
    """Slice a series into sliding windows and next-step targets.

    Args:
        data: array-like of shape (T, F); each row is one time step.
        timestep: window length; each sample is `timestep` consecutive
            rows, and its target is the row immediately after the window.

    Returns:
        Tuple (X, y) of numpy arrays with shapes (T - timestep, timestep, F)
        and (T - timestep, F). Both are empty when T <= timestep.
    """
    n_samples = len(data) - timestep
    windows = [data[start:start + timestep] for start in range(n_samples)]
    targets = [data[start + timestep] for start in range(n_samples)]
    return np.array(windows), np.array(targets)

X_train, y_train = create_dataset(train_data, config.timestep)
X_test, y_test = create_dataset(test_data, config.timestep)

# Convert every array to a float32 tensor in one pass.
X_train, y_train, X_test, y_test = (
    torch.tensor(arr, dtype=torch.float32)
    for arr in (X_train, y_train, X_test, y_test)
)

# Batch iterators; only the training split is shuffled.
train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=config.batch_size, shuffle=True)
test_loader = DataLoader(TensorDataset(X_test, y_test), batch_size=config.batch_size, shuffle=False)

# Model definition.
class GRUModel(nn.Module):
    """GRU regressor: maps a (batch, seq, feature) window to `output_size`
    values by projecting the hidden state of the final time step."""

    def __init__(self, input_size, hidden_size, output_size, num_layers):
        super(GRUModel, self).__init__()
        # Submodules are created in the same order as the original so
        # parameter initialization consumes the RNG identically.
        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.dropout = nn.Dropout(p=0.5)  # regularization; active only in train mode

    def forward(self, x):
        seq_out, _ = self.gru(x)
        last_step = seq_out[:, -1, :]  # hidden output at the final time step
        return self.fc(self.dropout(last_step))

# Model, loss, and optimizer; weight_decay adds L2 regularization.
model = GRUModel(config.feature_size, config.hidden_size, config.output_size, config.num_layers)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=config.learning_rate, weight_decay=1e-5)

# Training loop: one MSE-optimization pass per epoch, reporting mean batch loss.
# NOTE(review): config.best_loss and config.save_path are never used here —
# no checkpoint is ever written; confirm whether saving was intended.
for epoch in range(config.epochs):
    model.train()
    running_loss = 0.0
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print(f'Epoch {epoch+1}/{config.epochs}, Loss: {running_loss/len(train_loader)}')

# Evaluate on the training set.
model.eval()
with torch.no_grad():
    train_predictions = model(X_train).numpy()
train_actual = y_train.numpy()

# Undo the min-max scaling so metrics are in the original units.
train_predictions = scaler.inverse_transform(train_predictions)
train_actual = scaler.inverse_transform(train_actual)

# Metrics. RMSE is computed as sqrt(MSE) because the `squared=False`
# keyword of mean_squared_error was deprecated in scikit-learn 1.4 and
# removed in 1.6; np.sqrt gives the same value on every version.
r2 = r2_score(train_actual, train_predictions)
mae = mean_absolute_error(train_actual, train_predictions)
rmse = np.sqrt(mean_squared_error(train_actual, train_predictions))

print(f'R²: {r2}')
print(f'MAE: {mae}')
print(f'RMSE: {rmse}')

# Plot predicted vs. actual values on the training set.
plt.figure(figsize=(10, 5))
plt.plot(range(len(train_actual)), train_actual, label='实际值')
plt.plot(range(len(train_predictions)), train_predictions, label='训练集预测值', color='yellow', linestyle='--')
plt.legend()
plt.xlabel('样本')
plt.ylabel('值')
plt.title('训练集的拟合效果')
plt.show()

# Evaluate on the test set (model is already in eval mode).
with torch.no_grad():
    test_predictions = model(X_test).numpy()
test_actual = y_test.numpy()

# Undo the min-max scaling so metrics are in the original units.
test_predictions = scaler.inverse_transform(test_predictions)
test_actual = scaler.inverse_transform(test_actual)

# Metrics. RMSE is computed as sqrt(MSE) because the `squared=False`
# keyword of mean_squared_error was deprecated in scikit-learn 1.4 and
# removed in 1.6; np.sqrt gives the same value on every version.
r2 = r2_score(test_actual, test_predictions)
mae = mean_absolute_error(test_actual, test_predictions)
rmse = np.sqrt(mean_squared_error(test_actual, test_predictions))

print(f'R²: {r2}')
print(f'MAE: {mae}')
print(f'RMSE: {rmse}')

# Plot predicted vs. actual values on the test set.
plt.figure(figsize=(10, 5))
plt.plot(range(len(test_actual)), test_actual, label='实际值')
plt.plot(range(len(test_predictions)), test_predictions, label='测试集预测值', color='yellow', linestyle='--')
plt.legend()
plt.xlabel('样本')
plt.ylabel('值')
plt.title('测试集的拟合效果')
plt.show()

# Autoregressive forecast for the next 1000 days: seed with the last
# `timestep` (scaled) test points and feed each prediction back in as
# the newest step of the window.
window = torch.tensor(
    test_data[-config.timestep:].reshape(1, config.timestep, config.feature_size),
    dtype=torch.float32,
)

predictions = []
with torch.no_grad():
    for _ in range(1000):
        step = model(window)
        predictions.append(step.item())
        # Drop the oldest step and append the new prediction.
        window = torch.cat((window[:, 1:, :], step.view(1, 1, 1)), dim=1)

# Map the scaled predictions back to the original units.
predictions = scaler.inverse_transform(np.array(predictions).reshape(-1, 1))

print("未来1000天的预测结果:")
print(predictions)

# Plot the test series followed by the 1000-day forecast.
plt.figure(figsize=(10, 5))
plt.plot(range(len(test_data)), scaler.inverse_transform(test_data), label='实际值')
plt.plot(range(len(test_data), len(test_data) + 1000), predictions, label='未来1000天预测值', color='red', linestyle='--')
plt.legend()
plt.xlabel('样本')
plt.ylabel('值')
plt.title('未来1000天的预测结果')
plt.show()