#https://zhuanlan.zhihu.com/p/20310288990


import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import mean_squared_error
from torch.utils.data import DataLoader, TensorDataset

# GRU model for multi-step time-series forecasting
class GRU(nn.Module):
    """GRU regressor mapping a feature sequence to a multi-step forecast.

    Input shape:  (batch, seq_len, input_size)   [batch_first=True]
    Output shape: (batch, forecast_horizon)
    """

    def __init__(self, input_size, hidden_size, num_layers, forecast_horizon):
        super(GRU, self).__init__()

        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.forecast_horizon = forecast_horizon

        # Recurrent backbone
        self.gru = nn.GRU(input_size=self.input_size, hidden_size=self.hidden_size,
                          num_layers=self.num_layers, batch_first=True)

        # Fully connected head: hidden_size -> 20 -> forecast_horizon
        self.fc1 = nn.Linear(self.hidden_size, 20)
        self.fc2 = nn.Linear(20, self.forecast_horizon)

        # Dropout to reduce overfitting (active only in train() mode)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        # Zero-initialize the hidden state on the input's device/dtype.
        # FIX: the original used torch.randn(...).to(device), which (a) made
        # inference nondeterministic and (b) depended on a global `device`.
        # Zeros matches PyTorch's own default when h_0 is omitted.
        h_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size,
                          device=x.device, dtype=x.dtype)

        out, _ = self.gru(x, h_0)

        # Use only the last time step's hidden output for the forecast head.
        out = F.relu(self.fc1(out[:, -1, :]))
        # FIX: self.dropout was defined but never applied; use it so it
        # actually regularizes training (no-op in eval mode).
        out = self.dropout(out)
        out = self.fc2(out)
        return out

# Select the device: GPU if available, otherwise CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

# Convert the train/test splits to torch tensors and move them to the device.
# NOTE(review): X_train / X_test / y_train / y_test are not defined in this
# file — they are assumed to come from an earlier preprocessing step; confirm.
X_train_tensor = torch.Tensor(X_train).to(device)
X_test_tensor = torch.Tensor(X_test).to(device)

# squeeze(-1) drops a trailing singleton dimension — presumably y is
# (samples, forecast_horizon, 1); verify against the preprocessing code.
y_train_tensor = torch.Tensor(y_train).squeeze(-1).to(device)
y_test_tensor = torch.Tensor(y_test).squeeze(-1).to(device)

# Wrap the tensors into datasets for batching
train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
test_dataset = TensorDataset(X_test_tensor, y_test_tensor)

# Build DataLoaders (shuffle only the training data)
batch_size = 512
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# Model hyperparameters
input_size = X_train.shape[2]  # number of features (X assumed (samples, seq, features))
hidden_size = 64  # hidden units per GRU layer
num_layers = 2  # number of stacked GRU layers
forecast_horizon = 5  # number of future steps to predict

model = GRU(input_size, hidden_size, num_layers, forecast_horizon).to(device)

def train_model_with_dataloader(model, train_loader, test_loader, epochs=50, lr=0.001,
                                log_every=100, plot=True):
    """Train `model` with MSE loss, running a validation pass each epoch.

    Args:
        model: network to train (already on its target device).
        train_loader: DataLoader yielding (X, y) training batches.
        test_loader: DataLoader yielding (X, y) validation batches.
        epochs: number of training epochs.
        lr: Adam learning rate.
        log_every: print a progress line every `log_every` epochs.
            (FIX: the original hard-coded `% 100`, which never logged at
            the default epochs=50.)
        plot: if True, draw the loss curves at the end (needs matplotlib).

    Returns:
        (train_loss, val_loss): lists of per-epoch average losses.
        Returning them is backward compatible — callers previously got None
        and ignored it.
    """
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    train_loss = []
    val_loss = []

    for epoch in range(epochs):
        # ---- training pass ----
        model.train()
        epoch_train_loss = 0
        for X_batch, y_batch in train_loader:
            optimizer.zero_grad()
            output_train = model(X_batch)
            loss = criterion(output_train, y_batch)
            loss.backward()
            optimizer.step()
            epoch_train_loss += loss.item()  # accumulate batch loss
        train_loss.append(epoch_train_loss / len(train_loader))  # epoch average

        # ---- validation pass (no gradients) ----
        model.eval()
        epoch_val_loss = 0
        with torch.no_grad():
            for X_batch, y_batch in test_loader:
                output_val = model(X_batch)
                loss = criterion(output_val, y_batch)
                epoch_val_loss += loss.item()
        val_loss.append(epoch_val_loss / len(test_loader))

        if (epoch + 1) % log_every == 0:
            print(f'Epoch [{epoch+1}/{epochs}], Train Loss: {train_loss[-1]:.4f}, Validation Loss: {val_loss[-1]:.4f}')

    # Plot the loss curves (skippable for headless / test runs)
    if plot:
        plt.plot(train_loss, label='Train Loss')
        plt.plot(val_loss, label='Validation Loss')
        plt.title('Loss vs Epochs')
        plt.xlabel('Epochs')
        plt.ylabel('Loss')
        plt.legend()
        plt.show()

    return train_loss, val_loss

def evaluate_model_with_dataloader(model, test_loader):
    """Run `model` over `test_loader`, print the test MSE, return results.

    Args:
        model: trained network (evaluated with torch.no_grad()).
        test_loader: DataLoader yielding (X, y) batches.

    Returns:
        (y_pred, y_true): numpy arrays of all batch predictions / targets,
        concatenated along the batch axis.
    """
    model.eval()
    pred_batches = []
    true_batches = []

    with torch.no_grad():
        for X_batch, y_batch in test_loader:
            pred_batches.append(model(X_batch).cpu())
            true_batches.append(y_batch.cpu())

    # Concatenate with torch, then convert once at the end.
    # FIX: the original called np.concatenate but the file never imported
    # numpy, so this raised NameError at runtime.
    pred_t = torch.cat(pred_batches, dim=0)
    true_t = torch.cat(true_batches, dim=0)

    # Mean over all elements — identical to sklearn's mean_squared_error
    # default ('uniform_average' over equally-sized columns).
    mse = torch.mean((pred_t - true_t) ** 2).item()
    print(f'Mean Squared Error: {mse:.4f}')

    y_pred_rescaled = pred_t.numpy()
    y_test_rescaled = true_t.numpy()
    return y_pred_rescaled, y_test_rescaled

# Train the model (2000 epochs; the function logs every 100 epochs)
train_model_with_dataloader(model, train_loader, test_loader, epochs=2000)

# Evaluate on the test set (prints MSE, returns stacked predictions/targets)
y_pred_rescaled, y_test_rescaled = evaluate_model_with_dataloader(model, test_loader)

# Model persistence helper
def save_model(model, path='./model_files/multistep_gru_model.pth'):
    """Save the model's state_dict to `path`, creating parent dirs as needed.

    FIX: the original crashed with FileNotFoundError when ./model_files
    did not already exist.
    """
    import os
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    torch.save(model.state_dict(), path)
    print(f'Model saved to {path}')

# Persist the trained model's weights to the default path
save_model(model)