# 第四题：基于PyTorch 2.0的LSTM神经网络模型实现

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error
import math

# Fix random seeds so results are reproducible across runs
torch.manual_seed(42)
np.random.seed(42)

# Report the installed PyTorch version
print(f"PyTorch版本: {torch.__version__}")

# 1. 基础LSTM模型实现
class BasicLSTM(nn.Module):
    """Plain LSTM regressor: encodes a sequence, predicts from the final step."""

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        """
        Build the LSTM encoder and linear prediction head.
        :param input_size: number of features per time step
        :param hidden_size: LSTM hidden dimension
        :param num_layers: number of stacked LSTM layers
        :param output_size: dimension of the prediction
        :param dropout: dropout probability (between layers and before the head)
        """
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        # Inter-layer dropout only applies when the stack is deeper than one layer.
        inter_layer_dropout = dropout if num_layers > 1 else 0
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,  # inputs are (batch, seq_len, input_size)
            dropout=inter_layer_dropout,
        )

        # Linear head mapping the last hidden state to the output.
        self.fc = nn.Linear(hidden_size, output_size)

        # Dropout applied to the pooled representation before the head.
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """
        Encode the sequence and predict from the final time step.
        :param x: tensor of shape (batch, seq_len, input_size)
        :return: tensor of shape (batch, output_size)
        """
        # Zero initial hidden/cell states, created on x's device and dtype.
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        h0 = x.new_zeros(state_shape)
        c0 = x.new_zeros(state_shape)

        sequence_out, _ = self.lstm(x, (h0, c0))

        # Keep only the representation of the last time step.
        last_step = sequence_out[:, -1, :]

        return self.fc(self.dropout(last_step))

# 2. 双向LSTM模型
class BiLSTM(nn.Module):
    """Bidirectional LSTM regressor for sequence prediction."""

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        """
        Build the bidirectional LSTM encoder and linear head.
        :param input_size: number of features per time step
        :param hidden_size: hidden dimension of each direction
        :param num_layers: number of stacked LSTM layers
        :param output_size: dimension of the prediction
        :param dropout: dropout probability
        """
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            bidirectional=True,  # run forward and backward passes over time
            dropout=dropout if num_layers > 1 else 0,
        )

        # Forward and backward outputs are concatenated, hence hidden_size * 2.
        self.fc = nn.Linear(hidden_size * 2, output_size)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """
        Encode the sequence in both directions; predict from the final step.
        :param x: tensor of shape (batch, seq_len, input_size)
        :return: tensor of shape (batch, output_size)
        """
        # Two directions double the number of state layers.
        directions = 2
        state_shape = (self.num_layers * directions, x.size(0), self.hidden_size)
        h0 = x.new_zeros(state_shape)
        c0 = x.new_zeros(state_shape)

        sequence_out, _ = self.lstm(x, (h0, c0))

        # Last time step of the concatenated outputs.
        # NOTE(review): at this step the backward half has only seen the final
        # element — presumably intentional for next-step prediction; confirm.
        last_step = sequence_out[:, -1, :]

        return self.fc(self.dropout(last_step))

# 3. LSTM+Attention模型
class Attention(nn.Module):
    """Additive (tanh) attention that pools a sequence into one vector."""

    def __init__(self, hidden_size):
        """
        Build the per-step scoring network.
        :param hidden_size: feature dimension of the LSTM outputs
        """
        super().__init__()
        # Linear -> Tanh -> Linear produces one unnormalized score per step.
        self.attention = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1),
        )

    def forward(self, lstm_output):
        """
        Compute attention weights over time and return the weighted sum.
        :param lstm_output: tensor of shape (batch, seq_len, hidden_size)
        :return: pooled tensor of shape (batch, hidden_size)
        """
        scores = self.attention(lstm_output)        # (batch, seq_len, 1)
        weights = torch.softmax(scores, dim=1)      # normalize over the time axis
        # Weighted sum over the time dimension (weights broadcast over features).
        return (lstm_output * weights).sum(dim=1)

class LSTMWithAttention(nn.Module):
    """LSTM encoder whose time steps are pooled by additive attention."""

    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.2):
        """
        Build the LSTM encoder, attention pooler, and linear head.
        :param input_size: number of features per time step
        :param hidden_size: LSTM hidden dimension
        :param num_layers: number of stacked LSTM layers
        :param output_size: dimension of the prediction
        :param dropout: dropout probability
        """
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,
        )

        # Learned pooling over time steps.
        self.attention = Attention(hidden_size)

        # Linear head on the attention-pooled representation.
        self.fc = nn.Linear(hidden_size, output_size)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """
        Encode the sequence, attention-pool it, and predict.
        :param x: tensor of shape (batch, seq_len, input_size)
        :return: tensor of shape (batch, output_size)
        """
        # Zero initial states on x's device/dtype.
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        initial_state = (x.new_zeros(state_shape), x.new_zeros(state_shape))

        sequence_out, _ = self.lstm(x, initial_state)

        # Pool over time with attention instead of taking only the last step.
        pooled = self.attention(sequence_out)

        return self.fc(self.dropout(pooled))

# 4. 模型训练和评估函数
def train_model(model, train_loader, criterion, optimizer, device, num_epochs=100):
    """
    Run a standard training loop and collect the per-epoch mean loss.

    :param model: network to optimize (switched to train mode here)
    :param train_loader: iterable yielding (inputs, targets) mini-batches
    :param criterion: loss function
    :param optimizer: optimizer stepping the model's parameters
    :param device: device each batch is moved onto
    :param num_epochs: number of full passes over train_loader
    :return: list of average training losses, one entry per epoch
    """
    model.train()
    history = []

    for epoch in range(num_epochs):
        total_loss = 0.0

        for batch_inputs, batch_targets in train_loader:
            batch_inputs = batch_inputs.to(device)
            batch_targets = batch_targets.to(device)

            # Standard step: clear grads, forward, backward, update.
            optimizer.zero_grad()
            loss = criterion(model(batch_inputs), batch_targets)
            loss.backward()
            optimizer.step()

            total_loss += loss.item()

        mean_loss = total_loss / len(train_loader)
        history.append(mean_loss)

        # Progress report every 10 epochs.
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {mean_loss:.6f}')

    return history

def evaluate_model(model, test_loader, criterion, device):
    """
    Evaluate on a held-out loader and report loss plus regression metrics.

    :param model: trained network (switched to eval mode here)
    :param test_loader: iterable yielding (inputs, targets) mini-batches
    :param criterion: loss function
    :param device: device each batch is moved onto
    :return: (average test loss, predictions array, actuals array)
    """
    model.eval()
    running_loss = 0.0
    preds, trues = [], []

    # No gradients needed for evaluation.
    with torch.no_grad():
        for batch_inputs, batch_targets in test_loader:
            batch_inputs = batch_inputs.to(device)
            batch_targets = batch_targets.to(device)

            outputs = model(batch_inputs)
            running_loss += criterion(outputs, batch_targets).item()

            # Collect per-sample predictions and ground truth for metrics.
            preds.extend(outputs.cpu().numpy())
            trues.extend(batch_targets.cpu().numpy())

    avg_loss = running_loss / len(test_loader)
    print(f'Test Loss: {avg_loss:.6f}')

    preds = np.array(preds)
    trues = np.array(trues)

    # Standard regression metrics (computed on the scaled values).
    mse = mean_squared_error(trues, preds)
    rmse = math.sqrt(mse)
    mae = mean_absolute_error(trues, preds)

    print(f'MSE: {mse:.6f}, RMSE: {rmse:.6f}, MAE: {mae:.6f}')

    return avg_loss, preds, trues

def plot_training_history(train_loss_history):
    """
    Plot the per-epoch training loss curve and display it.

    :param train_loss_history: sequence of average losses, one per epoch
    """
    _, ax = plt.subplots(figsize=(10, 5))
    ax.plot(train_loss_history)
    ax.set_title('Training Loss')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.grid(True)
    plt.show()

def plot_predictions(predictions, actuals, title='Predictions vs Actuals'):
    """
    Overlay predicted and actual series on one chart and display it.

    :param predictions: predicted values
    :param actuals: ground-truth values
    :param title: chart title
    """
    _, ax = plt.subplots(figsize=(12, 6))
    # Actuals first so predictions are drawn on top.
    ax.plot(actuals, label='Actuals')
    ax.plot(predictions, label='Predictions')
    ax.set_title(title)
    ax.set_xlabel('Time')
    ax.set_ylabel('Value')
    ax.legend()
    ax.grid(True)
    plt.show()

# 5. 数据准备函数
def prepare_time_series_data(data, seq_length, train_ratio=0.8, batch_size=32):
    """
    Scale a univariate series, window it into (X, y) pairs, and build loaders.

    :param data: 1-D array-like time series
    :param seq_length: number of past steps used to predict the next value
    :param train_ratio: fraction of samples used for training (chronological split)
    :param batch_size: mini-batch size for both loaders (previously hard-coded to 32)
    :return: (train_loader, test_loader, fitted MinMaxScaler)
    """
    # Scale to [0, 1]; the fitted scaler is returned so predictions can be inverted.
    scaler = MinMaxScaler(feature_range=(0, 1))
    data_scaled = scaler.fit_transform(data.reshape(-1, 1))

    # Sliding windows: each sample is seq_length steps, the target is the next step.
    X, y = [], []
    for i in range(len(data_scaled) - seq_length):
        X.append(data_scaled[i:i+seq_length, 0])
        y.append(data_scaled[i+seq_length, 0])

    X, y = np.array(X), np.array(y)

    # To tensors shaped (samples, seq_length, 1) and (samples, 1).
    X = torch.FloatTensor(X).reshape(-1, seq_length, 1)
    y = torch.FloatTensor(y).reshape(-1, 1)

    # Chronological split: earlier samples train, later samples test.
    train_size = int(len(X) * train_ratio)
    X_train, X_test = X[:train_size], X[train_size:]
    y_train, y_test = y[:train_size], y[train_size:]

    train_dataset = TensorDataset(X_train, y_train)
    test_dataset = TensorDataset(X_test, y_test)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    # Keep test order so plotted predictions line up with the original series.
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    return train_loader, test_loader, scaler

def generate_sine_wave_data(n_points=1000, noise_level=0.1):
    """
    Create a noisy sine wave for model experiments.

    :param n_points: number of samples to generate
    :param noise_level: scale factor for additive Gaussian noise
    :return: 1-D numpy array of length n_points
    """
    t = np.linspace(0, 50, n_points)
    noise = np.random.randn(n_points) * noise_level
    return np.sin(t) + noise

# 6. 主函数和测试代码
if __name__ == "__main__":
    # Use the GPU when one is available
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")
    
    # Generate a noisy sine wave as example data
    data = generate_sine_wave_data(n_points=1000, noise_level=0.1)
    
    # Window the series and build the data loaders
    seq_length = 20
    train_loader, test_loader, scaler = prepare_time_series_data(data, seq_length)
    
    # Shared model hyperparameters
    input_size = 1  # univariate time series
    hidden_size = 64
    num_layers = 2
    output_size = 1  # predict a single next value
    learning_rate = 0.001
    num_epochs = 100
    
    # 1. Train the basic LSTM model
    print("训练基础LSTM模型...")
    basic_lstm = BasicLSTM(input_size, hidden_size, num_layers, output_size).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(basic_lstm.parameters(), lr=learning_rate)
    
    basic_loss_history = train_model(
        basic_lstm, train_loader, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate the basic LSTM model
    print("\n评估基础LSTM模型:")
    basic_test_loss, basic_predictions, basic_actuals = evaluate_model(
        basic_lstm, test_loader, criterion, device
    )
    
    # Invert the min-max scaling on predictions and actuals
    basic_predictions = scaler.inverse_transform(np.array(basic_predictions).reshape(-1, 1))
    basic_actuals = scaler.inverse_transform(np.array(basic_actuals).reshape(-1, 1))
    
    # Plot the results
    plot_training_history(basic_loss_history)
    plot_predictions(basic_predictions, basic_actuals, 'Basic LSTM Predictions vs Actuals')
    
    # 2. Train the bidirectional LSTM model
    print("\n训练双向LSTM模型...")
    bi_lstm = BiLSTM(input_size, hidden_size, num_layers, output_size).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(bi_lstm.parameters(), lr=learning_rate)
    
    bi_loss_history = train_model(
        bi_lstm, train_loader, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate the bidirectional LSTM model
    print("\n评估双向LSTM模型:")
    bi_test_loss, bi_predictions, bi_actuals = evaluate_model(
        bi_lstm, test_loader, criterion, device
    )
    
    # Invert the min-max scaling on predictions and actuals
    bi_predictions = scaler.inverse_transform(np.array(bi_predictions).reshape(-1, 1))
    bi_actuals = scaler.inverse_transform(np.array(bi_actuals).reshape(-1, 1))
    
    # Plot the results
    plot_training_history(bi_loss_history)
    plot_predictions(bi_predictions, bi_actuals, 'Bidirectional LSTM Predictions vs Actuals')
    
    # 3. Train the LSTM model with attention
    print("\n训练带注意力机制的LSTM模型...")
    lstm_attention = LSTMWithAttention(input_size, hidden_size, num_layers, output_size).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(lstm_attention.parameters(), lr=learning_rate)
    
    attention_loss_history = train_model(
        lstm_attention, train_loader, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate the LSTM model with attention
    print("\n评估带注意力机制的LSTM模型:")
    attention_test_loss, attention_predictions, attention_actuals = evaluate_model(
        lstm_attention, test_loader, criterion, device
    )
    
    # Invert the min-max scaling on predictions and actuals
    attention_predictions = scaler.inverse_transform(np.array(attention_predictions).reshape(-1, 1))
    attention_actuals = scaler.inverse_transform(np.array(attention_actuals).reshape(-1, 1))
    
    # Plot the results
    plot_training_history(attention_loss_history)
    plot_predictions(attention_predictions, attention_actuals, 'LSTM with Attention Predictions vs Actuals')
    
    # Compare the test performance of the three models
    print("\n模型性能比较:")
    print(f"基础LSTM测试损失: {basic_test_loss:.6f}")
    print(f"双向LSTM测试损失: {bi_test_loss:.6f}")
    print(f"带注意力机制的LSTM测试损失: {attention_test_loss:.6f}")
    
    # Persist the trained weights
    torch.save(basic_lstm.state_dict(), 'basic_lstm.pth')
    torch.save(bi_lstm.state_dict(), 'bi_lstm.pth')
    torch.save(lstm_attention.state_dict(), 'lstm_attention.pth')
    print("模型已保存")