# 第五题：基于PyTorch 2.0的Transformer和LSTM/CNN综合应用

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error

# Set random seeds so results are reproducible
torch.manual_seed(42)
np.random.seed(42)

# Report the PyTorch version in use
print(f"PyTorch版本: {torch.__version__}")

# 1. Transformer+LSTM混合模型
class TransformerLSTMModel(nn.Module):
    """Hybrid sequence model: Transformer encoder followed by an LSTM head.

    Input of shape (batch, seq_len, input_size) is projected to hidden_size,
    contextualized by a 2-layer Transformer encoder, summarized by an LSTM,
    and the final time step is mapped to num_classes outputs.
    """

    def __init__(self, input_size, hidden_size, num_layers, nhead, num_classes, dropout=0.2):
        """
        :param input_size: number of input features per time step
        :param hidden_size: model width (Transformer d_model and LSTM hidden size)
        :param num_layers: number of stacked LSTM layers
        :param nhead: number of self-attention heads
        :param num_classes: output dimension (classes, or 1 for regression)
        :param dropout: dropout probability
        """
        super().__init__()

        self.input_projection = nn.Linear(input_size, hidden_size)

        # Two stacked Transformer encoder layers operating batch-first.
        self.transformer_encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model=hidden_size,
                nhead=nhead,
                dim_feedforward=4 * hidden_size,
                dropout=dropout,
                activation='relu',
                batch_first=True,
            ),
            num_layers=2,
        )

        # Inter-layer LSTM dropout only applies when there is more than one layer.
        self.lstm = nn.LSTM(
            input_size=hidden_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,
        )

        self.fc = nn.Linear(hidden_size, num_classes)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Map a (batch, seq_len, input_size) tensor to (batch, num_classes)."""
        projected = self.input_projection(x)
        encoded = self.transformer_encoder(projected)
        sequence, _ = self.lstm(encoded)
        # Keep only the final time step as the sequence summary.
        last_step = sequence[:, -1, :]
        return self.fc(self.dropout(last_step))

# 2. Transformer+CNN混合模型
class TransformerCNNModel(nn.Module):
    """Hybrid sequence model: 1D CNN feature extractor plus Transformer encoder.

    Input of shape (batch, seq_len, input_size) is projected to hidden_size,
    filtered by a length-preserving 1D convolution, contextualized by a
    2-layer Transformer encoder, mean-pooled over time, and mapped to
    num_classes outputs.
    """

    def __init__(self, input_size, hidden_size, nhead, num_classes, dropout=0.2):
        """
        :param input_size: number of input features per time step
        :param hidden_size: model width (conv channels and Transformer d_model)
        :param nhead: number of self-attention heads
        :param num_classes: output dimension (classes, or 1 for regression)
        :param dropout: dropout probability
        """
        super().__init__()

        self.input_projection = nn.Linear(input_size, hidden_size)

        # kernel_size=3 with padding=1 keeps the sequence length unchanged.
        self.conv1d = nn.Conv1d(in_channels=hidden_size, out_channels=hidden_size, kernel_size=3, padding=1)
        self.bn = nn.BatchNorm1d(hidden_size)
        self.relu = nn.ReLU()

        # Two stacked Transformer encoder layers operating batch-first.
        self.transformer_encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model=hidden_size,
                nhead=nhead,
                dim_feedforward=4 * hidden_size,
                dropout=dropout,
                activation='relu',
                batch_first=True,
            ),
            num_layers=2,
        )

        self.fc = nn.Linear(hidden_size, num_classes)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Map a (batch, seq_len, input_size) tensor to (batch, num_classes)."""
        features = self.input_projection(x)

        # Conv1d expects (batch, channels, seq_len); transpose in and back out.
        features = features.transpose(1, 2)
        features = self.relu(self.bn(self.conv1d(features)))
        features = features.transpose(1, 2)

        encoded = self.transformer_encoder(features)

        # Global average pooling over the time dimension.
        pooled = encoded.mean(dim=1)
        return self.fc(self.dropout(pooled))

# 3. Transformer+CNN+LSTM混合模型
class TransformerCNNLSTMModel(nn.Module):
    """Triple hybrid sequence model: 1D CNN, Transformer encoder, LSTM, attention.

    Input of shape (batch, seq_len, input_size) is projected to hidden_size,
    filtered by a length-preserving 1D convolution, contextualized by a
    2-layer Transformer encoder, summarized by an LSTM, refined with
    multi-head self-attention, and the final time step is mapped to
    num_classes outputs.
    """

    def __init__(self, input_size, hidden_size, num_layers, nhead, num_classes, dropout=0.2):
        """
        :param input_size: number of input features per time step
        :param hidden_size: shared model width across all sub-modules
        :param num_layers: number of stacked LSTM layers
        :param nhead: number of attention heads (Transformer and final attention)
        :param num_classes: output dimension (classes, or 1 for regression)
        :param dropout: dropout probability
        """
        super().__init__()

        self.input_projection = nn.Linear(input_size, hidden_size)

        # kernel_size=3 with padding=1 keeps the sequence length unchanged.
        self.conv1d = nn.Conv1d(in_channels=hidden_size, out_channels=hidden_size, kernel_size=3, padding=1)
        self.bn = nn.BatchNorm1d(hidden_size)
        self.relu = nn.ReLU()

        # Two stacked Transformer encoder layers operating batch-first.
        self.transformer_encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model=hidden_size,
                nhead=nhead,
                dim_feedforward=4 * hidden_size,
                dropout=dropout,
                activation='relu',
                batch_first=True,
            ),
            num_layers=2,
        )

        # Inter-layer LSTM dropout only applies when there is more than one layer.
        self.lstm = nn.LSTM(
            input_size=hidden_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            dropout=dropout if num_layers > 1 else 0,
        )

        # Final self-attention pass over the LSTM outputs.
        self.attention = nn.MultiheadAttention(embed_dim=hidden_size, num_heads=nhead, batch_first=True)

        self.fc = nn.Linear(hidden_size, num_classes)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Map a (batch, seq_len, input_size) tensor to (batch, num_classes)."""
        features = self.input_projection(x)

        # Conv1d expects (batch, channels, seq_len); transpose in and back out.
        features = features.transpose(1, 2)
        features = self.relu(self.bn(self.conv1d(features)))
        features = features.transpose(1, 2)

        encoded = self.transformer_encoder(features)
        sequence, _ = self.lstm(encoded)

        # Self-attention: queries, keys, and values are all the LSTM outputs.
        attended, _ = self.attention(sequence, sequence, sequence)

        # Keep only the final time step as the sequence summary.
        last_step = attended[:, -1, :]
        return self.fc(self.dropout(last_step))

# 4. 模型训练和评估函数
def train_model(model, train_loader, criterion, optimizer, device, num_epochs=50):
    """Run a standard training loop and return the per-epoch loss history.

    :param model: network to optimize (switched to train mode here)
    :param train_loader: DataLoader yielding (inputs, targets) batches
    :param criterion: loss function
    :param optimizer: optimizer bound to ``model``'s parameters
    :param device: device each batch is moved onto
    :param num_epochs: number of passes over the training set
    :return: list of mean batch losses, one entry per epoch
    """
    model.train()
    train_loss_history = []

    for epoch in range(num_epochs):
        epoch_total = 0.0

        for batch_x, batch_y in train_loader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)

            # Forward pass and loss.
            loss = criterion(model(batch_x), batch_y)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            epoch_total += loss.item()

        epoch_loss = epoch_total / len(train_loader)
        train_loss_history.append(epoch_loss)

        # Report progress every 10 epochs.
        if (epoch + 1) % 10 == 0:
            print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss:.6f}')

    return train_loss_history

def evaluate_model(model, test_loader, criterion, device, task_type='classification'):
    """Evaluate ``model`` on a held-out set and print task-appropriate metrics.

    :param model: trained network (switched to eval mode here)
    :param test_loader: DataLoader yielding (inputs, targets) batches
    :param criterion: loss function
    :param device: device each batch is moved onto
    :param task_type: 'classification' or 'regression'
    :return: (mean test loss, predictions, actual targets)
    """
    model.eval()
    total_loss = 0.0
    predictions = []
    actuals = []

    # Gradients are not needed during evaluation.
    with torch.no_grad():
        for batch_x, batch_y in test_loader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)

            outputs = model(batch_x)
            total_loss += criterion(outputs, batch_y).item()

            # Collect predictions and ground truth for metric computation.
            if task_type == 'classification':
                # Predicted class = arg-max over the output logits.
                predictions.extend(torch.argmax(outputs, dim=1).cpu().numpy())
            else:  # regression: keep the raw model outputs
                predictions.extend(outputs.cpu().numpy())
            actuals.extend(batch_y.cpu().numpy())

    test_loss = total_loss / len(test_loader)
    print(f'Test Loss: {test_loss:.6f}')

    # Compute and print evaluation metrics.
    if task_type == 'classification':
        from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
        accuracy = accuracy_score(actuals, predictions)
        precision = precision_score(actuals, predictions, average='weighted')
        recall = recall_score(actuals, predictions, average='weighted')
        f1 = f1_score(actuals, predictions, average='weighted')

        print(f'Accuracy: {accuracy:.4f}, Precision: {precision:.4f}, Recall: {recall:.4f}, F1: {f1:.4f}')
    else:  # regression
        predictions = np.array(predictions)
        actuals = np.array(actuals)

        mse = mean_squared_error(actuals, predictions)
        rmse = math.sqrt(mse)
        mae = mean_absolute_error(actuals, predictions)

        print(f'MSE: {mse:.6f}, RMSE: {rmse:.6f}, MAE: {mae:.6f}')

    return test_loss, predictions, actuals

def plot_training_history(train_loss_history, title='Training Loss'):
    """Plot the per-epoch training loss curve.

    :param train_loss_history: sequence of loss values, one per epoch
    :param title: chart title
    """
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.plot(train_loss_history)
    ax.set_title(title)
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.grid(True)
    plt.show()

# 5. 数据准备函数
def prepare_time_series_data(data, seq_length, train_ratio=0.8, batch_size=32):
    """Turn a 1-D series into sliding-window (X, y) loaders for next-step prediction.

    The series is min-max scaled to [0, 1]; each sample is ``seq_length``
    consecutive values and its target is the value immediately after the
    window. The split is chronological (no shuffling across the boundary),
    which is the appropriate split for time series.

    :param data: 1-D array-like time series
    :param seq_length: window length used as the model input
    :param train_ratio: fraction of samples assigned to the training set
    :param batch_size: DataLoader batch size (previously hard-coded to 32)
    :return: (train_loader, test_loader, fitted MinMaxScaler for inverse transforms)
    """
    # Scale to [0, 1]; the scaler is returned so predictions can be un-scaled.
    # np.asarray accepts plain Python lists as well as ndarrays.
    scaler = MinMaxScaler(feature_range=(0, 1))
    data_scaled = scaler.fit_transform(np.asarray(data).reshape(-1, 1))

    # Sliding windows: X[i] = values [i, i+seq_length), y[i] = value at i+seq_length.
    X, y = [], []
    for i in range(len(data_scaled) - seq_length):
        X.append(data_scaled[i:i + seq_length, 0])
        y.append(data_scaled[i + seq_length, 0])

    X = torch.FloatTensor(np.array(X)).reshape(-1, seq_length, 1)  # (samples, seq_length, features)
    y = torch.FloatTensor(np.array(y)).reshape(-1, 1)              # (samples, 1)

    # Chronological train/test split.
    train_size = int(len(X) * train_ratio)
    train_dataset = TensorDataset(X[:train_size], y[:train_size])
    test_dataset = TensorDataset(X[train_size:], y[train_size:])

    # Shuffle only the training windows; keep the test order for plotting.
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    return train_loader, test_loader, scaler

def prepare_classification_data(n_samples=1000, seq_length=20, n_features=5, n_classes=3, train_ratio=0.8, batch_size=32):
    """Build synthetic sequence-classification loaders from random data.

    Features are i.i.d. standard normal and labels are uniform random, so
    models trained on this data can only be sanity-checked for mechanics,
    not expected to achieve above-chance accuracy.

    :param n_samples: number of sequences to generate
    :param seq_length: time steps per sequence
    :param n_features: features per time step
    :param n_classes: number of label classes
    :param train_ratio: fraction of samples assigned to the training set
    :param batch_size: DataLoader batch size (previously hard-coded to 32)
    :return: (train_loader, test_loader)
    """
    # Random features and labels, converted straight to tensors.
    X = torch.FloatTensor(np.random.randn(n_samples, seq_length, n_features))
    y = torch.LongTensor(np.random.randint(0, n_classes, n_samples))

    # Deterministic head/tail split.
    train_size = int(len(X) * train_ratio)
    train_dataset = TensorDataset(X[:train_size], y[:train_size])
    test_dataset = TensorDataset(X[train_size:], y[train_size:])

    # Only the training set is reshuffled each epoch.
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    return train_loader, test_loader

def generate_sine_wave_data(n_points=1000, noise_level=0.1):
    """Generate a noisy sine wave sampled uniformly on [0, 50].

    :param n_points: number of samples to draw
    :param noise_level: scale of the additive standard-normal noise
    :return: 1-D numpy array of length ``n_points``
    """
    t = np.linspace(0, 50, n_points)
    noise = np.random.randn(n_points) * noise_level
    return np.sin(t) + noise

# 6. 主函数和测试代码
# 6. Main routine and demonstration code
if __name__ == "__main__":
    # Use the GPU when available, otherwise fall back to the CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")
    
    # Shared model hyperparameters
    input_size = 1  # univariate time series
    hidden_size = 64
    num_layers = 2
    nhead = 4  # number of attention heads
    num_classes = 1  # regression task outputs a single value
    learning_rate = 0.001
    num_epochs = 50
    
    # 1. Time-series forecasting task (regression)
    print("=== 时间序列预测任务 ===")
    # Generate example data
    data = generate_sine_wave_data(n_points=1000, noise_level=0.1)
    
    # Prepare the data loaders
    seq_length = 20
    train_loader, test_loader, scaler = prepare_time_series_data(data, seq_length)
    
    # 1.1 Train the Transformer+LSTM model
    print("\n训练Transformer+LSTM模型...")
    transformer_lstm = TransformerLSTMModel(input_size, hidden_size, num_layers, nhead, num_classes).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(transformer_lstm.parameters(), lr=learning_rate)
    
    transformer_lstm_loss_history = train_model(
        transformer_lstm, train_loader, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate the Transformer+LSTM model
    print("\n评估Transformer+LSTM模型:")
    transformer_lstm_test_loss, transformer_lstm_predictions, transformer_lstm_actuals = evaluate_model(
        transformer_lstm, test_loader, criterion, device, task_type='regression'
    )
    
    # Invert the min-max scaling on predictions and actual values
    transformer_lstm_predictions = scaler.inverse_transform(np.array(transformer_lstm_predictions).reshape(-1, 1))
    transformer_lstm_actuals = scaler.inverse_transform(np.array(transformer_lstm_actuals).reshape(-1, 1))
    
    # Plot the training curve
    plot_training_history(transformer_lstm_loss_history, 'Transformer+LSTM Training Loss')
    
    # 1.2 Train the Transformer+CNN model
    print("\n训练Transformer+CNN模型...")
    transformer_cnn = TransformerCNNModel(input_size, hidden_size, nhead, num_classes).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(transformer_cnn.parameters(), lr=learning_rate)
    
    transformer_cnn_loss_history = train_model(
        transformer_cnn, train_loader, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate the Transformer+CNN model
    print("\n评估Transformer+CNN模型:")
    transformer_cnn_test_loss, transformer_cnn_predictions, transformer_cnn_actuals = evaluate_model(
        transformer_cnn, test_loader, criterion, device, task_type='regression'
    )
    
    # Invert the min-max scaling on predictions and actual values
    transformer_cnn_predictions = scaler.inverse_transform(np.array(transformer_cnn_predictions).reshape(-1, 1))
    transformer_cnn_actuals = scaler.inverse_transform(np.array(transformer_cnn_actuals).reshape(-1, 1))
    
    # Plot the training curve
    plot_training_history(transformer_cnn_loss_history, 'Transformer+CNN Training Loss')
    
    # 1.3 Train the Transformer+CNN+LSTM model
    print("\n训练Transformer+CNN+LSTM模型...")
    transformer_cnn_lstm = TransformerCNNLSTMModel(input_size, hidden_size, num_layers, nhead, num_classes).to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(transformer_cnn_lstm.parameters(), lr=learning_rate)
    
    transformer_cnn_lstm_loss_history = train_model(
        transformer_cnn_lstm, train_loader, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate the Transformer+CNN+LSTM model
    print("\n评估Transformer+CNN+LSTM模型:")
    transformer_cnn_lstm_test_loss, transformer_cnn_lstm_predictions, transformer_cnn_lstm_actuals = evaluate_model(
        transformer_cnn_lstm, test_loader, criterion, device, task_type='regression'
    )
    
    # Invert the min-max scaling on predictions and actual values
    transformer_cnn_lstm_predictions = scaler.inverse_transform(np.array(transformer_cnn_lstm_predictions).reshape(-1, 1))
    transformer_cnn_lstm_actuals = scaler.inverse_transform(np.array(transformer_cnn_lstm_actuals).reshape(-1, 1))
    
    # Plot the training curve
    plot_training_history(transformer_cnn_lstm_loss_history, 'Transformer+CNN+LSTM Training Loss')
    
    # Compare the performance of the three regression models
    print("\n时间序列预测模型性能比较:")
    print(f"Transformer+LSTM测试损失: {transformer_lstm_test_loss:.6f}")
    print(f"Transformer+CNN测试损失: {transformer_cnn_test_loss:.6f}")
    print(f"Transformer+CNN+LSTM测试损失: {transformer_cnn_lstm_test_loss:.6f}")
    
    # 2. Sequence classification task
    print("\n=== 序列分类任务 ===")
    # Prepare the (random) classification data
    n_classes = 3
    train_loader_cls, test_loader_cls = prepare_classification_data(n_samples=1000, seq_length=20, n_features=5, n_classes=n_classes)
    
    # 2.1 Train the Transformer+LSTM classifier
    print("\n训练Transformer+LSTM分类模型...")
    transformer_lstm_cls = TransformerLSTMModel(5, hidden_size, num_layers, nhead, n_classes).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(transformer_lstm_cls.parameters(), lr=learning_rate)
    
    transformer_lstm_cls_loss_history = train_model(
        transformer_lstm_cls, train_loader_cls, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate the Transformer+LSTM classifier
    print("\n评估Transformer+LSTM分类模型:")
    transformer_lstm_cls_test_loss, _, _ = evaluate_model(
        transformer_lstm_cls, test_loader_cls, criterion, device, task_type='classification'
    )
    
    # Plot the training curve
    plot_training_history(transformer_lstm_cls_loss_history, 'Transformer+LSTM Classification Training Loss')
    
    # 2.2 Train the Transformer+CNN classifier
    print("\n训练Transformer+CNN分类模型...")
    transformer_cnn_cls = TransformerCNNModel(5, hidden_size, nhead, n_classes).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(transformer_cnn_cls.parameters(), lr=learning_rate)
    
    transformer_cnn_cls_loss_history = train_model(
        transformer_cnn_cls, train_loader_cls, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate the Transformer+CNN classifier
    print("\n评估Transformer+CNN分类模型:")
    transformer_cnn_cls_test_loss, _, _ = evaluate_model(
        transformer_cnn_cls, test_loader_cls, criterion, device, task_type='classification'
    )
    
    # Plot the training curve
    plot_training_history(transformer_cnn_cls_loss_history, 'Transformer+CNN Classification Training Loss')
    
    # 2.3 Train the Transformer+CNN+LSTM classifier
    print("\n训练Transformer+CNN+LSTM分类模型...")
    transformer_cnn_lstm_cls = TransformerCNNLSTMModel(5, hidden_size, num_layers, nhead, n_classes).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(transformer_cnn_lstm_cls.parameters(), lr=learning_rate)
    
    transformer_cnn_lstm_cls_loss_history = train_model(
        transformer_cnn_lstm_cls, train_loader_cls, criterion, optimizer, device, num_epochs=num_epochs
    )
    
    # Evaluate the Transformer+CNN+LSTM classifier
    print("\n评估Transformer+CNN+LSTM分类模型:")
    transformer_cnn_lstm_cls_test_loss, _, _ = evaluate_model(
        transformer_cnn_lstm_cls, test_loader_cls, criterion, device, task_type='classification'
    )
    
    # Plot the training curve
    plot_training_history(transformer_cnn_lstm_cls_loss_history, 'Transformer+CNN+LSTM Classification Training Loss')
    
    # Compare the performance of the three classification models
    print("\n序列分类模型性能比较:")
    print(f"Transformer+LSTM分类测试损失: {transformer_lstm_cls_test_loss:.6f}")
    print(f"Transformer+CNN分类测试损失: {transformer_cnn_cls_test_loss:.6f}")
    print(f"Transformer+CNN+LSTM分类测试损失: {transformer_cnn_lstm_cls_test_loss:.6f}")
    
    # Save model weights (regression models only; classifiers are not persisted)
    torch.save(transformer_lstm.state_dict(), 'transformer_lstm.pth')
    torch.save(transformer_cnn.state_dict(), 'transformer_cnn.pth')
    torch.save(transformer_cnn_lstm.state_dict(), 'transformer_cnn_lstm.pth')
    print("模型已保存")