"""
高速列车轴承智能故障诊断 - PyTorch深度学习模型构建模块

本模块包含：
1. 基于PyTorch的多种深度学习模型构建（CNN、ResNet、LSTM、混合模型）
2. 模型训练和优化
3. 模型集成和性能评估
4. 基于task1提取特征的源域故障诊断

作者：数学建模团队
版本：2.0 (PyTorch版本)
"""

import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')  # 使用非交互式后端
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.preprocessing import StandardScaler, LabelEncoder
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
import warnings
warnings.filterwarnings('ignore')

# Configure Matplotlib to render CJK labels (SimHei first, DejaVu Sans fallback)
# and keep the minus sign renderable when a CJK font is active.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

# Fix random seeds so weight initialization and shuffling are reproducible.
torch.manual_seed(42)
np.random.seed(42)

class BearingDataset(Dataset):
    """Tensor-backed dataset of bearing fault features and integer labels.

    Args:
        features (np.ndarray): feature matrix, one row per sample.
        labels (np.ndarray): integer class label per sample.
        transform (callable, optional): applied to each feature vector on access.
    """

    def __init__(self, features, labels, transform=None):
        self.features = torch.FloatTensor(features)
        self.labels = torch.LongTensor(labels)
        self.transform = transform

    def __len__(self):
        return self.features.shape[0]

    def __getitem__(self, idx):
        x, y = self.features[idx], self.labels[idx]
        if self.transform:
            x = self.transform(x)
        return x, y


class CNN1D(nn.Module):
    """Three-stage 1D CNN classifier for flat feature vectors.

    Each stage is Conv1d -> BatchNorm -> ReLU -> MaxPool(2) -> Dropout;
    the pooled feature map is flattened into a 3-layer MLP head.
    """

    def __init__(self, input_size, num_classes, dropout_rate=0.5):
        super(CNN1D, self).__init__()

        def stage(c_in, c_out):
            # One convolutional stage; halves the temporal length via pooling.
            return [
                nn.Conv1d(c_in, c_out, kernel_size=3, padding=1),
                nn.BatchNorm1d(c_out),
                nn.ReLU(),
                nn.MaxPool1d(2),
                nn.Dropout(dropout_rate),
            ]

        self.conv_layers = nn.Sequential(
            *stage(1, 32),
            *stage(32, 64),
            *stage(64, 128),
        )

        # Probe the conv stack once to size the classifier input.
        self.fc_input_size = self._get_fc_input_size(input_size)

        self.classifier = nn.Sequential(
            nn.Linear(self.fc_input_size, 256),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(128, num_classes),
        )

    def _get_fc_input_size(self, input_size):
        """Run a dummy input through the conv stack to get the flat size."""
        probe = torch.randn(1, 1, input_size)
        return self.conv_layers(probe).view(1, -1).size(1)

    def forward(self, x):
        # (batch, input_size) -> (batch, 1, input_size): add channel axis.
        out = self.conv_layers(x.unsqueeze(1))
        flat = out.view(out.size(0), -1)
        return self.classifier(flat)


class ResNet1D(nn.Module):
    """1D ResNet-style classifier assembled from BasicBlock1D residual blocks."""

    def __init__(self, input_size, num_classes, dropout_rate=0.5):
        super(ResNet1D, self).__init__()

        # Stem: wide receptive field, then downsample by 2.
        self.conv1 = nn.Conv1d(1, 64, kernel_size=7, padding=3)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool1d(3, stride=2, padding=1)

        # Three residual stages; stages 2 and 3 halve the length.
        self.layer1 = self._make_layer(64, 64, 2)
        self.layer2 = self._make_layer(64, 128, 2, stride=2)
        self.layer3 = self._make_layer(128, 256, 2, stride=2)

        self.avgpool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(256, num_classes)
        self.dropout = nn.Dropout(dropout_rate)

    def _make_layer(self, in_channels, out_channels, blocks, stride=1):
        """Build one stage: a (possibly strided) block plus identity blocks."""
        head = BasicBlock1D(in_channels, out_channels, stride)
        tail = [BasicBlock1D(out_channels, out_channels) for _ in range(blocks - 1)]
        return nn.Sequential(head, *tail)

    def forward(self, x):
        # (batch, input_size) -> (batch, 1, input_size): add channel axis.
        out = self.maxpool(self.relu(self.bn1(self.conv1(x.unsqueeze(1)))))

        out = self.layer3(self.layer2(self.layer1(out)))

        pooled = torch.flatten(self.avgpool(out), 1)
        return self.fc(self.dropout(pooled))


class BasicBlock1D(nn.Module):
    """Two-conv 1D residual block with a projection shortcut when needed.

    The shortcut is the identity unless the stride or channel count changes,
    in which case a 1x1 conv + batch norm matches the dimensions.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(BasicBlock1D, self).__init__()

        self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm1d(out_channels)
        self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm1d(out_channels)

        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride),
                nn.BatchNorm1d(out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))

        # Residual connection, then the final activation.
        merged = branch + self.shortcut(x)
        return F.relu(merged)


class LSTMNet(nn.Module):
    """Bidirectional LSTM classifier.

    Each feature vector is treated as a length-1 sequence, so the LSTM
    effectively acts as a learned nonlinear projection whose output feeds
    a 3-layer MLP head.
    """

    def __init__(self, input_size, hidden_size=128, num_layers=2, num_classes=4, dropout_rate=0.5):
        super(LSTMNet, self).__init__()

        self.hidden_size = hidden_size
        self.num_layers = num_layers

        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                           batch_first=True, dropout=dropout_rate, bidirectional=True)

        head = [
            nn.Linear(hidden_size * 2, 256),  # *2: forward + backward directions
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(128, num_classes),
        ]
        self.fc = nn.Sequential(*head)

    def forward(self, x):
        # (batch, input_size) -> (batch, seq=1, input_size)
        seq = x.unsqueeze(1)

        lstm_out, _ = self.lstm(seq)

        # Keep only the final time step's hidden state.
        last_step = lstm_out[:, -1, :]
        return self.fc(last_step)


class HybridModel(nn.Module):
    """CNN + bidirectional LSTM hybrid classifier.

    Two conv stages extract local features; the resulting (length, channels)
    map is fed as a sequence to a bi-LSTM, whose last step drives an MLP head.
    """

    def __init__(self, input_size, num_classes, dropout_rate=0.5):
        super(HybridModel, self).__init__()

        # Convolutional front end: two stages, each halving the length.
        self.cnn = nn.Sequential(
            nn.Conv1d(1, 32, kernel_size=3, padding=1),
            nn.BatchNorm1d(32),
            nn.ReLU(),
            nn.MaxPool1d(2),
            nn.Dropout(dropout_rate),

            nn.Conv1d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.MaxPool1d(2),
            nn.Dropout(dropout_rate),
        )

        # Channel count of the CNN output == per-step LSTM input size.
        self.cnn_output_size = self._get_cnn_output_size(input_size)

        self.lstm = nn.LSTM(self.cnn_output_size, 128, 2,
                           batch_first=True, dropout=dropout_rate, bidirectional=True)

        self.classifier = nn.Sequential(
            nn.Linear(128 * 2, 256),  # *2: forward + backward directions
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(128, num_classes),
        )

    def _get_cnn_output_size(self, input_size):
        """Probe the CNN once and report its channel count (not channels*length)."""
        probe = torch.randn(1, 1, input_size)
        return self.cnn(probe).size(1)

    def forward(self, x):
        # (batch, input_size) -> (batch, 1, input_size): add channel axis.
        feat = self.cnn(x.unsqueeze(1))  # (batch, channels, length)

        # LSTM expects (batch, length, channels).
        seq = feat.permute(0, 2, 1)
        lstm_out, _ = self.lstm(seq)

        # Classify from the final time step.
        last_step = lstm_out[:, -1, :]
        return self.classifier(last_step)


class AttentionModel(nn.Module):
    """Self-attention classifier over extracted feature embeddings.

    An MLP maps the input to a 128-d embedding; multi-head self-attention is
    applied over the (length-1) sequence, and an MLP head classifies the result.

    Args:
        input_size (int): input feature dimension.
        num_classes (int): number of target classes.
        dropout_rate (float): dropout probability used throughout.
    """

    def __init__(self, input_size, num_classes, dropout_rate=0.5):
        super(AttentionModel, self).__init__()

        self.input_size = input_size
        self.num_classes = num_classes

        # Feature extraction MLP: input -> 256 -> 128.
        self.feature_extractor = nn.Sequential(
            nn.Linear(input_size, 256),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
        )

        # BUGFIX: batch_first=True so the (batch, seq, embed) input is
        # interpreted correctly. Without it, MultiheadAttention treats dim 0
        # as the sequence axis, so samples in a batch attended to EACH OTHER
        # and predictions depended on which samples shared a batch.
        self.attention = nn.MultiheadAttention(embed_dim=128, num_heads=8,
                                               dropout=dropout_rate, batch_first=True)

        # Classification head: 128 -> 64 -> num_classes.
        self.classifier = nn.Sequential(
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(64, num_classes)
        )

    def forward(self, x):
        # Extract per-sample embedding: (batch, 128).
        features = self.feature_extractor(x)

        # Add a sequence axis for attention: (batch, seq=1, 128).
        features = features.unsqueeze(1)

        # Self-attention (query = key = value).
        attn_output, _ = self.attention(features, features, features)

        # Drop the sequence axis: (batch, 128).
        attn_output = attn_output.squeeze(1)

        return self.classifier(attn_output)


class PyTorchDeepLearningModels:
    """Builder/trainer for the PyTorch models in this module.

    Wraps model construction, training with early stopping, evaluation,
    training-curve/confusion-matrix plotting and ensemble prediction.
    """

    def __init__(self, input_size, num_classes, device=None):
        """
        Initialize the model builder.

        Args:
            input_size (int): input feature dimension.
            num_classes (int): number of target classes.
            device (str, optional): 'cpu' or 'cuda'; auto-detected when None.
        """
        self.input_size = input_size
        self.num_classes = num_classes

        if device is None:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = torch.device(device)

        print(f"使用设备: {self.device}")

        # Per-model training curves, keyed by model_name.
        self.training_history = {}

    def build_model(self, model_type='cnn', **kwargs):
        """
        Build a model of the requested type and move it to self.device.

        Args:
            model_type (str): one of 'cnn', 'resnet', 'lstm', 'hybrid', 'attention'.
            **kwargs: extra keyword arguments forwarded to the model class.

        Returns:
            torch.nn.Module: the constructed model.

        Raises:
            ValueError: if model_type is not one of the supported names.
        """
        if model_type == 'cnn':
            model = CNN1D(self.input_size, self.num_classes, **kwargs)
        elif model_type == 'resnet':
            model = ResNet1D(self.input_size, self.num_classes, **kwargs)
        elif model_type == 'lstm':
            model = LSTMNet(self.input_size, num_classes=self.num_classes, **kwargs)
        elif model_type == 'hybrid':
            model = HybridModel(self.input_size, self.num_classes, **kwargs)
        elif model_type == 'attention':
            model = AttentionModel(self.input_size, self.num_classes, **kwargs)
        else:
            raise ValueError(f"不支持的模型类型: {model_type}")

        model = model.to(self.device)
        return model

    def train_model(self, model, train_loader, val_loader, epochs=100, lr=0.001,
                   patience=10, model_name='model'):
        """
        Train a model with Adam, plateau LR scheduling and early stopping.

        The best weights (lowest validation loss) are checkpointed to
        'best_{model_name}.pth' whenever a new best is found.

        Args:
            model: PyTorch model (already on self.device).
            train_loader: training DataLoader.
            val_loader: validation DataLoader.
            epochs (int): maximum number of epochs.
            lr (float): initial learning rate.
            patience (int): epochs without val-loss improvement before stopping.
            model_name (str): name used in logs, history and the checkpoint file.

        Returns:
            dict: per-epoch 'train_loss', 'train_accuracy', 'val_loss', 'val_accuracy'.
        """
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
        # NOTE: the deprecated 'verbose' kwarg was removed from
        # ReduceLROnPlateau in recent PyTorch releases; LR changes are
        # logged manually below instead.
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5)

        train_losses = []
        train_accuracies = []
        val_losses = []
        val_accuracies = []

        best_val_loss = float('inf')
        patience_counter = 0

        print(f"\n🚀 开始训练 {model_name} 模型...")
        print(f"📊 训练配置: epochs={epochs}, lr={lr}, patience={patience}")
        print(f"📈 训练批次: {len(train_loader)}, 验证批次: {len(val_loader)}")
        print("=" * 60)

        for epoch in range(epochs):
            # ---- Training phase ----
            print(f"\n🔄 Epoch {epoch+1}/{epochs}")
            print("📚 训练阶段...")
            model.train()
            train_loss = 0.0
            train_correct = 0
            train_total = 0

            for batch_idx, (data, target) in enumerate(train_loader):
                data, target = data.to(self.device), target.to(self.device)

                optimizer.zero_grad()
                output = model(data)
                loss = criterion(output, target)
                loss.backward()
                optimizer.step()

                train_loss += loss.item()
                _, predicted = torch.max(output.data, 1)
                train_total += target.size(0)
                train_correct += (predicted == target).sum().item()

                # Progress line, overwritten in place via '\r'.
                if batch_idx % 5 == 0 or batch_idx == len(train_loader) - 1:
                    current_acc = 100. * train_correct / train_total
                    print(f"  📦 批次 {batch_idx+1}/{len(train_loader)}: "
                          f"Loss={loss.item():.4f}, Acc={current_acc:.2f}%", end='\r')

            # ---- Validation phase ----
            print(f"\n🔍 验证阶段...")
            model.eval()
            val_loss = 0.0
            val_correct = 0
            val_total = 0

            with torch.no_grad():
                for batch_idx, (data, target) in enumerate(val_loader):
                    data, target = data.to(self.device), target.to(self.device)
                    output = model(data)
                    loss = criterion(output, target)

                    val_loss += loss.item()
                    _, predicted = torch.max(output.data, 1)
                    val_total += target.size(0)
                    val_correct += (predicted == target).sum().item()

                    if batch_idx % 2 == 0 or batch_idx == len(val_loader) - 1:
                        current_acc = 100. * val_correct / val_total
                        print(f"  🔍 验证批次 {batch_idx+1}/{len(val_loader)}: "
                              f"Loss={loss.item():.4f}, Acc={current_acc:.2f}%", end='\r')

            # Epoch averages.
            train_loss /= len(train_loader)
            train_acc = 100. * train_correct / train_total
            val_loss /= len(val_loader)
            val_acc = 100. * val_correct / val_total

            train_losses.append(train_loss)
            train_accuracies.append(train_acc)
            val_losses.append(val_loss)
            val_accuracies.append(val_acc)

            # LR scheduling on the validation loss; capture old/new LR for the log.
            old_lr = optimizer.param_groups[0]['lr']
            scheduler.step(val_loss)
            new_lr = optimizer.param_groups[0]['lr']

            # Early-stopping bookkeeping + best-model checkpoint.
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                patience_counter = 0
                torch.save(model.state_dict(), f'best_{model_name}.pth')
                print(f"\n💾 保存最佳模型 (Val Loss: {val_loss:.4f})")
            else:
                patience_counter += 1

            # Epoch summary.
            print(f"\n📊 Epoch {epoch+1} 结果:")
            print(f"  🎯 训练: Loss={train_loss:.4f}, Acc={train_acc:.2f}%")
            print(f"  🔍 验证: Loss={val_loss:.4f}, Acc={val_acc:.2f}%")
            print(f"  📈 学习率: {old_lr:.6f} → {new_lr:.6f}")
            print(f"  ⏰ 耐心值: {patience_counter}/{patience}")
            print(f"  🏆 最佳验证损失: {best_val_loss:.4f}")

            if patience_counter >= patience:
                print(f"\n⏹️ 早停于第 {epoch+1} 轮 (耐心值达到 {patience})")
                break

            print("-" * 60)

        # Record the training history for later plotting.
        history = {
            'train_loss': train_losses,
            'train_accuracy': train_accuracies,
            'val_loss': val_losses,
            'val_accuracy': val_accuracies
        }

        self.training_history[model_name] = history

        print(f"\n🎉 {model_name} 模型训练完成!")
        print(f"📊 训练总结:")
        print(f"  🔢 总轮数: {len(train_losses)}")
        print(f"  🏆 最佳验证损失: {best_val_loss:.4f}")
        print(f"  📈 最终训练准确率: {train_accuracies[-1]:.2f}%")
        print(f"  🔍 最终验证准确率: {val_accuracies[-1]:.2f}%")
        print(f"  💾 模型已保存: best_{model_name}.pth")
        print("=" * 60)

        return history

    def evaluate_model(self, model, test_loader, model_name='model'):
        """
        Evaluate a model on a test DataLoader.

        Args:
            model: PyTorch model.
            test_loader: test DataLoader.
            model_name (str): name used in the printed summary.

        Returns:
            dict: 'accuracy', 'classification_report' (dict),
                  'confusion_matrix', 'predictions', 'targets'.
        """
        model.eval()
        all_predictions = []
        all_targets = []

        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(self.device), target.to(self.device)
                output = model(data)
                _, predicted = torch.max(output, 1)

                all_predictions.extend(predicted.cpu().detach().tolist())
                all_targets.extend(target.cpu().detach().tolist())

        accuracy = accuracy_score(all_targets, all_predictions)
        report = classification_report(all_targets, all_predictions, output_dict=True)
        cm = confusion_matrix(all_targets, all_predictions)

        results = {
            'accuracy': accuracy,
            'classification_report': report,
            'confusion_matrix': cm,
            'predictions': all_predictions,
            'targets': all_targets
        }

        print(f'{model_name} 测试准确率: {accuracy:.4f}')

        return results

    def plot_training_history(self, model_name='model', save_path=None):
        """
        Plot loss and accuracy curves stored by train_model.

        Args:
            model_name (str): key into self.training_history.
            save_path (str, optional): file path; figure saved only when given.
        """
        if model_name not in self.training_history:
            print(f"未找到模型 {model_name} 的训练历史")
            return

        history = self.training_history[model_name]

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))

        # Loss curves.
        ax1.plot(history['train_loss'], label='训练损失', color='blue')
        ax1.plot(history['val_loss'], label='验证损失', color='red')
        ax1.set_title(f'{model_name} - 训练和验证损失')
        ax1.set_xlabel('Epoch')
        ax1.set_ylabel('Loss')
        ax1.legend()
        ax1.grid(True)

        # Accuracy curves.
        ax2.plot(history['train_accuracy'], label='训练准确率', color='blue')
        ax2.plot(history['val_accuracy'], label='验证准确率', color='red')
        ax2.set_title(f'{model_name} - 训练和验证准确率')
        ax2.set_xlabel('Epoch')
        ax2.set_ylabel('Accuracy (%)')
        ax2.legend()
        ax2.grid(True)

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            print(f"训练历史图已保存到: {save_path}")

        plt.close()  # close the figure; the Agg backend never displays it

    def plot_confusion_matrix(self, cm, class_names, model_name='model', save_path=None):
        """
        Plot a confusion matrix heatmap.

        Args:
            cm: confusion matrix (2-D array of counts).
            class_names: axis tick labels, one per class.
            model_name (str): name used in the plot title.
            save_path (str, optional): file path; figure saved only when given.
        """
        plt.figure(figsize=(8, 6))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                   xticklabels=class_names, yticklabels=class_names)
        plt.title(f'{model_name} - 混淆矩阵')
        plt.xlabel('预测标签')
        plt.ylabel('真实标签')

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
            print(f"混淆矩阵已保存到: {save_path}")

        plt.close()  # close the figure; the Agg backend never displays it

    def ensemble_predict(self, models, test_loader, method='voting'):
        """
        Combine predictions from several models.

        Args:
            models: list of PyTorch models.
            test_loader: test DataLoader (must iterate deterministically,
                i.e. shuffle=False, so targets align with predictions).
            method (str): 'voting' for hard majority voting over each model's
                predicted class, or 'averaging' for soft averaging of the
                models' class probabilities.

        Returns:
            tuple: (ensemble_pred, all_targets) as flat int arrays.

        Raises:
            ValueError: if method is not 'voting' or 'averaging'.
        """
        # Collect the ground-truth labels once.
        all_targets = []
        for data, target in test_loader:
            all_targets.extend(target.cpu().detach().tolist())

        # Per-model class-probability matrices, each (n_samples, num_classes).
        model_probs = []
        for model in models:
            model.eval()
            predictions = []

            with torch.no_grad():
                for data, target in test_loader:
                    data = data.to(self.device)
                    output = model(data)
                    pred_probs = F.softmax(output, dim=1).cpu().detach().tolist()
                    predictions.extend(pred_probs)

            model_probs.append(np.array(predictions))

        if method == 'voting':
            # BUGFIX: hard majority voting — previously this branch was
            # identical to 'averaging'. Each model casts one vote per sample;
            # ties break toward the lowest class index.
            votes = np.stack([np.argmax(p, axis=1) for p in model_probs], axis=0)
            ensemble_pred = np.apply_along_axis(
                lambda v: np.bincount(v, minlength=self.num_classes).argmax(),
                0, votes)
        elif method == 'averaging':
            # Soft voting: average the class probabilities, then argmax.
            ensemble_pred = np.argmax(np.mean(model_probs, axis=0), axis=1)
        else:
            raise ValueError(f"不支持的集成方法: {method}")

        # Defensive truncation in case loader iteration lengths ever differ.
        if len(ensemble_pred) != len(all_targets):
            min_len = min(len(ensemble_pred), len(all_targets))
            ensemble_pred = ensemble_pred[:min_len]
            all_targets = all_targets[:min_len]

        # Normalize both outputs to flat int arrays.
        ensemble_pred = np.array(ensemble_pred, dtype=int).flatten()
        all_targets = np.array(all_targets, dtype=int).flatten()

        return ensemble_pred, all_targets


def load_task1_features(csv_path):
    """Load the feature table produced by task1 and split it by domain.

    Args:
        csv_path (str): path to the task1 feature CSV. Expected to contain a
            'data_type' column ('source'/'target'), a 'fault_type' label column
            and the metadata columns listed below; everything else is a feature.

    Returns:
        tuple: (X_source, y_source_encoded, X_target, feature_cols, label_encoder)
    """
    print("加载task1特征数据...")

    df = pd.read_csv(csv_path)

    # Metadata columns that are not model features.
    meta_cols = {'file_name', 'fault_type', 'fault_size', 'load_condition',
                 'sampling_rate', 'rpm', 'data_type'}
    feature_cols = [col for col in df.columns if col not in meta_cols]

    # Split by domain; only the source domain carries labels.
    source_df = df[df['data_type'] == 'source'].copy()
    target_df = df[df['data_type'] == 'target'].copy()

    X_source = source_df[feature_cols].values
    X_target = target_df[feature_cols].values

    # Encode the string fault types as consecutive integers.
    label_encoder = LabelEncoder()
    y_source_encoded = label_encoder.fit_transform(source_df['fault_type'].values)

    print(f"源域数据: {X_source.shape}, 标签: {len(np.unique(y_source_encoded))} 类")
    print(f"目标域数据: {X_target.shape}")
    print(f"特征维度: {len(feature_cols)}")
    print(f"类别: {label_encoder.classes_}")

    return X_source, y_source_encoded, X_target, feature_cols, label_encoder


def prepare_data(X, y, test_size=0.2, random_state=42, batch_size=32):
    """Split, standardize and wrap the data into train/val/test DataLoaders.

    Args:
        X: feature matrix.
        y: label vector.
        test_size: fraction held out as the test set.
        random_state: seed for the stratified splits.
        batch_size: DataLoader batch size.

    Returns:
        tuple: (train_loader, val_loader, test_loader, scaler) — the fitted
            StandardScaler is returned so target-domain data can reuse it.
    """
    # First carve off the test set, then a validation set (20% of the rest);
    # both splits are stratified to preserve class proportions.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state, stratify=y
    )
    X_train, X_val, y_train, y_val = train_test_split(
        X_train, y_train, test_size=0.2, random_state=random_state, stratify=y_train
    )

    # Fit the scaler on training data only to avoid information leakage.
    scaler = StandardScaler()
    scaler.fit(X_train)

    datasets = {
        'train': BearingDataset(scaler.transform(X_train), y_train),
        'val': BearingDataset(scaler.transform(X_val), y_val),
        'test': BearingDataset(scaler.transform(X_test), y_test),
    }

    # Only the training loader shuffles.
    train_loader = DataLoader(datasets['train'], batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(datasets['val'], batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(datasets['test'], batch_size=batch_size, shuffle=False)

    print(f"训练集: {len(datasets['train'])} 样本")
    print(f"验证集: {len(datasets['val'])} 样本")
    print(f"测试集: {len(datasets['test'])} 样本")

    return train_loader, val_loader, test_loader, scaler


if __name__ == "__main__":
    # Smoke test: confirm the module imports cleanly and list the model types
    # accepted by PyTorchDeepLearningModels.build_model.
    print("PyTorch深度学习模型模块加载成功!")
    print("可用的模型类型: cnn, resnet, lstm, hybrid, attention")