#train&test.py
import os
import pickle
import hashlib
from datetime import datetime
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, random_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix, f1_score, accuracy_score, \
    precision_score, recall_score, balanced_accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
import random
from scipy import stats
import pywt
import traceback
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression



# Seed all RNG sources so experiments are reproducible.
def set_seed(seed=42):
    """Seed Python, NumPy and PyTorch (CPU + every GPU) with *seed*."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)


set_seed(42)

# Prefer GPU when available; models and batches are moved onto this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

# ---------------------- Global configuration ----------------------
CFG = {
    # Data and model architecture
    "input_dim": 26,  # dimension of the statistical feature vector
    "signal_length": 450,  # raw signal length (if available)
    "hidden_dim": 512,  # MLP hidden-layer width
    "dropout_rate": 0.35,  # dropout probability
    "num_classes": 10,  # number of known classes
    "unknown_classes": 1,  # number of unknown classes in the open-set test split
    "known_classes": [0, 1, 10, 11, 20, 21, 30, 31, 40, 41],
    #"class_centers":None,

    # Training hyper-parameters
    "epochs": 90,
    "batch_size": 32,
    "lr": 2.5e-4,
    "weight_decay": 3e-4,
    "label_smoothing": 0.1,
    "mixup_alpha": 0.45,
    "patience": 30,  # EarlyStopping patience (epochs)

    # Learning-rate schedule
    "T_max": 800,  # CosineAnnealingLR period

    # Open-set parameters
    "conf_threshold": 0.45,  # below this max softmax confidence a sample is treated as unknown
    "distance_threshold": 2.7,  # above this feature-space distance a sample is treated as unknown

    # Training details
    "num_workers": 4,  # DataLoader worker processes
    "seed": 42,  # random seed for reproducibility
    "device": "cuda" if torch.cuda.is_available() else "cpu",

    # Model persistence
    "output_dir": "./results",  # where models and results are saved
}

def create_scheduler(optimizer, epochs, train_loader):
    """Build a OneCycleLR schedule spanning the whole training run.

    Each parameter group peaks at 3x its base learning rate, with a 30%
    warm-up phase and a moderate final LR reduction.
    """
    peak_lrs = [group['lr'] * 3 for group in optimizer.param_groups]
    return torch.optim.lr_scheduler.OneCycleLR(
        optimizer,
        max_lr=peak_lrs,
        total_steps=epochs * len(train_loader),
        pct_start=0.3,           # 30% of the steps spent warming up
        div_factor=20.0,         # initial_lr = max_lr / 20
        final_div_factor=100.0,  # min_lr = initial_lr / 100
    )

def setup_class_specific_thresholds():
    """Build a ClassSpecificThresholds instance with hand-tuned per-class values.

    The per-class numbers come from confusion-matrix analysis: classes whose
    samples were often rejected as "Unknown" get looser distance thresholds,
    while well-separated classes keep tight ones.
    """
    thresholds = ClassSpecificThresholds(
        default_conf_threshold=0.45,  # Keeping your default confidence threshold
        default_dist_threshold=2.70  # Keeping your default distance threshold
    )

    # class label -> (confidence threshold, distance threshold)
    per_class = {
        0: (0.45, 2.2),   # standard thresholds perform well per confusion matrix
        10: (0.42, 3.5),  # samples misclassified as Unknown -> looser distance
        20: (0.45, 2.3),  # well clustered in feature space
        21: (0.45, 2.4),  # well clustered in feature space
        30: (0.45, 2.5),  # well clustered in feature space
        31: (0.35, 3.5),  # misclassified as Unknown -> lower confidence gate
        40: (0.42, 3.0),  # some Unknown misclassifications -> adjusted
        41: (0.45, 2.5),  # well separated in feature space
    }
    for label, (conf, dist) in per_class.items():
        thresholds.set_threshold(class_label=label, conf_threshold=conf, dist_threshold=dist)

    return thresholds

# ---------------------- Feature name configuration ----------------------
# Human-readable names for the 26 statistical features. Order matters:
# it must match the output order of the feature-extraction pipeline.
FEATURE_NAMES = [
    # Time domain features (7)
    'Mean', 'Std', 'Median', 'Range', 'RMS', 'Skewness', 'Kurtosis',
    # Frequency domain features (5)
    'Main Freq', 'Freq 0-10', 'Freq 10-20', 'Freq 20-30', 'Freq 30-40',
    # Wavelet features (4)
    'Wavelet cA Std', 'Wavelet cD Mean', 'Wavelet cD Range', 'Wavelet cD IQR',
    # Hjorth parameters (2)
    'Hjorth Activity', 'Hjorth Complexity',
    # Additional frequency band features (4)
    'Freq 40-50', 'Freq 50-60', 'Freq 60-80', 'Freq 80-100',
    # Entropy features (2)
    'Sample Entropy', 'Approximate Entropy',
    # Additional time domain statistics (2)
    'Q1', 'Q3'
]

# ---------------------- Network architecture ----------------------
class ChannelAttention(nn.Module):
    """Channel (feature-wise) attention for flat feature vectors.

    A sigmoid gate is computed from the layer-normalized input and then
    multiplied with the raw (pre-norm) input - i.e. a gated residual.
    """

    def __init__(self, dim, reduction_ratio=16):
        super().__init__()
        bottleneck = max(4, dim // reduction_ratio)
        self.norm = nn.LayerNorm(dim)
        self.gate = nn.Sequential(
            nn.Linear(dim, bottleneck),
            nn.GELU(),
            nn.Dropout(0.2),  # regularize the gate to prevent overfitting
            nn.Linear(bottleneck, dim),
            nn.Sigmoid()
        )

    def forward(self, x):
        weights = self.gate(self.norm(x))
        return x * weights


class SpatialAttention(nn.Module):
    """Spatial (per-time-step) attention for 1-D convolutional feature maps."""

    def __init__(self, in_channels):
        super().__init__()
        # Collapse all channels into one attention score per position.
        self.conv = nn.Conv1d(in_channels, 1, kernel_size=7, padding=3)

    def forward(self, x):
        # x: [B, C, L] -> gate: [B, 1, L], broadcast across channels.
        gate = torch.sigmoid(self.conv(x))
        return x * gate


class DualBranchModel(nn.Module):
    """Dual-branch classifier for open-set signal recognition.

    Branch 1 (CNN) encodes the raw 1-D signal; branch 2 (MLP) encodes the
    statistical feature vector. Their outputs are concatenated, gated by
    channel attention, batch-normalized, and fed into (a) a classifier head
    producing class logits and (b) a 128-d embedding head used for
    distance-based unknown detection.

    Bug fixed: the original __init__ built fusion_attention, fusion_bn,
    classifier and embedding TWICE; the second assignment silently replaced
    the first, so the first set was dead code. Only the effective (second)
    definitions are kept here - runtime behavior is unchanged.
    """

    def __init__(self, signal_input_dim=450, feat_input_dim=26, hidden_dim=512, num_classes=10):
        super().__init__()

        # --- Signal branch: Conv1d stack with spatial attention ---
        self.signal_branch = nn.Sequential(
            nn.Conv1d(1, 64, kernel_size=7, stride=1, padding=3),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.MaxPool1d(2),

            SpatialAttention(64),

            nn.Conv1d(64, 128, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.MaxPool1d(2),

            SpatialAttention(128),

            nn.Conv1d(128, 256, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.MaxPool1d(2),

            SpatialAttention(256),

            nn.Conv1d(256, 384, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm1d(384),
            nn.ReLU(),
            nn.AdaptiveAvgPool1d(1)  # -> [B, 384, 1]
        )

        # --- Feature branch: MLP with channel attention ---
        self.feature_branch = nn.Sequential(
            nn.Linear(feat_input_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.GELU(),
            nn.Dropout(0.25),

            ChannelAttention(hidden_dim, reduction_ratio=4),

            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.BatchNorm1d(hidden_dim // 2),
            nn.GELU(),
            nn.Dropout(0.25),

            ChannelAttention(hidden_dim // 2, reduction_ratio=8),

            nn.Linear(hidden_dim // 2, hidden_dim // 3),
            nn.BatchNorm1d(hidden_dim // 3),
            nn.GELU(),
            nn.Dropout(0.2),

            nn.Linear(hidden_dim // 3, hidden_dim // 4),
            nn.BatchNorm1d(hidden_dim // 4),
            nn.GELU()
        )

        # --- Fusion and heads ---
        fusion_dim = (hidden_dim // 4) + 384  # MLP output + CNN channels

        self.fusion_attention = ChannelAttention(fusion_dim, reduction_ratio=4)
        self.fusion_bn = nn.BatchNorm1d(fusion_dim)

        # Classification head (logits over the known classes).
        self.classifier = nn.Sequential(
            nn.Linear(fusion_dim, hidden_dim // 2),
            nn.BatchNorm1d(hidden_dim // 2),
            nn.GELU(),
            nn.Dropout(0.25),

            nn.Linear(hidden_dim // 2, hidden_dim // 3),
            nn.BatchNorm1d(hidden_dim // 3),
            nn.GELU(),
            nn.Dropout(0.2),

            nn.Linear(hidden_dim // 3, hidden_dim // 4),
            nn.BatchNorm1d(hidden_dim // 4),
            nn.GELU(),
            nn.Dropout(0.15),

            nn.Linear(hidden_dim // 4, num_classes)
        )

        # Embedding head used for open-set distance computations.
        self.embedding = nn.Sequential(
            nn.Linear(fusion_dim, 256),
            nn.BatchNorm1d(256),
            nn.GELU(),
            nn.Dropout(0.15),
            nn.Linear(256, 128),
            nn.BatchNorm1d(128),
            nn.GELU()
        )

    def _fuse(self, signal, features):
        """Run both branches and return the fused, normalized representation."""
        signal = signal.unsqueeze(1)  # [B, L] -> [B, 1, L]
        signal_features = self.signal_branch(signal).squeeze(-1)
        statistical_features = self.feature_branch(features)
        combined = torch.cat([signal_features, statistical_features], dim=1)
        combined = self.fusion_attention(combined)
        return self.fusion_bn(combined)

    def forward(self, signal, features):
        """Return (logits [B, num_classes], embedding [B, 128])."""
        combined = self._fuse(signal, features)
        return self.classifier(combined), self.embedding(combined)

    def extract_features(self, signal, features):
        """Return only the 128-d embedding (same computation path as forward)."""
        return self.embedding(self._fuse(signal, features))

class FocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017) for class imbalance / hard examples.

    loss = alpha * (1 - p_t)^gamma * CE, where p_t = exp(-CE) is the
    probability assigned to the true class.
    """

    def __init__(self, alpha=2.2, gamma=2.8, reduction='mean'):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction
        self.cross_entropy = nn.CrossEntropyLoss(reduction='none')

    def forward(self, inputs, targets):
        ce = self.cross_entropy(inputs, targets)
        pt = torch.exp(-ce)
        focal = self.alpha * (1 - pt) ** self.gamma * ce
        if self.reduction == 'mean':
            return torch.mean(focal)
        if self.reduction == 'sum':
            return torch.sum(focal)
        return focal

class ClassSpecificThresholds:
    """Per-class confidence/distance thresholds for open-set rejection.

    A sample predicted as class c is rejected as "unknown" when its max
    softmax confidence falls below the class's confidence threshold, or its
    embedding distance to the class center exceeds the distance threshold.
    Classes without an explicit entry fall back to the defaults.
    """

    def __init__(self, default_conf_threshold=0.6, default_dist_threshold=2.5):
        """
        Initialize with default thresholds

        Args:
            default_conf_threshold: Default confidence threshold
            default_dist_threshold: Default distance threshold
        """
        self.default_conf_threshold = default_conf_threshold
        self.default_dist_threshold = default_dist_threshold
        # Per-class overrides: {class_label: {'conf': float, 'dist': float}}
        self.class_thresholds = {}

        # Track which classes need tuning based on error patterns
        self.tuning_candidates = set()

    def set_threshold(self, class_label, conf_threshold=None, dist_threshold=None):
        """Set custom thresholds for a specific class.

        Passing None for either threshold keeps its current (or default) value.
        """
        if class_label not in self.class_thresholds:
            self.class_thresholds[class_label] = {
                'conf': self.default_conf_threshold,
                'dist': self.default_dist_threshold
            }

        if conf_threshold is not None:
            self.class_thresholds[class_label]['conf'] = conf_threshold
        if dist_threshold is not None:
            self.class_thresholds[class_label]['dist'] = dist_threshold

        # Add to tuning candidates for auto-tuning
        self.tuning_candidates.add(class_label)

    def get_thresholds(self, class_label):
        """Return (conf_threshold, dist_threshold) for a class, or the defaults."""
        if class_label in self.class_thresholds:
            return self.class_thresholds[class_label]['conf'], self.class_thresholds[class_label]['dist']
        return self.default_conf_threshold, self.default_dist_threshold

    def auto_tune_from_validation(self, model, val_loader, device, class_centers, known_classes, label_dict):
        """
        Automatically tune thresholds for problematic classes using validation data

        For every known class seen in the validation set, the confidence
        threshold is set near the 5th percentile of observed confidences and
        the distance threshold near the 95th percentile of observed
        center-distances, clamped to sane ranges.

        Args:
            model: Neural network model
            val_loader: Validation data loader
            device: Computing device
            class_centers: Dictionary of class centers
            known_classes: List of known class indices
            label_dict: Mapping from original labels to model indices
        """
        print("🔧 Auto-tuning class-specific thresholds...")

        # Reverse dictionary for converting indices to original labels
        reverse_dict = {v: k for k, v in label_dict.items()}

        # Extract features, confidences, and distances for validation set
        class_data = {}

        with torch.no_grad():
            for signal, features, labels in val_loader:
                signal, features = signal.to(device), features.to(device)

                outputs, _ = model(signal, features)
                batch_features = model.extract_features(signal, features)

                probs = F.softmax(outputs, dim=1).cpu().numpy()
                max_confidences = np.max(probs, axis=1)

                # Calculate distances to class centers
                for i, (feat, label) in enumerate(zip(batch_features, labels)):
                    # Convert to numpy for distance calculation
                    feat_np = feat.cpu().numpy()
                    label_np = label.item()

                    # Skip unknown class (labeled -1 by convention)
                    if label_np == -1:
                        continue

                    # Get original label
                    orig_label = reverse_dict.get(label_np, label_np)

                    # Calculate minimum distance to class center
                    min_distance = float('inf')
                    if label_np in class_centers:
                        center = class_centers[label_np]
                        # Handle different center formats
                        if isinstance(center, dict) and 'centers' in center:
                            # Multiple centers case (e.g. from per-class clustering)
                            centers = center['centers']
                            weights = center.get('weights', np.ones(len(centers)) / len(centers))

                            # Find weighted distance to each center
                            center_distances = []
                            for c, w in zip(centers, weights):
                                dist = np.linalg.norm(feat_np - c) * (1 / w)  # Inverse weight for distance
                                center_distances.append(dist)

                            min_distance = min(center_distances)
                        else:
                            # Single center case
                            min_distance = np.linalg.norm(feat_np - center)

                    # Store data for this class
                    if orig_label not in class_data:
                        class_data[orig_label] = {
                            'confidences': [],
                            'distances': [],
                        }

                    class_data[orig_label]['confidences'].append(max_confidences[i])
                    class_data[orig_label]['distances'].append(min_distance)

        # Set thresholds for each class based on collected data
        tuned_classes = 0
        for orig_label, data in class_data.items():
            # Only tune if we have enough data
            if len(data['confidences']) >= 5:
                confidences = np.array(data['confidences'])
                distances = np.array(data['distances'])

                # Set confidence threshold at 5th percentile with a minimum
                conf_threshold = max(0.3, np.percentile(confidences, 5) - 0.05)

                # Set distance threshold at 95th percentile plus a margin
                dist_threshold = np.percentile(distances, 95) + 0.5

                # Limit to reasonable range
                dist_threshold = min(5.0, max(1.0, dist_threshold))

                # Only update if this class needs tuning or has extreme values
                if (orig_label in self.tuning_candidates or
                        np.min(confidences) < self.default_conf_threshold or
                        np.max(distances) > self.default_dist_threshold):

                    # Find model index for this original label
                    model_idx = label_dict.get(orig_label)
                    if model_idx is not None:
                        # NOTE: thresholds are stored under the MODEL index, not the original label
                        self.set_threshold(model_idx, conf_threshold, dist_threshold)
                        tuned_classes += 1

                        print(f"  Class {orig_label}: conf={conf_threshold:.2f}, dist={dist_threshold:.2f} " +
                              f"(from {len(data['confidences'])} samples)")

        print(f"✅ Tuned thresholds for {tuned_classes} classes")

    def serialize(self):
        """Convert to a plain dictionary for saving (see deserialize)."""
        return {
            'default_conf': self.default_conf_threshold,
            'default_dist': self.default_dist_threshold,
            'class_thresholds': self.class_thresholds,
        }

    @classmethod
    def deserialize(cls, data):
        """Create an instance from a dictionary produced by serialize()."""
        instance = cls(
            default_conf_threshold=data['default_conf'],
            default_dist_threshold=data['default_dist']
        )
        instance.class_thresholds = data['class_thresholds']
        return instance

    def __str__(self):
        """String representation for debugging"""
        parts = [f"Default: conf={self.default_conf_threshold:.2f}, dist={self.default_dist_threshold:.2f}"]

        for class_label, thresholds in sorted(self.class_thresholds.items()):
            parts.append(f"Class {class_label}: conf={thresholds['conf']:.2f}, dist={thresholds['dist']:.2f}")

        return "\n".join(parts)

class ClassCenterTracker:
    """Maintains momentum-averaged class centers in the embedding space.

    The centers are consumed by the open-set detector, which rejects
    samples whose embeddings lie too far from every known-class center.
    """

    def __init__(self, num_classes, feature_dim, device):
        """Allocate zeroed centers and per-class sample counters on *device*."""
        self.num_classes = num_classes
        self.feature_dim = feature_dim
        self.device = device

        self.centers = torch.zeros(num_classes, feature_dim).to(device)
        self.counts = torch.zeros(num_classes).to(device)

    def update(self, features, labels, momentum=0.9):
        """Fold a batch of embeddings into the running centers.

        Fixes vs. the original implementation:
        - features are detached so the centers never retain the autograd
          graph (this caused memory growth across training steps);
        - per-batch debug prints of full center tensors removed.
        """
        features = features.detach()
        for c in range(self.num_classes):
            mask = labels == c
            n = torch.sum(mask)
            if n == 0:
                continue

            class_mean = features[mask].mean(0)

            # First observation initializes the center; afterwards use EMA.
            if self.counts[c] > 0:
                self.centers[c] = momentum * self.centers[c] + (1 - momentum) * class_mean
            else:
                self.centers[c] = class_mean

            self.counts[c] += n

    def get_centers(self):
        """Return {class_index: center ndarray} for every class seen so far."""
        return {
            c: self.centers[c].detach().cpu().numpy()
            for c in range(self.num_classes)
            if self.counts[c] > 0
        }

    def compute_distances(self, features):
        """Euclidean distances [batch, num_classes] to all class centers.

        Computed under no_grad so the distance matrix never joins the graph.
        """
        with torch.no_grad():
            distances = torch.cdist(features, self.centers)
        return distances

class ModelEnsemble:
    """Wraps several trained models and averages their outputs."""

    def __init__(self, model_list, device):
        self.models = model_list
        self.device = device
        self.n_models = len(model_list)

    def __call__(self, signal, features):
        """Allow the ensemble to be used like a plain model."""
        return self.predict(signal, features)

    def predict(self, signal, features):
        """Return (mean logits, mean embeddings) over all member models."""
        signal = signal.to(self.device)
        features = features.to(self.device)

        with torch.no_grad():
            outputs = []
            for member in self.models:
                member.eval()
                outputs.append(member(signal, features))

        logit_stack = torch.stack([logits for logits, _ in outputs])
        embed_stack = torch.stack([emb for _, emb in outputs])
        return logit_stack.mean(dim=0), embed_stack.mean(dim=0)

    def extract_features(self, signal, features):
        """Return feature embeddings averaged over all member models."""
        signal = signal.to(self.device)
        features = features.to(self.device)

        with torch.no_grad():
            collected = []
            for member in self.models:
                member.eval()
                collected.append(member.extract_features(signal, features))

        return torch.stack(collected).mean(dim=0)

    def eval(self):
        """Put every member model in evaluation mode."""
        for member in self.models:
            member.eval()
        return self

    def train(self, mode=True):
        """Put every member model in the given training mode."""
        for member in self.models:
            member.train(mode)
        return self

def save_class_centers(class_centers, path="class_centers.pkl"):
    """Pickle the class-center dictionary to *path* and report success."""
    with open(path, "wb") as handle:
        pickle.dump(class_centers, handle)
    print(f"✅ 类中心已保存到 {path}")

def add_noise(signal, level=0.02):
    """Return *signal* plus zero-mean Gaussian noise scaled by *level*."""
    return signal + level * torch.randn_like(signal)

def shift_signal(signal, max_shift=10):
    """Randomly shift a 1-D signal left or right, zero-padding the gap.

    Fix: the zero padding now matches the input's dtype and device. The
    original always created default float32 CPU zeros, which failed (or
    silently promoted) for non-float32 or GPU tensors; the sibling
    enhanced_augment_signal already passed device= explicitly.
    """
    shift = random.randint(-max_shift, max_shift)
    if shift == 0:
        return signal
    pad = torch.zeros(abs(shift), dtype=signal.dtype, device=signal.device)
    if shift > 0:
        # Shift left: drop the first `shift` samples, pad at the end.
        return torch.cat([signal[shift:], pad])
    # Shift right: pad at the start, drop the last `-shift` samples.
    return torch.cat([pad, signal[:shift]])

def scale_signal(signal, scale_range=(0.85, 1.15)):
    """Multiply the signal by a random amplitude factor from *scale_range*."""
    low, high = scale_range
    return signal * random.uniform(low, high)


def preprocess_signal(signal, apply_filtering=True):
    """Standardize a raw signal and suppress outliers.

    Steps: optional Butterworth band-pass filtering, z-score normalization
    (mean-only when the signal is constant), then a Hampel filter that
    replaces points further than 3 scaled MADs from their window median.
    """
    processed = np.copy(signal)

    if apply_filtering:
        from scipy import signal as sp_signal
        # 4th-order band-pass; normalized cutoffs strip low-frequency drift
        # and high-frequency noise while preserving the informative band.
        b, a = sp_signal.butter(4, [0.05, 0.95], btype='band')
        try:
            processed = sp_signal.filtfilt(b, a, processed)
        except Exception as e:
            print(f"Warning: Filtering failed, using original signal. Error: {str(e)}")

    # Z-score standardization.
    mu = np.mean(processed)
    sigma = np.std(processed)
    processed = (processed - mu) / sigma if sigma > 0 else processed - mu

    # Hampel filter: replace local outliers with the window median.
    window_size = 7
    half_window = window_size // 2
    n = len(processed)
    cleaned = np.copy(processed)

    for idx in range(n):
        lo = max(0, idx - half_window)
        hi = min(n, idx + half_window + 1)
        neighborhood = processed[lo:hi]
        med = np.median(neighborhood)
        # 1.4826 scales the MAD into a standard-deviation estimate.
        mad = np.median(np.abs(neighborhood - med)) * 1.4826

        if mad > 0 and abs(processed[idx] - med) > 3 * mad:
            cleaned[idx] = med

    return cleaned


def extract_enhanced_features(signal):
    """Extended feature vector: base features plus band energies, envelope
    statistics, zero-crossing rate, wavelet-packet and AR coefficients.

    Each optional feature group degrades to zeros when its computation
    fails, so the output length stays stable for downstream scalers.

    Fix: the three bare ``except:`` clauses are narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit are no longer
    swallowed; the zero-fill fallback behavior is unchanged.
    """
    # Start from the baseline statistical features.
    features = extract_features(signal)

    additional_features = []

    # 1) Relative energy in custom frequency bands - helps separate classes
    #    with distinctive spectral signatures (e.g. class 10).
    fft_values = np.abs(np.fft.rfft(signal))
    freqs = np.fft.rfftfreq(len(signal))

    custom_bands = [(0.05, 0.15), (0.15, 0.25), (0.25, 0.35), (0.35, 0.45), (0.45, 0.55)]
    total_energy = np.sum(fft_values)

    for low, high in custom_bands:
        mask = (freqs >= low) & (freqs < high)
        if total_energy > 0:
            band_energy = np.sum(fft_values[mask]) / total_energy
        else:
            band_energy = 0
        additional_features.append(band_energy)

    # 2) Hilbert-envelope statistics.
    from scipy.signal import hilbert
    try:
        analytic_signal = hilbert(signal)
        envelope = np.abs(analytic_signal)
        additional_features.append(np.mean(envelope))
        additional_features.append(np.std(envelope))
        additional_features.append(np.max(envelope))
        additional_features.append(stats.skew(envelope))
    except Exception:
        # Fallback if the Hilbert transform fails.
        additional_features.extend([0, 0, 0, 0])

    # 3) Zero-crossing rate - distinguishes oscillation patterns.
    zero_crossings = np.where(np.diff(np.signbit(signal)))[0]
    additional_features.append(len(zero_crossings) / len(signal))

    # 4) Level-2 wavelet-packet statistics for finer frequency resolution.
    try:
        import pywt
        wp = pywt.WaveletPacket(signal, 'db4', maxlevel=2)
        nodes = [node.path for node in wp.get_level(2, 'natural')]
        for node in nodes:
            coef = wp[node].data
            additional_features.append(np.mean(np.abs(coef)))
            additional_features.append(np.std(coef))
    except Exception:
        # 4 level-2 nodes x 2 statistics each.
        additional_features.extend([0, 0, 0, 0, 0, 0, 0, 0])

    # 5) Autoregressive coefficients - capture temporal dependencies.
    try:
        from statsmodels.tsa.ar_model import AutoReg
        model = AutoReg(signal, lags=4).fit()
        additional_features.extend(model.params[1:])  # skip the intercept
    except Exception:
        # Fallback if the AR model fails (or statsmodels is unavailable).
        additional_features.extend([0, 0, 0, 0])

    # Combine original and additional features.
    return np.concatenate([features, additional_features])


def create_optimizer(model, lr=2.5e-4, weight_decay=3e-4):
    """AdamW with component-specific learning rates and weight decay.

    Convolutional parameters get a 1.3x learning rate; normalization-layer
    parameters keep the base LR but only half the weight decay; everything
    else uses the base settings. Parameters are routed by name substring.
    """
    conv_group, norm_group, rest_group = [], [], []

    for pname, p in model.named_parameters():
        lowered = pname.lower()
        if 'conv' in lowered:
            conv_group.append(p)
        elif any(tag in lowered for tag in ('bn', 'batch', 'norm')):
            norm_group.append(p)
        else:
            rest_group.append(p)

    return torch.optim.AdamW([
        {'params': conv_group, 'lr': lr * 1.3, 'weight_decay': weight_decay},
        {'params': norm_group, 'lr': lr * 1.0, 'weight_decay': weight_decay * 0.5},  # less regularization for norms
        {'params': rest_group, 'lr': lr, 'weight_decay': weight_decay}
    ])

def enhanced_augment_signal(signal, prob=0.8):
    """Randomly apply noise / shift / scale augmentations to a signal.

    With probability *prob* the signal is augmented; each individual
    augmentation then fires with its own probability. Otherwise the input
    tensor is returned untouched. (The RNG call order is fixed, so results
    are reproducible under a seeded `random` / `torch` state.)
    """
    if random.random() >= prob:
        return signal

    out = signal.clone()

    # Additive Gaussian noise (70% chance) with a controlled amplitude.
    if random.random() < 0.7:
        amplitude = random.uniform(0.01, 0.035)
        out = out + amplitude * torch.randn_like(out)

    # Zero-padded shift (60% chance), kept small relative to signal length.
    if random.random() < 0.6:
        bound = min(15, len(signal) // 30)
        offset = random.randint(-bound, bound)
        if offset > 0:
            out = torch.cat([out[offset:], torch.zeros(offset, device=out.device)])
        elif offset < 0:
            out = torch.cat([torch.zeros(-offset, device=out.device), out[:offset]])

    # Conservative amplitude scaling (50% chance).
    if random.random() < 0.5:
        out = out * random.uniform(0.85, 1.15)

    return out

def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Mixup loss: convex combination of the losses against both targets.

    Args:
        criterion: loss callable taking (prediction, target).
        pred: model prediction for the mixed input.
        y_a, y_b: the two targets that were mixed.
        lam: mixing coefficient in [0, 1].

    Returns:
        ``lam * loss(pred, y_a) + (1 - lam) * loss(pred, y_b)``.
    """
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b

def mixup_data(signals, features, labels, alpha=0.2):
    """Apply mixup to a batch of signals and their statistical features.

    Draws a mixing coefficient ``lam ~ Beta(alpha, alpha)`` (1 when
    ``alpha <= 0``) and combines each sample with a randomly permuted
    partner from the same batch.

    Args:
        signals: [B, ...] signal tensor.
        features: [B, F] feature tensor.
        labels: [B] label tensor.
        alpha: Beta distribution concentration; 0 disables mixing.

    Returns:
        (mixed_signals, mixed_features, labels_a, labels_b, lam)
        where ``labels_a`` are the original labels and ``labels_b`` the
        labels of the permuted partners.
    """
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1

    # One shared permutation so signals/features/labels stay aligned.
    perm = torch.randperm(signals.size(0)).to(signals.device)

    sig_mix = lam * signals + (1 - lam) * signals[perm]
    feat_mix = lam * features + (1 - lam) * features[perm]

    return sig_mix, feat_mix, labels, labels[perm], lam

def get_cache_path(file_path, prefix="features_cache"):
    """Build a cache file path keyed on the data file's path and mtime.

    The cache lives in a ``cache/`` subdirectory next to ``file_path`` and
    its name embeds an MD5 of ``"{path}_{mtime}"`` so that modifying the
    source file automatically invalidates the old cache.

    Args:
        file_path: path to the original data file (need not exist yet).
        prefix: filename prefix for the cache file.

    Returns:
        Absolute-ish path to the ``.pkl`` cache file (directory is created).
    """
    # For a file that does not exist yet there is no mtime; fall back to
    # the current timestamp (such a path yields a fresh, never-hit cache key).
    if os.path.exists(file_path):
        mod_time = os.stat(file_path).st_mtime
    else:
        mod_time = datetime.now().timestamp()

    digest = hashlib.md5(f"{file_path}_{mod_time}".encode()).hexdigest()

    base_dir = os.path.dirname(os.path.abspath(file_path))
    if os.path.exists(file_path) and os.path.isfile(file_path):
        # Existing regular file: put the cache next to it.
        cache_dir = os.path.join(base_dir, "cache")
    else:
        # Missing file (or a directory path): make sure its parent exists
        # before creating the cache directory inside it.
        os.makedirs(base_dir, exist_ok=True)
        cache_dir = os.path.join(base_dir, "cache")

    os.makedirs(cache_dir, exist_ok=True)

    return os.path.join(cache_dir, f"{prefix}_{digest}.pkl")

def save_features_cache(features, labels, raw_labels, signal_data, file_path):
    """Pickle extracted features and companion arrays to ``file_path``.

    Args:
        features: extracted feature matrix.
        labels: processed (mapped/filtered) labels.
        raw_labels: original labels as read from the data file.
        signal_data: raw signal matrix.
        file_path: destination path for the cache pickle.
    """
    payload = {
        'features': features,
        'labels': labels,
        'raw_labels': raw_labels,
        'signal': signal_data,
        'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        # Version tag kept for forward compatibility of the cache format.
        'version': '1.0'
    }

    with open(file_path, 'wb') as fh:
        pickle.dump(payload, fh)

    print(f"✅ Features cache saved to {file_path}")

def load_features_cache(file_path):
    """Load a previously saved features cache.

    Best-effort: any failure (missing file, corrupt pickle, wrong type)
    is reported and mapped to ``None`` so callers simply fall back to
    recomputing the features.

    Args:
        file_path: path to the cache pickle.

    Returns:
        The cached dict, or ``None`` on any error.
    """
    try:
        with open(file_path, 'rb') as fh:
            cached = pickle.load(fh)

        print(f"✅ Loaded features cache from {file_path}")
        print(f"   Cache created: {cached.get('timestamp', 'unknown')}")
        return cached
    except Exception as exc:
        # Deliberately broad: a bad cache must never crash the pipeline.
        print(f"❌ Error loading cache: {str(exc)}")
        return None

class SteelDataset(Dataset):
    """Dataset of labeled steel signals loaded from an Excel sheet.

    Each row holds an integer class label in column 0 followed by 450
    voltage samples (columns 1-450).  Per-sample statistical features are
    extracted with the module-level ``extract_features`` helper (feature
    count given by ``FEATURE_NAMES``) and cached to disk via
    ``save_features_cache`` so repeated runs skip the extraction step.
    """

    def __init__(self, file_path, augment=False, mixup=False, label_dict=None, known_classes=None, use_cache=True,
                 force_recompute=False):
        """Load (or restore from cache) signals, labels and features.

        Args:
            file_path: Excel file; label in column 0, signal in columns 1-450.
            augment: apply signal augmentation in ``__getitem__``.
            mixup: stored flag only; mixup itself is applied by the caller.
            label_dict: optional mapping original label -> model index; built
                from the data when ``None``.
            known_classes: original labels to keep as known; every other
                sample's mapped label becomes -1 (unknown).
            use_cache: read/write the on-disk features cache.
            force_recompute: ignore an existing cache and recompute.

        Raises:
            ValueError: if any row's label cannot be parsed as an integer.
        """
        # Cache handling
        self.cache_path = get_cache_path(file_path)
        self.use_cache = use_cache
        cache_data = None

        # Try to load from cache first if enabled and not forced to recompute
        if self.use_cache and not force_recompute and os.path.exists(self.cache_path):
            cache_data = load_features_cache(self.cache_path)

        # If we have valid cache data, use it
        if cache_data is not None:
            # NOTE(review): the cache key depends only on file path + mtime,
            # so the cached `labels` reflect whatever label_dict/known_classes
            # were in effect when the cache was written; the known-class
            # filtering below is NOT re-applied on this path — confirm that
            # callers never change `known_classes` between runs of the same file.
            print("Using cached features and skipping feature extraction...")
            self.features = cache_data['features']
            self.labels = cache_data['labels']
            self.raw_labels = cache_data['raw_labels']
            self.signal = cache_data['signal']

            # We still need to create the label dictionary
            if label_dict is not None:
                # Use external provided label_dict
                self.label_dict = label_dict
                print(f"Using provided label dictionary: {self.label_dict}")
            else:
                # Create label dictionary based on cached raw_labels
                unique_labels = np.unique(self.raw_labels)
                self.label_dict = {label: idx for idx, label in enumerate(unique_labels)}
                print(f"Created new label dictionary from cache: {self.label_dict}")
        else:
            # No cache available or recomputation forced, process from scratch
            print(f"Processing data from {file_path}...")
            # Load data from Excel
            df = pd.read_excel(file_path, header=None)

            # Check for invalid labels
            error_rows = []
            self.raw_labels = []

            # Check each row for valid labels
            for idx, row in df.iterrows():
                raw_label = row[0]  # Assuming label is in first column
                try:
                    # Try to convert to integer
                    label = int(float(raw_label))  # Handle float representation of integers
                    self.raw_labels.append(label)
                except (ValueError, TypeError) as e:
                    excel_row = idx + 2  # Excel row numbers start at 1, and header is row 1
                    print(f"Invalid label found - Row: {excel_row}, Value: '{raw_label}', Error: {str(e)}")
                    error_rows.append({
                        "excel_row": excel_row,
                        "raw_value": raw_label,
                        "error_type": str(e)
                    })

            # Raise error if invalid labels were found
            # (so raw_labels can never end up misaligned with the signals)
            if error_rows:
                raise ValueError(f"Found {len(error_rows)} invalid labels. Check the rows listed above.")

            # Convert to numpy array
            self.raw_labels = np.array(self.raw_labels)

            # Extract signals (columns 1-450 assuming 450 voltage data points)
            self.signal = df.iloc[:, 1:451].values

            # Print distribution of original labels
            unique_raw_labels, counts = np.unique(self.raw_labels, return_counts=True)
            print(f"Original label distribution: {list(zip(unique_raw_labels, counts))}")

            # Create or use label dictionary
            if label_dict is not None:
                # Use provided label_dict
                self.label_dict = label_dict
                print(f"Using provided label dictionary: {self.label_dict}")
            else:
                # Create new label dictionary
                unique_labels = np.unique(self.raw_labels)
                self.label_dict = {label: idx for idx, label in enumerate(unique_labels)}
                print(f"Created new label dictionary: {self.label_dict}")

            # Create reverse mapping for debugging
            self.reverse_dict = {idx: label for label, idx in self.label_dict.items()}
            print(f"Reverse mapping dictionary: {self.reverse_dict}")

            # Map original labels to model indices
            # (labels absent from label_dict map to -1, i.e. unknown)
            self.labels = np.array([self.label_dict.get(label, -1) for label in self.raw_labels])

            # Handle known classes filtering
            self.known_classes = known_classes
            if known_classes is not None:
                known_indices = [self.label_dict.get(label) for label in known_classes if label in self.label_dict]
                # Update labels: if sample index is in known_indices, keep original; otherwise mark as -1 (unknown)
                mask = np.isin(self.labels, known_indices)
                self.labels = np.where(mask, self.labels, -1)

                # Print known/unknown distribution
                known_count = sum(mask)
                unknown_count = len(self.labels) - known_count
                print(f"Dataset has {known_count} known samples and {unknown_count} unknown samples")

                # Print known class mapping details
                print(f"Known classes original labels: {known_classes}")
                print(f"Known classes mapped indices: {known_indices}")

                # Check for missing classes
                if len(known_indices) != len(known_classes):
                    missing = set(known_classes) - set(self.label_dict.keys())
                    print(f"Warning: Some known classes don't exist in the dataset: {missing}")
                    print(f"Unique labels in dataset: {list(self.label_dict.keys())}")

            # Extract features
            # NOTE(review): FEATURE_NAMES / extract_features are defined
            # elsewhere in this module; feature dimensionality must match
            # CFG["input_dim"] — confirm.
            print(f"Extracting features for {len(df)} samples...")
            self.features = np.zeros((len(df), len(FEATURE_NAMES)))
            for i in range(len(df)):
                if i % 100 == 0:
                    print(f"Processing {i}/{len(df)} samples...")
                self.features[i] = extract_features(self.signal[i])

            # Save cache for future use
            if self.use_cache:
                save_features_cache(
                    features=self.features,
                    labels=self.labels,
                    raw_labels=self.raw_labels,
                    signal_data=self.signal,
                    file_path=self.cache_path
                )

        # Set properties for data augmentation
        self.augment = augment
        self.mixup = mixup
        # Rebuilt here so the cache path (which skips the block above) also
        # gets a reverse mapping.
        self.reverse_dict = {idx: label for label, idx in self.label_dict.items()}

    def __len__(self):
        # Number of samples in the dataset.
        return len(self.labels)

    def __getitem__(self, idx):
        """Return (signal, features, label) tensors for sample ``idx``."""
        signal = self.signal[idx]
        features = self.features[idx]
        label = self.labels[idx]

        # Convert to tensors
        signal = torch.tensor(signal, dtype=torch.float32)
        features = torch.tensor(features, dtype=torch.float32)
        label = torch.tensor(label, dtype=torch.long)

        # Apply class-specific preprocessing and augmentation
        if self.augment:
            # NOTE(review): calls module-level `augment_signal`, presumably
            # defined elsewhere in this file; an `enhanced_augment_signal`
            # variant also exists above — confirm which one is intended here.
            signal = augment_signal(signal, prob=0.8)

        return signal, features, label

class LabelSmoothingLoss(nn.Module):
    """Cross-entropy with label smoothing.

    The target distribution places ``1 - smoothing`` on the true class and
    spreads ``smoothing`` uniformly over the remaining ``classes - 1``
    labels, then the mean KL-style cross-entropy against the model's
    log-softmax output is returned.
    """

    def __init__(self, classes, smoothing=0.1):
        super().__init__()
        self.confidence = 1.0 - smoothing  # mass kept on the true label
        self.smoothing = smoothing
        self.classes = classes

    def forward(self, pred, target):
        """Return the smoothed cross-entropy for logits ``pred`` and int targets."""
        log_probs = pred.log_softmax(dim=-1)
        with torch.no_grad():
            # Uniform off-target mass, then place the confidence on targets.
            smooth_targets = torch.full_like(log_probs, self.smoothing / (self.classes - 1))
            smooth_targets.scatter_(1, target.unsqueeze(1), self.confidence)
        return (-smooth_targets * log_probs).sum(dim=-1).mean()

class SupConLoss(nn.Module):
    """Supervised contrastive loss (SupCon).

    Pulls together L2-normalized embeddings of same-label samples and
    pushes apart different-label samples, using a temperature-scaled
    cosine-similarity matrix with self-similarities masked out.
    """

    def __init__(self, temperature=0.05):
        super(SupConLoss, self).__init__()
        self.temperature = temperature

    def forward(self, features, labels):
        """Compute the batch loss.

        Args:
            features: [batch_size, feature_dim] embeddings (normalized here).
            labels: [batch_size] integer class labels.

        Returns:
            Scalar loss tensor; 0 when no anchor has a positive partner.
        """
        dev = features.device
        n = features.shape[0]

        # Cosine similarities scaled by temperature.
        normed = F.normalize(features, dim=1)
        sims = torch.matmul(normed, normed.T) / self.temperature

        # Mask the diagonal so an anchor never contrasts with itself.
        self_mask = torch.eye(n, dtype=torch.bool).to(dev)
        sims = sims.masked_fill(self_mask, -float('inf'))

        # Accumulate -log(p_positive) per anchor that has at least one positive.
        total = 0
        num_anchors = 0
        for anchor in range(n):
            partners = torch.where(labels == labels[anchor])[0]
            partners = partners[partners != anchor]  # drop the anchor itself
            if len(partners) == 0:
                continue
            numerator = torch.logsumexp(sims[anchor, partners], dim=0)
            denominator = torch.logsumexp(sims[anchor, :], dim=0)
            total = total + (denominator - numerator)
            num_anchors += 1

        # No positive pairs anywhere -> zero loss by convention.
        if num_anchors == 0:
            return torch.tensor(0.0).to(dev)

        return total / num_anchors


def visualize_class_specific_thresholds(model, data_loader, device, class_centers, thresholds=None, known_classes=None):
    """
    Visualizes confidence and distance distributions for each class to help with threshold setting

    Produces three figures (confidence histograms, own-center distance
    histograms, own-vs-closest-other-center comparison), prints recommended
    per-class thresholds, and prints a copy-paste-ready
    ``setup_class_specific_thresholds`` body.

    Args:
        model: Trained model
        data_loader: DataLoader containing the data to visualize
        device: Computing device
        class_centers: Dictionary of class centers
        thresholds: Optional ClassSpecificThresholds object
        known_classes: List of known class indices

    Returns:
        Dict mapping original class label -> collected statistics
        (confidences / distances), or ``None`` when no class data exists.
    """
    model.eval()

    # Get label mapping
    if hasattr(data_loader.dataset, 'dataset'):
        # This is a Subset
        label_dict = data_loader.dataset.dataset.label_dict
        reverse_dict = {v: k for k, v in label_dict.items()}
    else:
        # This is a regular dataset
        label_dict = data_loader.dataset.label_dict
        reverse_dict = {v: k for k, v in label_dict.items()}

    # Set default thresholds if none provided
    # NOTE(review): ClassSpecificThresholds is defined elsewhere in this file.
    if thresholds is None:
        default_conf = CFG.get("conf_threshold", 0.5)
        default_dist = CFG.get("distance_threshold", 3.0)
        thresholds = ClassSpecificThresholds(default_conf, default_dist)

    # Collect data by class
    class_data = {}

    with torch.no_grad():
        for signal, features, labels in data_loader:
            signal, features = signal.to(device), features.to(device)

            outputs, _ = model(signal, features)
            batch_features = model.extract_features(signal, features)

            # Get confidence scores
            probs = F.softmax(outputs, dim=1).cpu().numpy()
            max_confidences = np.max(probs, axis=1)

            # Process each sample
            for i, (feat, label) in enumerate(zip(batch_features, labels)):
                # Convert to numpy for distance calculation
                feat_np = feat.cpu().numpy()
                label_np = label.item()

                # Get original label
                orig_label = reverse_dict.get(label_np, label_np)

                # Initialize class data if not exists
                if orig_label not in class_data:
                    class_data[orig_label] = {
                        'confidences': [],
                        'distances': [],
                        'to_own_center_distances': [],
                        'to_other_centers_distances': {}
                    }

                # Store confidence
                class_data[orig_label]['confidences'].append(max_confidences[i])

                # Calculate distances to all class centers
                for class_idx in class_centers:
                    center = class_centers[class_idx]
                    orig_class_idx = reverse_dict.get(class_idx, class_idx)

                    # Handle different center formats
                    if isinstance(center, dict) and 'centers' in center:
                        # Multiple centers case
                        centers = center['centers']
                        weights = center.get('weights', np.ones(len(centers)) / len(centers))

                        # Find minimum weighted distance to any center
                        # NOTE(review): distance is divided by the weight, so a
                        # larger weight shrinks the effective distance — confirm
                        # this matches how the centers/weights were fitted.
                        min_dist = float('inf')
                        for c, w in zip(centers, weights):
                            dist = np.linalg.norm(feat_np - c) * (1 / w)
                            min_dist = min(min_dist, dist)
                        dist = min_dist
                    else:
                        # Single center case
                        dist = np.linalg.norm(feat_np - center)

                    # Store distance to own class center
                    if class_idx == label_np:
                        class_data[orig_label]['to_own_center_distances'].append(dist)

                    # Store distances to other centers
                    if orig_class_idx not in class_data[orig_label]['to_other_centers_distances']:
                        class_data[orig_label]['to_other_centers_distances'][orig_class_idx] = []

                    class_data[orig_label]['to_other_centers_distances'][orig_class_idx].append(dist)

                # Store minimum distance to any center as the main distance
                # NOTE(review): the generator below only yields the last
                # own-center distance (when class_idx == label_np) and inf
                # otherwise, so `min_distance` is the own-center distance — or
                # inf for samples whose label has no center (e.g. unknowns).
                if len(class_centers) > 0:
                    min_distance = min(class_data[orig_label]['to_own_center_distances'][-1]
                                       if class_idx == label_np else float('inf')
                                       for class_idx in class_centers)
                    class_data[orig_label]['distances'].append(min_distance)

    # Filter out unknown class if needed
    if -1 in class_data and known_classes is not None:
        class_data.pop(-1, None)

    # Create visualizations for each class
    num_classes = len(class_data)

    if num_classes == 0:
        print("No class data available for visualization.")
        return

    # Determine grid size for subplots
    grid_size = int(np.ceil(np.sqrt(num_classes)))

    # Create a figure for confidence distributions
    plt.figure(figsize=(20, 15))
    plt.suptitle('Confidence Score Distributions by Class', fontsize=20, y=0.95)

    for i, (class_label, data) in enumerate(sorted(class_data.items())):
        plt.subplot(grid_size, grid_size, i + 1)

        if len(data['confidences']) > 0:
            # Plot histogram of confidence scores
            plt.hist(data['confidences'], bins=30, alpha=0.7, color='blue')

            # Get current threshold for this class
            if isinstance(class_label, int) and class_label in label_dict:
                model_idx = label_dict[class_label]
                conf_thresh, _ = thresholds.get_thresholds(model_idx)
            else:
                conf_thresh = thresholds.default_conf_threshold

            # Add threshold line
            plt.axvline(x=conf_thresh, color='red', linestyle='--',
                        label=f'Threshold: {conf_thresh:.2f}')

            # Add statistics
            mean_conf = np.mean(data['confidences'])
            min_conf = np.min(data['confidences'])
            q1_conf = np.percentile(data['confidences'], 5)

            plt.annotate(f'Mean: {mean_conf:.2f}\nMin: {min_conf:.2f}\n5th %: {q1_conf:.2f}',
                         xy=(0.05, 0.95), xycoords='axes fraction',
                         fontsize=9, bbox=dict(boxstyle='round', facecolor='white', alpha=0.8),
                         verticalalignment='top')

            # Suggested threshold
            # Heuristic: 5th percentile minus a small margin, floored at 0.3.
            suggested_conf = max(0.3, q1_conf - 0.05)
            plt.axvline(x=suggested_conf, color='green', linestyle=':',
                        label=f'Suggested: {suggested_conf:.2f}')

            plt.title(f'Class {class_label} (n={len(data["confidences"])})')
            plt.xlabel('Confidence Score')
            plt.ylabel('Count')
            plt.legend(loc='upper left', fontsize=8)
            plt.grid(alpha=0.3)

    plt.tight_layout(rect=[0, 0, 1, 0.95])
    plt.show()

    # Create a figure for distance distributions
    plt.figure(figsize=(20, 15))
    plt.suptitle('Distance to Center Distributions by Class', fontsize=20, y=0.95)

    for i, (class_label, data) in enumerate(sorted(class_data.items())):
        plt.subplot(grid_size, grid_size, i + 1)

        if len(data['to_own_center_distances']) > 0:
            # Plot histogram of distances to own center
            plt.hist(data['to_own_center_distances'], bins=30, alpha=0.7, color='blue',
                     label=f'To Own Center')

            # Get current threshold for this class
            if isinstance(class_label, int) and class_label in label_dict:
                model_idx = label_dict[class_label]
                _, dist_thresh = thresholds.get_thresholds(model_idx)
            else:
                dist_thresh = thresholds.default_dist_threshold

            # Add threshold line
            plt.axvline(x=dist_thresh, color='red', linestyle='--',
                        label=f'Threshold: {dist_thresh:.2f}')

            # Add statistics
            mean_dist = np.mean(data['to_own_center_distances'])
            max_dist = np.max(data['to_own_center_distances'])
            q3_dist = np.percentile(data['to_own_center_distances'], 95)

            plt.annotate(f'Mean: {mean_dist:.2f}\nMax: {max_dist:.2f}\n95th %: {q3_dist:.2f}',
                         xy=(0.05, 0.95), xycoords='axes fraction',
                         fontsize=9, bbox=dict(boxstyle='round', facecolor='white', alpha=0.8),
                         verticalalignment='top')

            # Suggested threshold
            # Heuristic: 95th percentile plus a margin, capped at 5.0.
            suggested_dist = min(5.0, q3_dist + 0.5)
            plt.axvline(x=suggested_dist, color='green', linestyle=':',
                        label=f'Suggested: {suggested_dist:.2f}')

            plt.title(f'Class {class_label} (n={len(data["to_own_center_distances"])})')
            plt.xlabel('Distance to Center')
            plt.ylabel('Count')
            plt.legend(loc='upper right', fontsize=8)
            plt.grid(alpha=0.3)

    plt.tight_layout(rect=[0, 0, 1, 0.95])
    plt.show()

    # Create a figure for comparing distances to own vs. other centers
    plt.figure(figsize=(20, 15))
    plt.suptitle('Distance Comparison: Own vs. Other Centers', fontsize=20, y=0.95)

    for i, (class_label, data) in enumerate(sorted(class_data.items())):
        plt.subplot(grid_size, grid_size, i + 1)

        if len(data['to_own_center_distances']) > 0:
            # Plot own center distances
            plt.hist(data['to_own_center_distances'], bins=30, alpha=0.7, color='blue',
                     label=f'To Own Center')

            # Find most confusing other class (closest centers)
            other_class_mean_dists = {}
            for other_class, dists in data['to_other_centers_distances'].items():
                if other_class != class_label:
                    other_class_mean_dists[other_class] = np.mean(dists)

            if other_class_mean_dists:
                # Sort by mean distance (ascending)
                sorted_classes = sorted(other_class_mean_dists.items(), key=lambda x: x[1])

                # Plot closest other class
                closest_class, _ = sorted_classes[0]
                closest_dists = data['to_other_centers_distances'][closest_class]
                plt.hist(closest_dists, bins=30, alpha=0.5, color='red',
                         label=f'To Class {closest_class}')

                # Add separability measure
                # (relative gap between the closest-other mean and the own mean)
                own_mean = np.mean(data['to_own_center_distances'])
                other_mean = np.mean(closest_dists)
                separability = (other_mean - own_mean) / max(own_mean, 0.001)

                plt.annotate(f'Separability from Class {closest_class}: {separability:.2f}',
                             xy=(0.05, 0.85), xycoords='axes fraction',
                             fontsize=9, bbox=dict(boxstyle='round', facecolor='white', alpha=0.8),
                             verticalalignment='top')

                # Indicate if this class needs tuning
                if separability < 0.5:  # This threshold can be adjusted
                    plt.title(f'Class {class_label} ⚠️ Low Separability', color='red')
                else:
                    plt.title(f'Class {class_label}')

                plt.xlabel('Distance')
                plt.ylabel('Count')
                plt.legend(loc='upper right', fontsize=8)
                plt.grid(alpha=0.3)

    plt.tight_layout(rect=[0, 0, 1, 0.95])
    plt.show()

    # Print recommended thresholds
    print("\n📊 Recommended Class-Specific Thresholds:")
    for class_label, data in sorted(class_data.items()):
        if len(data['confidences']) < 5 or len(data['to_own_center_distances']) < 5:
            continue  # Skip classes with too few samples

        conf_5th = np.percentile(data['confidences'], 5)
        dist_95th = np.percentile(data['to_own_center_distances'], 95)

        # Calculate suggested thresholds
        suggested_conf = max(0.3, conf_5th - 0.05)
        suggested_dist = min(5.0, dist_95th + 0.5)

        # Get current thresholds
        if isinstance(class_label, int) and class_label in label_dict:
            model_idx = label_dict[class_label]
            current_conf, current_dist = thresholds.get_thresholds(model_idx)

            # Check if adjustment is needed
            conf_diff = abs(suggested_conf - current_conf)
            dist_diff = abs(suggested_dist - current_dist)

            status = " "
            if conf_diff > 0.1 or dist_diff > 0.5:
                status = "⚠️"

            print(f"{status} Class {class_label}: conf={suggested_conf:.2f} (current: {current_conf:.2f}), " +
                  f"dist={suggested_dist:.2f} (current: {current_dist:.2f})")
        else:
            print(f" Class {class_label}: conf={suggested_conf:.2f}, dist={suggested_dist:.2f}")

    # Generate detailed threshold recommendations for easy copy-paste
    print("\n# Copy-paste ready threshold settings:")
    print("def setup_class_specific_thresholds():")
    print("    thresholds = ClassSpecificThresholds(")
    print(f"        default_conf_threshold={thresholds.default_conf_threshold:.2f},")
    print(f"        default_dist_threshold={thresholds.default_dist_threshold:.2f}")
    print("    )")
    print("")

    for class_label, data in sorted(class_data.items()):
        if len(data['confidences']) < 5 or len(data['to_own_center_distances']) < 5:
            continue  # Skip classes with too few samples

        # Get suggested thresholds
        conf_5th = np.percentile(data['confidences'], 5)
        dist_95th = np.percentile(data['to_own_center_distances'], 95)

        suggested_conf = max(0.3, conf_5th - 0.05)
        suggested_dist = min(5.0, dist_95th + 0.5)

        # Only include if this is a known class
        if isinstance(class_label, int) and class_label in label_dict:
            orig_label = class_label
            print(
                f"    thresholds.set_threshold(class_label={orig_label}, conf_threshold={suggested_conf:.2f}, dist_threshold={suggested_dist:.2f})")

    print("\n    return thresholds")

    return class_data


def generate_threshold_visualizations(model, train_loader, val_loader, test_loader, device, class_centers,
                                      known_classes, thresholds=None):
    """
    Generate comprehensive visualizations for threshold setting and analysis

    Runs the per-class threshold analysis on validation and test data,
    plots confidence-vs-distance and 2D feature-space views, then prints a
    combined (val/test-averaged) copy-paste-ready threshold setup.

    NOTE(review): relies on helpers defined elsewhere in this module
    (setup_class_specific_thresholds, plot_threshold_visualization,
    visualize_feature_space, visualize_feature_distributions).

    Args:
        model: Trained model
        train_loader: DataLoader for training data
        val_loader: DataLoader for validation data
        test_loader: DataLoader for test data
        device: Computing device
        class_centers: Dictionary of class centers
        known_classes: List of known class indices
        thresholds: Optional ClassSpecificThresholds object

    Returns:
        Dict mapping class label -> recommended thresholds gathered from
        the validation ('val_conf'/'val_dist') and test
        ('test_conf'/'test_dist') analyses.
    """
    print("\n📊 Generating threshold visualizations...")

    # Set up thresholds if not provided
    if thresholds is None:
        thresholds = setup_class_specific_thresholds()

    # 1. First, analyze class distributions on validation data
    print("\n🔍 Analyzing validation data class distributions...")
    val_class_data = visualize_class_specific_thresholds(
        model, val_loader, device, class_centers, thresholds, known_classes
    )

    # 2. Then, analyze class distributions on test data
    print("\n🔍 Analyzing test data class distributions...")
    test_class_data = visualize_class_specific_thresholds(
        model, test_loader, device, class_centers, thresholds, known_classes
    )

    # 3. Generate the overall confidence vs. distance visualization
    print("\n🔍 Generating confidence vs. distance visualization...")
    confidences, distances, binary_labels = plot_threshold_visualization(
        model, test_loader, device, known_classes, class_centers
    )

    # 4. Generate 2D feature space visualization
    print("\n🔍 Generating 2D feature space visualization...")
    visualize_feature_space(model, train_loader, test_loader, device)

    # 5. Generate distributions of distances for each class
    # Rebuilt without shuffling so distances line up with dataset order;
    # unwraps a Subset to reach the full underlying dataset.
    print("\n🔍 Analyzing feature distributions...")
    train_full_loader = DataLoader(
        train_loader.dataset.dataset if hasattr(train_loader.dataset, 'dataset') else train_loader.dataset,
        batch_size=CFG["batch_size"],
        shuffle=False,
        num_workers=CFG["num_workers"]
    )

    train_distances, test_distances = visualize_feature_distributions(
        model, train_full_loader, test_loader, device, class_centers
    )

    # 6. Finally, print out recommended threshold settings based on all analyses
    print("\n📋 Final Class-Specific Threshold Recommendations:")

    # Get reverse mapping for ease of use
    if hasattr(train_loader.dataset, 'dataset'):
        label_dict = train_loader.dataset.dataset.label_dict
    else:
        label_dict = train_loader.dataset.label_dict

    reverse_dict = {v: k for k, v in label_dict.items()}

    # Create summary of all recommendations
    all_recommendations = {}

    # Process validation data recommendations
    # (same 5th/95th-percentile heuristics as the per-class analysis)
    for class_label, data in val_class_data.items():
        if len(data['confidences']) < 5:
            continue

        if class_label not in all_recommendations:
            all_recommendations[class_label] = {}

        conf_5th = np.percentile(data['confidences'], 5)
        dist_95th = np.percentile(data['to_own_center_distances'], 95)

        all_recommendations[class_label]['val_conf'] = max(0.3, conf_5th - 0.05)
        all_recommendations[class_label]['val_dist'] = min(5.0, dist_95th + 0.5)

    # Process test data recommendations
    for class_label, data in test_class_data.items():
        if len(data['confidences']) < 5:
            continue

        if class_label not in all_recommendations:
            all_recommendations[class_label] = {}

        conf_5th = np.percentile(data['confidences'], 5)
        dist_95th = np.percentile(data['to_own_center_distances'], 95)

        all_recommendations[class_label]['test_conf'] = max(0.3, conf_5th - 0.05)
        all_recommendations[class_label]['test_dist'] = min(5.0, dist_95th + 0.5)

    # Print final recommendations
    # (only classes that appeared in BOTH val and test analyses are emitted)
    print("\ndef setup_class_specific_thresholds():")
    print("    thresholds = ClassSpecificThresholds(")
    print(f"        default_conf_threshold={thresholds.default_conf_threshold:.2f},")
    print(f"        default_dist_threshold={thresholds.default_dist_threshold:.2f}")
    print("    )")
    print("")

    for class_label, recs in sorted(all_recommendations.items()):
        if 'val_conf' in recs and 'test_conf' in recs:
            # Average the recommendations from validation and test
            avg_conf = (recs['val_conf'] + recs['test_conf']) / 2
            avg_dist = (recs['val_dist'] + recs['test_dist']) / 2

            # Original label if it's in the mapping
            orig_label = class_label
            if class_label in reverse_dict:
                orig_label = reverse_dict[class_label]

            print(
                f"    # Class {class_label} (original label: {orig_label if orig_label != class_label else class_label})")
            print(
                f"    thresholds.set_threshold(class_label={orig_label}, conf_threshold={avg_conf:.2f}, dist_threshold={avg_dist:.2f})")

    print("\n    return thresholds")

    return all_recommendations

def evaluate_open_set_improved(model, test_loader, device, known_classes, threshold=0.7, class_centers=None, distance_threshold=2.5, thresholds=None, verbose=True):
    """Open-set evaluation combining softmax confidence with feature-space distance.

    A sample is accepted as a known class only when its max softmax probability
    clears the (class-specific) confidence threshold AND its distance to the
    closest class center is below the (class-specific) distance threshold;
    otherwise it is predicted as unknown (-1).

    Args:
        model: trained model exposing ``extract_features(signal, features)``.
        test_loader: DataLoader whose dataset carries ``label_dict`` mapping
            original labels to model indices.
        device: torch device for inference.
        known_classes: original (unmapped) labels treated as known.
        threshold: legacy parameter, not used (kept for interface compatibility).
        class_centers: dict of mapped class index -> center vector, or
            ``{'centers': ..., 'weights': ...}`` for multi-center classes.
            May be None/empty, in which case every sample becomes unknown.
        distance_threshold: legacy parameter, not used (class-specific
            thresholds from ``thresholds``/CFG are used instead).
        thresholds: ClassSpecificThresholds; built from CFG defaults if None.
        verbose: print diagnostics, reports and the confusion matrix.

    Returns:
        (all_preds, all_true_labels, all_probs_combined): predictions in the
        original label space (-1 = unknown), true original labels, and the
        stacked softmax probability matrix.
    """
    model.eval()
    all_preds = []
    all_true_labels = []
    all_probs = []
    all_distances = []
    all_confidences = []

    # Track per-class metrics
    class_metrics = {}

    # Default thresholds if none provided
    if thresholds is None:
        default_conf = CFG.get("conf_threshold", 0.5)
        default_dist = CFG.get("distance_threshold", 3.0)
        thresholds = ClassSpecificThresholds(default_conf, default_dist)

    # Fix: the parameter defaults to None, but the per-sample loop runs
    # `class_idx in class_centers`, which raises TypeError on None.
    if class_centers is None:
        class_centers = {}

    # Get label mapping (original label -> model index) and its inverse
    label_dict = test_loader.dataset.label_dict
    mapped_known_classes = [label_dict.get(c) for c in known_classes if c in label_dict]
    reverse_dict = {v: k for k, v in label_dict.items()}

    if verbose:
        print(f"Open-set evaluation - Known classes {known_classes} mapped to indices: {mapped_known_classes}")
        print(f"Using class-specific thresholds: {thresholds}")

    # For debugging - track some examples
    debug_samples = []

    # Track threshold effectiveness
    threshold_results = {
        'true_positives': 0,  # Known correctly classified
        'false_positives': 0,  # Unknown classified as known
        'true_negatives': 0,  # Unknown correctly classified
        'false_negatives': 0,  # Known classified as unknown
        'misclassifications': 0  # Known class but wrong prediction
    }

    with torch.no_grad():
        for batch_idx, (signal, features, labels) in enumerate(test_loader):
            signal, features = signal.to(device), features.to(device)
            labels = labels.to(device)

            # Get model outputs
            outputs, _ = model(signal, features)
            batch_features = model.extract_features(signal, features)

            # Calculate probabilities
            probs = F.softmax(outputs, dim=1).cpu().numpy()
            max_probs = np.max(probs, axis=1)
            all_probs.append(probs)
            all_confidences.extend(max_probs)

            # Process each sample
            for i in range(len(outputs)):
                # Get predicted class index (in model space); note the final
                # prediction is driven by the closest center, not this argmax
                pred_idx = np.argmax(probs[i])

                # Get the true label (in original label space)
                true_label = labels[i].item()
                original_label = reverse_dict.get(true_label, -1)

                # Extract feature vector for this sample
                feat = batch_features[i].cpu().numpy()

                # Calculate distance to each class center
                min_distance = float('inf')
                closest_class = -1

                # Track distances for debugging
                all_class_distances = {}

                for class_idx in mapped_known_classes:
                    if class_idx in class_centers:
                        center = class_centers[class_idx]
                        # Handle different center formats
                        if isinstance(center, dict) and 'centers' in center:
                            # Multiple centers case with weighted distances
                            centers = center['centers']
                            weights = center.get('weights', np.ones(len(centers)) / len(centers))

                            # Find closest center with weighted distance
                            class_dist = float('inf')
                            for c_idx, (c, w) in enumerate(zip(centers, weights)):
                                # Higher weight shrinks the effective distance,
                                # so high-quality centers attract more samples
                                weighted_dist = np.linalg.norm(feat - c) * (1 / w)
                                if weighted_dist < class_dist:
                                    class_dist = weighted_dist
                        else:
                            # Single center case
                            class_dist = np.linalg.norm(feat - center)

                        all_class_distances[class_idx] = class_dist

                        if class_dist < min_distance:
                            min_distance = class_dist
                            closest_class = class_idx

                # Store distance for this sample
                all_distances.append(min_distance)

                # Get confidence and distance thresholds for the closest class
                if closest_class != -1:
                    conf_threshold, dist_threshold = thresholds.get_thresholds(closest_class)
                else:
                    # Use default thresholds if no closest class found
                    conf_threshold, dist_threshold = thresholds.default_conf_threshold, thresholds.default_dist_threshold

                # Apply dual threshold logic with class-specific thresholds
                if max_probs[i] < conf_threshold or min_distance > dist_threshold:
                    final_pred = -1  # Unknown
                else:
                    final_pred = reverse_dict.get(closest_class, -1)  # Use closest class

                # Store predictions and labels
                all_preds.append(final_pred)
                all_true_labels.append(original_label)

                # Track threshold performance
                is_truly_known = original_label in known_classes
                is_predicted_known = final_pred != -1

                if is_truly_known and is_predicted_known:
                    if final_pred == original_label:
                        threshold_results['true_positives'] += 1
                    else:
                        threshold_results['misclassifications'] += 1
                elif is_truly_known and not is_predicted_known:
                    threshold_results['false_negatives'] += 1
                elif not is_truly_known and is_predicted_known:
                    threshold_results['false_positives'] += 1
                elif not is_truly_known and not is_predicted_known:
                    threshold_results['true_negatives'] += 1

                # Update per-class metrics
                if original_label not in class_metrics:
                    class_metrics[original_label] = {
                        'count': 0,
                        'correct': 0,
                        'unknown': 0,  # Falsely classified as unknown
                        'wrong_class': 0,  # Classified as wrong known class
                        'distances': [],
                        'confidences': []
                    }

                class_metrics[original_label]['count'] += 1
                class_metrics[original_label]['distances'].append(min_distance)
                class_metrics[original_label]['confidences'].append(max_probs[i])

                if final_pred == original_label:
                    class_metrics[original_label]['correct'] += 1
                elif final_pred == -1 and original_label != -1:
                    class_metrics[original_label]['unknown'] += 1
                elif final_pred != original_label and final_pred != -1 and original_label != -1:
                    class_metrics[original_label]['wrong_class'] += 1

                # Store some samples for debugging (first 5, plus up to 5 errors)
                if len(debug_samples) < 5 or (final_pred != original_label and len(debug_samples) < 10):
                    debug_samples.append({
                        'index': batch_idx * test_loader.batch_size + i,
                        'true_label': original_label,
                        'pred_label': final_pred,
                        'confidence': max_probs[i],
                        'min_distance': min_distance,
                        'conf_threshold': conf_threshold,
                        'dist_threshold': dist_threshold,
                        'all_distances': {reverse_dict.get(k, k): v for k, v in all_class_distances.items()}
                    })

    if verbose:
        # Print debugging information
        print("\n🔍 Sample Predictions:")
        for i, sample in enumerate(debug_samples):
            print(f"Sample {i + 1}:")
            print(f"  True label: {sample['true_label']}, Predicted: {sample['pred_label']}")
            print(f"  Confidence: {sample['confidence']:.4f} (threshold: {sample['conf_threshold']:.2f})")
            print(f"  Min Distance: {sample['min_distance']:.4f} (threshold: {sample['dist_threshold']:.2f})")
            print(f"  Class distances: " +
                  ", ".join([f"{k}:{v:.2f}" for k, v in
                             sorted(sample['all_distances'].items(), key=lambda x: x[1])[:3]]) +
                  "...")

        # Basic statistics
        print(f"\nConfidence scores - Min: {min(all_confidences):.4f}, " +
              f"Max: {max(all_confidences):.4f}, " +
              f"Mean: {np.mean(all_confidences):.4f}")

        print(f"Distances - Min: {min(all_distances):.4f}, " +
              f"Max: {max(all_distances):.4f}, " +
              f"Mean: {np.mean(all_distances):.4f}")

        # Threshold effectiveness metrics
        print("\n📊 Threshold Effectiveness:")
        total_samples = sum(threshold_results.values())
        print(f"  Known correctly classified: {threshold_results['true_positives']} " +
              f"({100 * threshold_results['true_positives'] / total_samples:.1f}%)")
        print(f"  Known classified as unknown: {threshold_results['false_negatives']} " +
              f"({100 * threshold_results['false_negatives'] / total_samples:.1f}%)")
        print(f"  Known but wrong class: {threshold_results['misclassifications']} " +
              f"({100 * threshold_results['misclassifications'] / total_samples:.1f}%)")
        print(f"  Unknown classified as known: {threshold_results['false_positives']} " +
              f"({100 * threshold_results['false_positives'] / total_samples:.1f}%)")
        print(f"  Unknown correctly identified: {threshold_results['true_negatives']} " +
              f"({100 * threshold_results['true_negatives'] / total_samples:.1f}%)")

        # Per-class performance
        print("\n📊 Per-class Performance:")
        for label, metrics in sorted(class_metrics.items()):
            if metrics['count'] > 0:
                accuracy = 100 * metrics['correct'] / metrics['count']
                unknown_rate = 100 * metrics['unknown'] / metrics['count'] if metrics['unknown'] > 0 else 0
                wrong_rate = 100 * metrics['wrong_class'] / metrics['count'] if metrics['wrong_class'] > 0 else 0

                status = "✅" if accuracy > 90 else "⚠️" if accuracy > 70 else "❌"

                print(f"{status} Class {label}: {metrics['correct']}/{metrics['count']} correct ({accuracy:.1f}%)")

                if metrics['unknown'] > 0:
                    print(f"      Falsely unknown: {metrics['unknown']} ({unknown_rate:.1f}%)")

                if metrics['wrong_class'] > 0:
                    print(f"      Wrong class: {metrics['wrong_class']} ({wrong_rate:.1f}%)")

                if len(metrics['distances']) > 0 and len(metrics['confidences']) > 0:
                    print(f"      Avg distance: {np.mean(metrics['distances']):.2f}, " +
                          f"Avg confidence: {np.mean(metrics['confidences']):.2f}")

                # Suggest threshold tuning if needed
                if unknown_rate > 10 or wrong_rate > 10:
                    if unknown_rate > wrong_rate:
                        # Too many falsely classified as unknown - adjust thresholds
                        suggest_conf = max(0.3, np.percentile(metrics['confidences'], 5) - 0.05)
                        suggest_dist = np.percentile(metrics['distances'], 95) + 0.5
                        print(f"      Suggested thresholds: conf={suggest_conf:.2f}, dist={suggest_dist:.2f}")
                    else:
                        # Too many misclassifications - may need better features or model
                        print(f"      Consider model improvements for this class")

        # Generate classification report
        target_names = ["Unknown"] + [str(c) for c in known_classes]
        print("\n📊 Improved open-set recognition evaluation:")
        report = classification_report(
            all_true_labels, all_preds,
            labels=[-1] + known_classes,
            target_names=target_names,
            zero_division=0
        )
        print(report)

        # Plot confusion matrix
        plot_confusion_matrix(all_true_labels, all_preds, [-1] + known_classes, target_names)

    # Combine probabilities from batches
    all_probs_combined = np.concatenate(all_probs, axis=0) if all_probs else np.array([])

    return all_preds, all_true_labels, all_probs_combined

def plot_confusion_matrix(y_true, y_pred, class_labels, target_names=None):
    """Plot count and row-normalized confusion matrices and print detailed metrics.

    参数:
    - y_true: 真实标签列表
    - y_pred: 预测标签列表
    - class_labels: 类别标签列表，包括未知类(-1)
    - target_names: 类别名称列表，用于显示 (可选)

    Returns:
        (cm, cm_normalized): raw-count and row-normalized confusion matrices.
    """
    cm = confusion_matrix(y_true, y_pred, labels=class_labels)

    # Row-normalize; rows with zero support would divide by zero
    with np.errstate(divide='ignore', invalid='ignore'):  # Handle div by zero
        cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        cm_normalized = np.nan_to_num(cm_normalized)  # Replace NaN with 0

    # Identify problematic classes (diagonal accuracy below 90%)
    diag = np.diag(cm_normalized)
    problem_classes = [(i, class_labels[i]) for i, acc in enumerate(diag) if acc < 0.9]

    # Create two confusion matrices: original counts and normalized
    _, axes = plt.subplots(1, 2, figsize=(20, 10))

    # Fix: work on a copy of the registered colormap. Calling set_under on
    # plt.cm.Blues directly mutates the globally registered colormap, which
    # is deprecated and raises in recent matplotlib versions.
    counts_cmap = plt.cm.Blues.copy()
    counts_cmap.set_under('white')  # Set color for zero values

    sns.heatmap(
        cm, annot=True, fmt='d', cmap=counts_cmap, ax=axes[0],
        cbar=True, square=True, linewidths=.5, linecolor='gray',
        vmin=0.1  # Set minimum value to use the under color for 0s
    )

    axes[0].set_xlabel('Predicted labels', fontsize=12, weight='bold')
    axes[0].set_ylabel('True labels', fontsize=12, weight='bold')
    axes[0].set_title('Confusion Matrix (counts)', fontsize=14, weight='bold')

    if target_names:
        axes[0].set_xticklabels(target_names, rotation=45, ha='right')
        axes[0].set_yticklabels(target_names, rotation=45, ha='right')
    else:
        axes[0].set_xticklabels(class_labels, rotation=45, ha='right')
        axes[0].set_yticklabels(class_labels, rotation=45, ha='right')

    # Highlight the diagonal for easier reading
    for i in range(len(cm)):
        axes[0].add_patch(plt.Rectangle((i, i), 1, 1, fill=False, edgecolor='darkblue', lw=2))

    # Plot normalized matrix with a copied colormap (same fix as above)
    norm_cmap = plt.cm.Greens.copy()
    norm_cmap.set_under('white')

    sns.heatmap(
        cm_normalized, annot=True, fmt='.2f', cmap=norm_cmap, ax=axes[1],
        cbar=True, square=True, linewidths=.5, linecolor='gray',
        vmin=0.001  # Set minimum value for coloring
    )

    axes[1].set_xlabel('Predicted labels', fontsize=12, weight='bold')
    axes[1].set_ylabel('True labels', fontsize=12, weight='bold')
    axes[1].set_title('Confusion Matrix (normalized)', fontsize=14, weight='bold')

    if target_names:
        axes[1].set_xticklabels(target_names, rotation=45, ha='right')
        axes[1].set_yticklabels(target_names, rotation=45, ha='right')
    else:
        axes[1].set_xticklabels(class_labels, rotation=45, ha='right')
        axes[1].set_yticklabels(class_labels, rotation=45, ha='right')

    # Highlight the diagonal for easier reading
    for i in range(len(cm_normalized)):
        axes[1].add_patch(plt.Rectangle((i, i), 1, 1, fill=False, edgecolor='darkgreen', lw=2))

    # Highlight problematic cells with low accuracy
    for i in range(len(cm_normalized)):
        for j in range(len(cm_normalized)):
            if i != j and cm_normalized[i, j] > 0.1:  # Significant misclassification
                axes[1].add_patch(plt.Rectangle((j, i), 1, 1, fill=False, edgecolor='red', lw=2))

                error_pct = 100 * cm_normalized[i, j]

                if error_pct > 20:  # Only annotate significant errors
                    # NOTE(review): this annotates the counts axis (axes[0])
                    # from the normalized-matrix loop — possibly intended for
                    # axes[1]; preserved as-is pending confirmation.
                    axes[0].text(
                        j + 0.5, i + 0.85, f"⚠️",
                        color='red', ha='center', va='center', fontsize=10
                    )

    plt.tight_layout()
    plt.show()

    # Print detailed classification performance metrics
    print("\n🔍 Classification Performance Metrics:")
    print(f"Overall Accuracy: {accuracy_score(y_true, y_pred):.4f}")

    # Calculate per-class metrics
    f1_scores = f1_score(y_true, y_pred, labels=class_labels, average=None, zero_division=0)
    precision_scores = precision_score(y_true, y_pred, labels=class_labels, average=None, zero_division=0)
    recall_scores = recall_score(y_true, y_pred, labels=class_labels, average=None, zero_division=0)

    # Create a metrics table
    metrics_table = []
    for i, label in enumerate(class_labels):
        class_name = target_names[i] if target_names else f"Class {label}"
        metrics_table.append({
            'Class': class_name,
            'Precision': precision_scores[i],
            'Recall': recall_scores[i],
            'F1 Score': f1_scores[i],
            'Support': np.sum(np.array(y_true) == label)
        })

    # Print table of metrics
    print("\n📊 Per-class Performance Metrics:")
    print(f"{'Class':<15} {'Precision':>10} {'Recall':>10} {'F1 Score':>10} {'Support':>10}")
    print("-" * 60)
    for row in metrics_table:
        print(
            f"{row['Class']:<15} {row['Precision']:>10.4f} {row['Recall']:>10.4f} {row['F1 Score']:>10.4f} {row['Support']:>10}")

    # Print aggregate metrics
    print("\nAggregate Metrics:")
    print(f"Macro F1: {f1_score(y_true, y_pred, average='macro', zero_division=0):.4f}")
    print(f"Weighted F1: {f1_score(y_true, y_pred, average='weighted', zero_division=0):.4f}")

    # Highlight problematic classes
    if problem_classes:
        print("\n⚠️ Problematic Classes (accuracy < 90%):")
        for idx, label in problem_classes:
            true_positives = cm[idx, idx]
            total = np.sum(cm[idx])
            class_name = target_names[idx] if target_names else f"Class {label}"

            # Find where this class is being misclassified
            misclassified_as = []
            for j, count in enumerate(cm[idx]):
                if j != idx and count > 0:
                    misclassified_as.append((
                        target_names[j] if target_names else f"Class {class_labels[j]}",
                        count,
                        100 * count / total
                    ))

            print(f"  {class_name}: {true_positives}/{total} correct ({100 * cm_normalized[idx, idx]:.2f}%)")
            if misclassified_as:
                print(f"     Misclassified as: ", end="")
                print(", ".join([f"{name}: {count} ({pct:.1f}%)" for name, count, pct in misclassified_as]))

    # Print confusion patterns
    print("\n🔍 Confusion Patterns:")

    # Find the top misclassification patterns
    misclass_patterns = []
    for i in range(len(cm)):
        for j in range(len(cm)):
            if i != j and cm[i, j] > 0:
                true_class = target_names[i] if target_names else f"Class {class_labels[i]}"
                pred_class = target_names[j] if target_names else f"Class {class_labels[j]}"
                misclass_patterns.append((true_class, pred_class, cm[i, j], cm_normalized[i, j]))

    # Sort by count
    misclass_patterns.sort(key=lambda x: x[2], reverse=True)

    # Print top patterns
    if misclass_patterns:
        print("Top misclassification patterns:")
        for true_class, pred_class, count, pct in misclass_patterns[:5]:
            print(f"  {true_class} → {pred_class}: {count} instances ({pct * 100:.1f}%)")
    else:
        print("No confusion found - perfect classification!")

    return cm, cm_normalized

def extract_features(signal):
    """Compute the hand-crafted statistical feature vector for one 1-D signal.

    Feature layout follows FEATURE_NAMES (defined elsewhere in this file):
    time-domain statistics [0-6], spectral centroid [7], normalized band
    energies [8-11] and [18-21], wavelet statistics [12-15], Hjorth-style
    parameters [16-17], entropy measures [22-23], and quartiles [24-25].

    Args:
        signal: 1-D array-like of samples.

    Returns:
        numpy array of len(FEATURE_NAMES) features.
    """
    features = np.zeros(len(FEATURE_NAMES))

    # Time domain features (enhanced calculations for robustness)
    features[0] = np.mean(signal)  # Mean
    features[1] = np.std(signal)  # Std
    features[2] = np.median(signal)  # Median
    features[3] = np.percentile(signal, 95) - np.percentile(signal, 5)  # Modified range to reduce outlier influence
    features[4] = np.sqrt(np.mean(np.square(signal)))  # RMS
    features[5] = stats.skew(signal)  # Skewness
    features[6] = stats.kurtosis(signal)  # Kurtosis

    # Frequency domain features: Hamming-windowed FFT magnitudes.
    # (The original computed an unwindowed FFT first and immediately
    # overwrote it with the windowed one; that dead computation is removed.)
    freqs = np.fft.rfftfreq(len(signal))
    windowed_signal = signal * np.hamming(len(signal))
    fft_values = np.abs(np.fft.rfft(windowed_signal))

    # Main Freq - spectral centroid (center of mass), more stable than argmax
    total_power = np.sum(fft_values)
    if total_power > 0:
        features[7] = np.sum(freqs * fft_values) / np.sum(fft_values)
    else:
        features[7] = 0

    # Enhanced frequency band energy normalization
    total_energy = np.sum(fft_values)
    freq_bands = [(0, 0.1), (0.1, 0.2), (0.2, 0.3), (0.3, 0.4)]
    for i, (low, high) in enumerate(freq_bands):
        mask = (freqs >= low) & (freqs < high)
        if total_energy > 0:
            features[8 + i] = np.sum(fft_values[mask]) / total_energy  # Normalized energy
        else:
            features[8 + i] = 0

    # Wavelet features: prefer db4, fall back to haar, then zeros.
    # (Bare `except:` narrowed to `except Exception:` so KeyboardInterrupt /
    # SystemExit are no longer swallowed.)
    try:
        coeffs = pywt.wavedec(signal, 'db4', level=2)
        cA, cD = coeffs[0], coeffs[1]
    except Exception:
        # Fallback to simpler wavelet if db4 fails
        try:
            coeffs = pywt.wavedec(signal, 'haar', level=2)
            cA, cD = coeffs[0], coeffs[1]
        except Exception:
            # Last resort - dummy coefficients so downstream stats are zeros
            cA = np.zeros(len(signal) // 4)
            cD = np.zeros(len(signal) // 4)

    features[12] = np.std(cA)  # Wavelet cA Std
    features[13] = np.mean(np.abs(cD))  # Absolute mean for stability
    features[14] = np.percentile(cD, 95) - np.percentile(cD, 5)  # More robust range
    features[15] = np.percentile(cD, 75) - np.percentile(cD, 25)  # Wavelet cD IQR

    # Hjorth-style parameters
    features[16] = np.var(signal)  # Activity
    diff1 = np.diff(signal)
    if np.std(signal) > 0:
        # NOTE(review): std(diff)/std(signal) is the Hjorth *mobility*
        # formula, though this slot is labelled "Complexity" — confirm
        # against FEATURE_NAMES.
        features[17] = np.std(diff1) / np.std(signal)
    else:
        features[17] = 0

    # Additional (higher) frequency bands with normalization
    additional_freq_bands = [(0.4, 0.5), (0.5, 0.6), (0.6, 0.8), (0.8, 1.0)]
    for i, (low, high) in enumerate(additional_freq_bands):
        mask = (freqs >= low) & (freqs < high)
        if total_energy > 0:
            features[18 + i] = np.sum(fft_values[mask]) / total_energy
        else:
            features[18 + i] = 0

    # Entropy features; similarity tolerance scales with signal std
    r = 0.25 * np.std(signal) if np.std(signal) > 0 else 0.25  # Similarity tolerance
    try:
        features[22] = sample_entropy(signal, m=2, r=r)
    except Exception:
        features[22] = 0

    try:
        features[23] = approximation_entropy(signal, m=2, r=r)
    except Exception:
        features[23] = 0

    # Additional time domain statistics
    features[24] = np.percentile(signal, 25)  # Q1
    features[25] = np.percentile(signal, 75)  # Q3

    return features

def sample_entropy(signal, m=2, r=0.2):
    """Sample entropy of a time series: higher values mean the series is
    more complex / less predictable. Returns 0.0 on any failure."""
    try:
        n = len(signal)

        def chebyshev(a, b):
            # Max-coordinate (Chebyshev) distance between two templates.
            return max(abs(u - v) for u, v in zip(a, b))

        def phi(order):
            # Fraction of template pairs within tolerance r, averaged.
            norm = n - order + 1.0
            templates = [[signal[k] for k in range(start, start + order)]
                         for start in range(n - order + 1)]
            match_ratios = [
                sum(chebyshev(t, other) <= r for other in templates) / norm
                for t in templates
            ]
            return sum(match_ratios) / len(templates)

        return -np.log(phi(m + 1) / phi(m))
    except Exception as e:
        print(f"Error in sample_entropy: {e}")
        return 0.0

def approximation_entropy(signal, m=2, r=0.2):
    """Approximate entropy (ApEn) of a 1-D time series."""
    n = len(signal)

    def chebyshev(a, b):
        # Max-coordinate (Chebyshev) distance between two templates.
        return max(abs(u - v) for u, v in zip(a, b))

    def phi(order):
        # Mean log fraction of templates within tolerance r of each template.
        norm = n - order + 1.0
        templates = [[signal[k] for k in range(start, start + order)]
                     for start in range(n - order + 1)]
        match_ratios = [
            sum(chebyshev(t, other) <= r for other in templates) / norm
            for t in templates
        ]
        return sum(np.log(match_ratios)) / len(templates)

    return abs(phi(m) - phi(m + 1))

def plot_signal_examples(dataset, num_samples=3):
    """Plot the raw signal of the first few dataset samples, one subplot each."""
    n_shown = min(num_samples, len(dataset))
    plt.figure(figsize=(15, 10))

    for row in range(n_shown):
        signal, _, label = dataset[row]
        plt.subplot(num_samples, 1, row + 1)
        plt.plot(signal.numpy())
        plt.title(f"Signal Example - Class {label}")
        plt.grid(True)

    plt.tight_layout()
    plt.show()


def save_experiment(model, results, config, path, class_centers=None):
    """Persist model weights, config, metrics, and optional class centers
    into a single torch checkpoint file at `path`."""
    os.makedirs(os.path.dirname(path), exist_ok=True)

    print(f"Saving class centers: {class_centers}")

    checkpoint = {
        'model_state_dict': model.state_dict(),
        'config': config,
        'results': results,
        'class_centers': class_centers,  # ✅ 新增类中心存储
    }
    torch.save(checkpoint, path)
    print(f"✅ 模型和类中心已保存到 {path}")

def load_experiment(path, model=None, map_location=None):
    """Load an experiment checkpoint saved by save_experiment.

    Args:
        path: checkpoint file path.
        model: model to load the weights into; if None, a DualBranchModel is
            rebuilt from the stored config.
        map_location: forwarded to torch.load. Fix: without this, a
            checkpoint saved on GPU cannot be loaded on a CPU-only machine;
            pass e.g. 'cpu' or a torch.device. Default None preserves the
            original behavior.

    Returns:
        (model, results, class_centers) — class_centers is None for older
        checkpoints saved without them.
    """
    checkpoint = torch.load(path, map_location=map_location)

    if model is None:
        # Rebuild the architecture from the stored config
        model = DualBranchModel(
            signal_input_dim=checkpoint['config']['signal_length'],
            feat_input_dim=checkpoint['config']['input_dim'],
            hidden_dim=checkpoint['config']['hidden_dim'],
            num_classes=checkpoint['config']['num_classes']
        )

    model.load_state_dict(checkpoint['model_state_dict'])
    # Older checkpoints may predate class-center support
    class_centers = checkpoint.get('class_centers', None)

    return model, checkpoint['results'], class_centers

# Per-class feature-center computation; relies on the outlier and cluster-quality helpers below.
def calculate_class_centers(model, data_loader, device, known_classes, n_clusters=3):
    """Compute feature-space centers for each known class.

    Feature vectors are collected with the model in eval mode; outliers are
    pruned, and classes with enough clean samples get multiple K-means
    centers with quality weights. Small classes fall back to a single
    robust (median) center.
    """
    model.eval()
    per_class_feats = {label: [] for label in known_classes}

    print(f"Calculating centers for classes: {known_classes}")

    # Pass 1: collect feature vectors for each known class.
    total_collected = 0
    with torch.no_grad():
        for signal, features, labels in data_loader:
            signal, features = signal.to(device), features.to(device)
            embedded = model.extract_features(signal, features).cpu().numpy()

            for idx, label in enumerate(labels.cpu().numpy()):
                if label in known_classes:
                    per_class_feats[label].append(embedded[idx])
                    total_collected += 1

    print(f"Collected {total_collected} feature vectors across {len(known_classes)} classes")

    # Pass 2: derive one or several centers per class.
    class_centers = {}
    for c, feats in per_class_feats.items():
        print(f"Class {c}: {len(feats)} samples")

        if not feats:
            print(f"  WARNING: No samples for class {c}")
            continue

        if len(feats) < n_clusters * 2:
            # Too few samples to cluster: single robust center.
            class_centers[c] = np.median(np.array(feats), axis=0)
            print(f"  Created single center (median of {len(feats)} samples)")
            continue

        feats_array = np.array(feats)

        # Prune outliers with a uniform contamination rate for all classes.
        clean_feats, outliers = detect_and_remove_outliers(feats_array, contamination=0.08)
        print(f"  Removed {len(outliers)} outliers ({len(outliers) / len(feats_array) * 100:.1f}%)")

        if len(clean_feats) < n_clusters * 2:
            # Outlier removal left too little data: fall back to a median center.
            class_centers[c] = np.median(clean_feats, axis=0)
            print(f"  Created single center (median of {len(clean_feats)} clean samples)")
            continue

        # K-means with many restarts for stable centers.
        n_actual_clusters = min(n_clusters, len(clean_feats) // 2)
        kmeans = safe_kmeans_clustering(clean_feats, n_clusters=n_actual_clusters, n_init=40)

        # Weight centers by cluster quality for later distance calculations.
        cluster_qualities = calculate_cluster_qualities(clean_feats, kmeans)
        class_centers[c] = {
            'centers': kmeans.cluster_centers_,
            'weights': cluster_qualities,
        }
        print(f"  Created {len(kmeans.cluster_centers_)} centers with quality-based weights")

        cluster_sizes = np.bincount(kmeans.labels_)
        print(f"  Cluster sizes: {cluster_sizes}")

    return class_centers


def calculate_cluster_qualities(features, kmeans):
    """
    Calculate quality scores for each cluster to use as weights.
    Higher quality clusters get higher weights in distance calculations.

    Args:
        features: Feature vectors
        kmeans: Fitted KMeans model

    Returns:
        Normalized quality scores, one per cluster center, summing to 1
    """
    n_centers = len(kmeans.cluster_centers_)

    if len(np.unique(kmeans.labels_)) <= 1:
        # Silhouette scores are undefined when only one cluster is populated.
        # Fix: return one uniform weight PER CENTER — the original returned a
        # single-element array, so zip(centers, weights) downstream silently
        # dropped every center but the first.
        return np.full(n_centers, 1.0 / n_centers)

    # Imported here so the degenerate branch above works without sklearn.
    from sklearn.metrics import silhouette_samples

    # Calculate silhouette scores for each sample
    silhouette_vals = silhouette_samples(features, kmeans.labels_)

    # Calculate average silhouette score for each cluster
    cluster_silhouettes = []
    for i in range(n_centers):
        cluster_mask = kmeans.labels_ == i
        if np.sum(cluster_mask) > 0:
            avg_silhouette = np.mean(silhouette_vals[cluster_mask])
            cluster_silhouettes.append(max(0.1, avg_silhouette))  # Ensure minimum weight
        else:
            cluster_silhouettes.append(0.1)  # Minimum weight for empty clusters

    # Normalize weights to sum to 1
    weights = np.array(cluster_silhouettes)
    weights = weights / np.sum(weights)

    return weights

def detect_and_remove_outliers(features, contamination=0.1):
    """
    Split feature vectors into inliers and outliers.

    Uses IsolationForest when there are enough samples; if the forest flags
    more than half the data, falls back to the Mahalanobis-distance method,
    which removes at most the requested contamination fraction.

    Args:
        features: numpy array of feature vectors.
        contamination: Expected proportion of outliers.

    Returns:
        clean_features: Features with outliers removed.
        outliers: The detected outlier features.
    """
    from sklearn.ensemble import IsolationForest

    # Too few samples for a meaningful forest — keep everything.
    if len(features) <= 10:
        return features, np.array([])

    forest = IsolationForest(
        contamination=contamination,
        random_state=42,
        n_estimators=100,
        max_samples='auto'
    )

    # fit_predict returns +1 for inliers, -1 for outliers.
    inlier_mask = forest.fit_predict(features) == 1
    clean_features = features[inlier_mask]
    outliers = features[~inlier_mask]

    # Sanity guard: if the forest rejected most of the data, it is probably
    # over-flagging — switch to the bounded Mahalanobis criterion instead.
    if len(clean_features) < 0.5 * len(features):
        print(f"  Too many outliers detected, limiting to worst {contamination * 100}%")
        # Use Mahalanobis distance instead
        clean_features, outliers = remove_outliers_mahalanobis(features, contamination)

    return clean_features, outliers


def remove_outliers_mahalanobis(features, contamination=0.1):
    """Remove outliers using Mahalanobis distance.

    Samples whose Mahalanobis distance from the sample mean exceeds the
    (1 - contamination) order statistic are treated as outliers.

    Args:
        features: 2-D numpy array of shape (n_samples, n_features).
        contamination: Expected proportion of outliers, in [0, 1).

    Returns:
        clean_features: Rows within the distance threshold.
        outliers: Rows beyond the threshold.
    """
    features = np.asarray(features)

    # Sample mean and covariance of the feature cloud.
    mean = np.mean(features, axis=0)
    cov = np.cov(features, rowvar=False)

    # Add regularization to covariance matrix to ensure it's invertible
    cov = cov + np.eye(cov.shape[0]) * 1e-6
    inv_cov = np.linalg.inv(cov)

    # Vectorized Mahalanobis distance: sqrt(d^T * inv_cov * d) per row
    # (replaces the former per-sample Python loop).
    diff = features - mean
    distances = np.sqrt(np.einsum('ij,jk,ik->i', diff, inv_cov, diff))

    # Threshold at the (1 - contamination) quantile. Clamp the index so
    # contamination == 0 no longer indexes one past the end of the array.
    sorted_distances = np.sort(distances)
    threshold_idx = min(int(len(features) * (1 - contamination)), len(features) - 1)
    threshold = sorted_distances[threshold_idx]

    # Split into clean and outlier features
    clean_mask = distances <= threshold
    clean_features = features[clean_mask]
    outliers = features[~clean_mask]

    return clean_features, outliers

def safe_kmeans_clustering(features_array, n_clusters=3, random_state=42, n_init=10):
    """
    Safely perform KMeans clustering, handling environment variables for Windows.
    This prevents the known memory leak in scikit-learn's KMeans with MKL on Windows.

    Args:
        features_array: numpy array of features to cluster
        n_clusters: number of clusters to form
        random_state: random seed for reproducibility
        n_init: number of initializations to try

    Returns:
        kmeans: fitted KMeans model
    """

    # Remember the caller's setting so it can be restored exactly.
    original_omp_threads = os.environ.get("OMP_NUM_THREADS")

    try:
        # Cap OpenMP threads on Windows to avoid the MKL/KMeans leak.
        if os.name == 'nt':
            os.environ["OMP_NUM_THREADS"] = "2"

        # Create and fit KMeans model
        kmeans = KMeans(
            n_clusters=n_clusters,
            random_state=random_state,
            n_init=n_init
        )
        kmeans.fit(features_array)
        return kmeans

    finally:
        # Restore the environment to its original state: put the old value
        # back, or drop the variable if it was not set before.
        if original_omp_threads is not None:
            os.environ["OMP_NUM_THREADS"] = original_omp_threads
        else:
            os.environ.pop("OMP_NUM_THREADS", None)
    # (The former trailing `return None` was unreachable: the try block
    # always returns or raises, so it has been removed.)


def plot_threshold_visualization(model, test_loader, device, known_classes, class_centers):
    """
    Visualize how the confidence threshold and the distance threshold jointly
    affect open-set recognition.

    - x-axis: maximum softmax probability (confidence)
    - y-axis: minimum distance to the nearest class center

    Args:
        model: network exposing forward(signal, features) -> (logits, ...)
            and extract_features(signal, features).
        test_loader: DataLoader yielding (signal, features, label) batches;
            its dataset must expose label_dict (original label -> index).
        device: torch device used for inference.
        known_classes: collection of original labels treated as "known".
        class_centers: mapping class index -> center vector, or
            {'centers': [...]} for multi-center classes.

    Returns:
        (all_confidences, all_distances, all_labels) numpy arrays;
        all_labels is a boolean mask marking known-class samples.

    NOTE(review): thresholds are read from the global CFG dict
    ("conf_threshold", "distance_threshold"); figures are displayed
    interactively via plt.show().
    """
    model.eval()
    all_confidences = []
    all_distances = []
    all_labels = []
    original_labels = []

    # Get label mappings
    label_dict = test_loader.dataset.label_dict
    reverse_dict = {v: k for k, v in label_dict.items()}

    with torch.no_grad():
        for signal, features, labels in test_loader:
            signal, features = signal.to(device), features.to(device)

            outputs, _ = model(signal, features)
            batch_features = model.extract_features(signal, features)

            probs = F.softmax(outputs, dim=1).cpu().numpy()
            max_confidences = np.max(probs, axis=1)
            pred_indices = np.argmax(probs, axis=1)

            # Calculate distances to class centers
            batch_distances = []
            for i, feat in enumerate(batch_features.cpu().numpy()):
                min_distance = float('inf')
                for class_idx in class_centers:
                    # Handle different center formats
                    if isinstance(class_centers[class_idx], dict) and 'centers' in class_centers[class_idx]:
                        # Multiple centers case
                        centers = class_centers[class_idx]['centers']
                        class_dist = min(np.linalg.norm(feat - c) for c in centers)
                    else:
                        # Single center case
                        class_dist = np.linalg.norm(feat - class_centers[class_idx])

                    min_distance = min(min_distance, class_dist)

                batch_distances.append(min_distance)

            # Collect data
            batch_labels = labels.cpu().numpy()
            for i, label in enumerate(batch_labels):
                is_known = int(label) in known_classes  # Ensure label is int for comparison

                all_confidences.append(max_confidences[i])
                all_distances.append(batch_distances[i])
                all_labels.append(is_known)
                original_labels.append(int(label))  # Convert tensor to int for storage

    # Convert to numpy arrays
    all_confidences = np.array(all_confidences)
    all_distances = np.array(all_distances)
    all_labels = np.array(all_labels)
    original_labels = np.array(original_labels)

    # Get current thresholds from CFG
    conf_threshold = CFG["conf_threshold"]
    dist_threshold = CFG["distance_threshold"]

    # Create main visualization with improved aesthetics
    plt.figure(figsize=(12, 10))

    # Set color scheme using better contrast
    known_color = '#1f77b4'  # Blue
    unknown_color = '#d62728'  # Red

    # Add grid for better readability
    plt.grid(True, linestyle='--', alpha=0.7)

    # Create scatter plot with larger markers and improved transparency
    plt.scatter(
        all_confidences[all_labels], all_distances[all_labels],
        c=known_color, marker='o', alpha=0.7, s=60, label='Known Classes'
    )
    plt.scatter(
        all_confidences[~all_labels], all_distances[~all_labels],
        c=unknown_color, marker='x', alpha=0.7, s=60, label='Unknown Classes'
    )

    # Draw thresholds with improved styling
    plt.axvline(x=conf_threshold, color='black', linestyle='--', linewidth=2,
                label=f'Confidence Threshold = {conf_threshold:.2f}')
    plt.axhline(y=dist_threshold, color='green', linestyle='--', linewidth=2,
                label=f'Distance Threshold = {dist_threshold:.2f}')

    # Add shaded regions for better visualization of decision areas
    if conf_threshold > 0 and dist_threshold > 0:
        # Known region (high conf, low dist)
        plt.fill_between(
            [conf_threshold, 1.05], [0, 0], [dist_threshold, dist_threshold],
            color='lightgreen', alpha=0.2
        )

        # Unknown regions
        plt.fill_between(
            [0, conf_threshold], [0, 0], [dist_threshold, dist_threshold],
            color='lightsalmon', alpha=0.2
        )
        plt.fill_between(
            [0, 1.05], [dist_threshold, dist_threshold], [plt.ylim()[1] * 1.1, plt.ylim()[1] * 1.1],
            color='lightsalmon', alpha=0.2
        )

    # Region labels with improved formatting
    text_props = {
        'ha': 'center',
        'va': 'center',
        'bbox': dict(boxstyle='round,pad=0.5', facecolor='white', alpha=0.8, edgecolor='gray'),
        'fontsize': 10,
        'weight': 'bold'
    }

    # Annotate the four quadrants formed by the two thresholds.
    plt.text(
        (1 + conf_threshold) / 2, (dist_threshold / 2),
        "KNOWN\n(High Conf, Low Dist)",
        **text_props, color='darkgreen'
    )
    plt.text(
        conf_threshold / 2, (dist_threshold / 2),
        "UNKNOWN\n(Low Conf, Low Dist)",
        **text_props, color='darkred'
    )
    plt.text(
        (1 + conf_threshold) / 2, dist_threshold + (plt.ylim()[1] - dist_threshold) / 2,
        "UNKNOWN\n(High Conf, High Dist)",
        **text_props, color='darkred'
    )
    plt.text(
        conf_threshold / 2, dist_threshold + (plt.ylim()[1] - dist_threshold) / 2,
        "UNKNOWN\n(Low Conf, High Dist)",
        **text_props, color='darkred'
    )

    # Improved chart styling
    plt.xlabel('Maximum Confidence Score', fontsize=12, weight='bold')
    plt.ylabel('Minimum Distance to Class Centers', fontsize=12, weight='bold')
    plt.title('Dual-Threshold Open-Set Recognition', fontsize=16, weight='bold')
    plt.legend(loc='upper right', fontsize=10, framealpha=0.9)

    plt.xlim(0, 1.05)
    y_max = min(max(all_distances) * 1.1, dist_threshold * 2)  # Reasonable y limit
    plt.ylim(0, y_max)

    # Add annotations for data density
    known_count = np.sum(all_labels)
    unknown_count = len(all_labels) - known_count
    plt.annotate(
        f"Known samples: {known_count}\nUnknown samples: {unknown_count}",
        xy=(0.02, 0.98), xycoords='axes fraction',
        bbox=dict(boxstyle="round,pad=0.3", fc="white", ec="gray", alpha=0.8),
        ha='left', va='top', fontsize=10
    )

    # Class-specific statistics in the corner
    plt.tight_layout()
    plt.show()

    # Class-specific analysis
    print("\n🔍 Class-specific threshold analysis:")
    unique_classes = np.unique(original_labels)
    class_stats = {}

    # Group data by class for better statistics
    for c in unique_classes:
        if c == -1:  # Skip unknown class
            continue

        # Find samples of this class
        class_mask = original_labels == c
        class_conf = all_confidences[class_mask]
        class_dist = all_distances[class_mask]

        if len(class_conf) > 0:
            # Calculate statistics
            conf_mean = np.mean(class_conf)
            conf_std = np.std(class_conf)
            conf_min = np.min(class_conf)
            conf_25 = np.percentile(class_conf, 25)  # 25th percentile

            dist_mean = np.mean(class_dist)
            dist_std = np.std(class_dist)
            dist_max = np.max(class_dist)
            dist_75 = np.percentile(class_dist, 75)  # 75th percentile

            # Store stats
            class_stats[c] = {
                'samples': len(class_conf),
                'conf_mean': conf_mean,
                'conf_std': conf_std,
                'conf_min': conf_min,
                'conf_25': conf_25,
                'dist_mean': dist_mean,
                'dist_std': dist_std,
                'dist_max': dist_max,
                'dist_75': dist_75,
                'suggested_conf': max(0.3, conf_min - 0.1),  # More aggressive
                'suggested_dist': min(5.0, dist_max + 0.5)
            }

            # Print analysis
            orig_label = reverse_dict.get(c, c)
            print(f"Class {orig_label} ({len(class_conf)} samples):")
            print(f"  Confidence: mean={conf_mean:.3f}, std={conf_std:.3f}, min={conf_min:.3f}, 25th={conf_25:.3f}")
            print(f"  Distance: mean={dist_mean:.3f}, std={dist_std:.3f}, max={dist_max:.3f}, 75th={dist_75:.3f}")
            print(f"  Suggested thresholds: conf={class_stats[c]['suggested_conf']:.3f}, "
                  f"dist={class_stats[c]['suggested_dist']:.3f}")

    # Find problematic classes and recommend thresholds
    print("\nRecommended class-specific thresholds:")
    problematic_classes = [11, 21, 30, 31, 40]  # Adjust based on confusion matrix analysis

    for c in problematic_classes:
        if c in reverse_dict.values():
            # Find the mapped index
            mapped_c = next((k for k, v in reverse_dict.items() if v == c), None)
            if mapped_c is not None and mapped_c in class_stats:
                stats = class_stats[mapped_c]
                print(f"Class {c}: conf_threshold={stats['suggested_conf']:.2f}, "
                      f"dist_threshold={stats['suggested_dist']:.2f}")

    # Create a second visualization showing class distribution
    plt.figure(figsize=(14, 8))

    # Create a colormap for the classes
    unique_known_classes = [c for c in unique_classes if c != -1]
    cmap = plt.cm.get_cmap('tab20', len(unique_known_classes))

    # Plot known classes with different colors
    for i, class_label in enumerate(unique_known_classes):
        mask = original_labels == class_label
        label_name = f"Class {reverse_dict.get(class_label, class_label)}"
        plt.scatter(
            all_confidences[mask], all_distances[mask],
            c=[cmap(i)], marker='o', alpha=0.7, s=50, label=label_name
        )

    # Plot unknown classes
    mask = original_labels == -1
    plt.scatter(
        all_confidences[mask], all_distances[mask],
        c='red', marker='x', alpha=0.7, s=50, label="Unknown"
    )

    # Draw thresholds
    plt.axvline(x=conf_threshold, color='black', linestyle='--', linewidth=2,
                label=f'Confidence Threshold = {conf_threshold:.2f}')
    plt.axhline(y=dist_threshold, color='green', linestyle='--', linewidth=2,
                label=f'Distance Threshold = {dist_threshold:.2f}')

    plt.xlabel('Maximum Confidence Score', fontsize=12, weight='bold')
    plt.ylabel('Minimum Distance to Class Centers', fontsize=12, weight='bold')
    plt.title('Class Distribution in Feature Space', fontsize=16, weight='bold')
    plt.grid(True, linestyle='--', alpha=0.7)

    # Add legend in two columns for better readability
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', ncol=2)

    plt.xlim(0, 1.05)
    plt.ylim(0, y_max)
    plt.tight_layout()
    plt.show()

    return all_confidences, all_distances, all_labels


def visualize_feature_space(model, train_loader, test_loader, device):
    """Project learned embeddings to 2D with t-SNE and plot train vs. test
    distributions (unknown class, label -1, is skipped)."""
    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt

    model.eval()

    def _embed(loader):
        # Run the loader through the frozen model and collect embeddings.
        chunks, label_chunks = [], []
        with torch.no_grad():
            for signal, features, labels in loader:
                signal, features = signal.to(device), features.to(device)
                chunks.append(model.extract_features(signal, features).cpu().numpy())
                label_chunks.append(labels.cpu().numpy())
        return np.vstack(chunks), np.concatenate(label_chunks)

    train_features, train_labels = _embed(train_loader)
    test_features, test_labels = _embed(test_loader)

    # Embed train and test jointly so both live in the same 2D space.
    combined_features = np.vstack([train_features, test_features])

    print("Applying t-SNE...")
    combined_tsne = TSNE(n_components=2, random_state=42).fit_transform(combined_features)

    split = len(train_features)
    train_tsne, test_tsne = combined_tsne[:split], combined_tsne[split:]

    plt.figure(figsize=(12, 10))

    # One scatter series per (split, class); unknown class (-1) skipped.
    for prefix, points, labs, mark in (("Train", train_tsne, train_labels, 'o'),
                                       ("Test", test_tsne, test_labels, 'x')):
        for lab in np.unique(labs):
            if lab == -1:
                continue
            sel = labs == lab
            plt.scatter(points[sel, 0], points[sel, 1], marker=mark, alpha=0.7,
                        label=f"{prefix} Class {lab}")

    plt.legend()
    plt.title("t-SNE visualization of feature space")
    plt.show()


def triplet_loss_for_problem_classes(embeddings, labels, margin=1.0):
    """Batch-hard triplet loss on L2-normalized embeddings.

    For every anchor, mines the hardest positive (farthest same-class
    sample) and the hardest negative (closest different-class sample) and
    applies the standard margin hinge. Anchors lacking a positive or a
    negative are ignored.

    Args:
        embeddings: (N, D) float tensor of embeddings (normalized internally).
        labels: (N,) tensor of integer class labels.
        margin: Hinge margin between positive and negative distances.

    Returns:
        Scalar tensor on the embeddings' device: mean hinge loss over valid
        anchors, or 0 when the batch has fewer than two classes or no valid
        anchors (the original returned a Python float in the latter case;
        a tensor is now returned consistently).
    """
    unique_labels = torch.unique(labels)
    if len(unique_labels) <= 1:
        # Not enough classes to form any triplet.
        return embeddings.new_zeros(())

    # Pairwise Euclidean distances between unit-norm embeddings.
    embeddings_normalized = F.normalize(embeddings, p=2, dim=1)
    dist_matrix = torch.cdist(embeddings_normalized, embeddings_normalized)

    # Vectorized hard-mining (replaces the former per-anchor Python loop).
    same_class = labels.unsqueeze(0) == labels.unsqueeze(1)
    self_mask = torch.eye(len(labels), dtype=torch.bool, device=labels.device)
    pos_mask = same_class & ~self_mask  # same class, excluding the anchor itself
    neg_mask = ~same_class              # any different class

    # Anchors that have at least one positive AND one negative.
    valid = pos_mask.any(dim=1) & neg_mask.any(dim=1)
    if not valid.any():
        return embeddings.new_zeros(())

    # Hardest positive: max distance over positives; hardest negative:
    # min distance over negatives (masked out entries become -inf / +inf).
    hardest_pos = dist_matrix.masked_fill(~pos_mask, float('-inf')).max(dim=1).values
    hardest_neg = dist_matrix.masked_fill(~neg_mask, float('inf')).min(dim=1).values

    losses = torch.clamp(hardest_pos - hardest_neg + margin, min=0.0)
    return losses[valid].mean()

def train_model(model, train_loader, val_loader, optimizer, criterion, device, epochs=90, patience=30,
                lambda_contrastive=0.7, progressive_freezing=True):
    """Train the model with early stopping and a progressive-freezing schedule.

    Combines cross-entropy with supervised-contrastive and triplet auxiliary
    losses, mixup augmentation, OneCycleLR warm-up, and Stochastic Weight
    Averaging (SWA) in the final training phase. Samples labeled -1
    (open-set "unknown") are filtered out of both training and validation.

    Args:
        model: network returning (logits, embeddings); must expose
            `signal_branch` (for freezing) and `classifier` (for class count).
        train_loader / val_loader: (signal, features, label) batch loaders.
        optimizer: optimizer wrapping the model's parameters.
        criterion: classification loss (e.g. CrossEntropyLoss).
        device: torch device.
        epochs: maximum number of epochs.
        patience: early-stopping patience in epochs (on validation loss).
        lambda_contrastive: initial weight of the contrastive loss.
        progressive_freezing: freeze the first signal-branch block late in
            training when True.

    Returns:
        (model, history, class_centers): the trained model (best or SWA
        weights), per-epoch metric history, and running class centers.
    """
    best_val_loss = float('inf')
    best_model = None
    patience_counter = 0
    history = {'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': [],
               'contrastive_loss': [], 'val_metrics': []}

    # Contrastive loss with lower temperature
    contrastive_criterion = SupConLoss(temperature=0.07)  # Increased from 0.05

    # Dynamic mixup with higher alpha
    mixup_alpha = CFG["mixup_alpha"]

    # OneCycleLR scheduler with modified parameters
    from torch.optim.lr_scheduler import OneCycleLR
    scheduler = OneCycleLR(
        optimizer,
        max_lr=[param_group['lr'] * 3.0 for param_group in optimizer.param_groups],  # Safe multiplier
        total_steps=epochs * len(train_loader),
        pct_start=0.3,  # Increased warm-up percentage
        div_factor=20.0,  # More stable division
        final_div_factor=100.0  # More stable final division
    )

    # SWA with adjusted parameters
    from torch.optim.swa_utils import AveragedModel, SWALR
    swa_model = AveragedModel(model)
    swa_scheduler = SWALR(optimizer, swa_lr=[param_group['lr'] / 3.0 for param_group in optimizer.param_groups])
    swa_start = int(epochs * 0.7)  # Start SWA earlier (was 0.75)

    # Class center tracker (EMA of per-class embedding means)
    class_center_tracker = ClassCenterTracker(
        num_classes=CFG["num_classes"],
        feature_dim=128,
        device=device
    )

    # Track per-class performance for monitoring
    num_classes = model.classifier[-1].out_features  # Get actual number of output classes

    for epoch in range(epochs):
        # Dynamic mixup with controlled decay
        current_mixup_alpha = mixup_alpha * (1 - 0.2 * epoch / epochs)

        # Modified progressive freezing
        if progressive_freezing and epoch > int(epochs * 0.8):  # Later freezing (was 0.75)
            # Only freeze the first few layers of signal branch
            for i, layer in enumerate(model.signal_branch):
                if i < 3:  # Only freeze first convolutional block
                    for param in layer.parameters():
                        param.requires_grad = False
            print(f"Epoch {epoch + 1}: First signal branch block frozen")

        # Training phase
        model.train()
        running_loss = 0.0
        contrastive_losses = 0.0
        correct = 0
        total = 0

        # Track per-class accuracy
        class_correct = {i: 0 for i in range(num_classes)}
        class_total = {i: 0 for i in range(num_classes)}

        for signals, features, labels in train_loader:
            signals = signals.to(device)
            features = features.to(device)
            labels = labels.to(device)

            # Filter out unknown class
            valid_indices = labels != -1
            if sum(valid_indices) == 0:
                continue

            signals = signals[valid_indices]
            features = features[valid_indices]
            labels = labels[valid_indices]

            # Apply basic augmentation
            if random.random() < 0.8:  # 80% chance to apply augmentation
                augmented_signals = torch.zeros_like(signals)
                for i in range(len(signals)):
                    augmented_signals[i] = enhanced_augment_signal(signals[i])
                signals = augmented_signals

            # Apply mixup with dynamic probability
            apply_mixup = random.random() < 0.6 and current_mixup_alpha > 0
            if apply_mixup:
                mixed_signals, mixed_features, targets_a, targets_b, lam = mixup_data(
                    signals, features, labels, alpha=current_mixup_alpha
                )

                outputs, embeddings = model(mixed_signals, mixed_features)
                loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)
                contrastive_loss = torch.tensor(0.0).to(device)
            else:
                outputs, embeddings = model(signals, features)
                loss = criterion(outputs, labels)

                # Apply contrastive loss with dynamic weight
                contrastive_weight = lambda_contrastive * (1 - 0.1 * epoch / epochs)  # Gentler decay
                contrastive_loss = contrastive_criterion(embeddings, labels)
                loss += contrastive_weight * contrastive_loss

                # Add triplet loss to improve separation for hard examples
                if epoch >= 5:  # Start using after a few epochs
                    triplet_weight = 0.2 * (1 - 0.1 * epoch / epochs)  # Gentle decay
                    triplet_loss = triplet_loss_for_problem_classes(embeddings, labels)
                    loss += triplet_weight * triplet_loss

            optimizer.zero_grad()
            loss.backward()

            # Gradient clipping
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=2.5)

            optimizer.step()

            # Update learning rate (per-batch OneCycle until the SWA phase)
            if epoch < swa_start:
                scheduler.step()
            else:
                swa_scheduler.step()

            running_loss += loss.item()
            contrastive_losses += contrastive_loss.item() if isinstance(contrastive_loss, torch.Tensor) else 0

            # Calculate accuracy and track per-class performance
            if not apply_mixup:
                _, predicted = outputs.max(1)
                total += labels.size(0)
                correct += predicted.eq(labels).sum().item()

                # Update per-class accuracy tracking
                for i in range(len(labels)):
                    label = labels[i].item()
                    if 0 <= label < num_classes:  # Ensure valid class index
                        class_total[label] += 1
                        if predicted[i].item() == label:
                            class_correct[label] += 1

                # Update class centers
                class_center_tracker.update(embeddings, labels, momentum=0.95)
            else:
                # Mixup batches count fractionally toward both mixed targets.
                _, predicted = outputs.max(1)
                total += targets_a.size(0)
                correct += (lam * predicted.eq(targets_a).sum().item() +
                            (1 - lam) * predicted.eq(targets_b).sum().item())

        train_loss = running_loss / max(1, len(train_loader))
        train_acc = 100. * correct / max(1, total)
        contrastive_loss_avg = contrastive_losses / max(1, len(train_loader))

        history['train_loss'].append(train_loss)
        history['train_acc'].append(train_acc)
        history['contrastive_loss'].append(contrastive_loss_avg)

        # Validation phase
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0
        all_preds = []
        all_labels = []

        # Track validation per-class performance
        val_class_correct = {i: 0 for i in range(num_classes)}
        val_class_total = {i: 0 for i in range(num_classes)}

        with torch.no_grad():
            for signals, features, labels in val_loader:
                signals = signals.to(device)
                features = features.to(device)
                labels = labels.to(device)

                # Filter out unknown class
                valid_indices = labels != -1
                if sum(valid_indices) == 0:
                    continue

                signals = signals[valid_indices]
                features = features[valid_indices]
                labels = labels[valid_indices]

                outputs, _ = model(signals, features)
                loss = criterion(outputs, labels)

                val_loss += loss.item()
                _, predicted = outputs.max(1)
                val_total += labels.size(0)
                val_correct += predicted.eq(labels).sum().item()

                # Track validation per-class accuracy
                for i in range(len(labels)):
                    label = labels[i].item()
                    if 0 <= label < num_classes:  # Ensure valid class index
                        val_class_total[label] += 1
                        if predicted[i].item() == label:
                            val_class_correct[label] += 1

                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        val_loss = val_loss / max(1, len(val_loader))
        val_acc = 100. * val_correct / max(1, val_total)

        # Calculate F1 metrics (guard against single-class validation batches)
        if len(all_preds) > 0 and len(set(all_labels)) > 1:
            try:
                f1_macro = f1_score(all_labels, all_preds, average='macro')
                f1_weighted = f1_score(all_labels, all_preds, average='weighted')
            except:
                f1_macro = f1_weighted = 0
        else:
            f1_macro = f1_weighted = 0

        val_metrics = {
            'accuracy': val_acc / 100,
            'f1_macro': f1_macro,
            'f1_weighted': f1_weighted
        }

        history['val_loss'].append(val_loss)
        history['val_acc'].append(val_acc)
        history['val_metrics'].append(val_metrics)

        # When SWA phase is reached, update SWA model
        if epoch >= swa_start:
            swa_model.update_parameters(model)

        # Print training results with per-class accuracy for key classes
        print(
            f'Epoch {epoch + 1}/{epochs}: '
            f'Train Loss={train_loss:.4f}, '
            f'Train Acc={train_acc:.2f}%, '
            f'Val Loss={val_loss:.4f}, '
            f'Val Acc={val_acc:.2f}%, '
            f'F1 Macro={f1_macro:.4f}'
        )

        # Print per-class accuracy for training and validation
        print("Per-class accuracy (training):")
        for cls in sorted([c for c in class_total if class_total[c] > 0]):
            if class_total[cls] > 0:
                acc = 100 * class_correct[cls] / class_total[cls]
                print(f"  Class {cls}: {acc:.2f}% ({class_correct[cls]}/{class_total[cls]})")

        print("Per-class accuracy (validation):")
        for cls in sorted([c for c in val_class_total if val_class_total[c] > 0]):
            if val_class_total[cls] > 0:
                acc = 100 * val_class_correct[cls] / val_class_total[cls]
                print(f"  Class {cls}: {acc:.2f}% ({val_class_correct[cls]}/{val_class_total[cls]})")

        # Early stopping
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            # BUGFIX: state_dict().copy() is a *shallow* dict copy — the
            # tensors keep training in place, so the "best" snapshot would
            # silently drift to the final weights. Clone each tensor instead.
            best_model = {k: v.detach().clone() for k, v in model.state_dict().items()}
            patience_counter = 0
        else:
            patience_counter += 1
            if patience_counter >= patience:
                print(f'Early stopping at epoch {epoch + 1}')
                break

    # If using SWA, update BN statistics
    if epoch >= swa_start:
        print("Updating SWA BatchNorm statistics...")
        update_bn_custom(train_loader, swa_model, device)
        model.load_state_dict(swa_model.module.state_dict())
        print("Switched to SWA model")
    else:
        # Restore best model
        if best_model is not None:
            model.load_state_dict(best_model)

    # Get final class centers
    class_centers = class_center_tracker.get_centers()
    print(f"Class centers after epoch {epoch + 1}: {class_centers}")

    return model, history, class_centers


def get_class_weights(train_loader):
    """Compute inverse-frequency class weights from a training loader,
    with extra boosts for the historically hard classes 10 and 31.

    Samples labeled -1 (open-set "unknown") are ignored. The returned
    tensor is indexed by class index and sized by the dataset's label_dict.
    """
    # Tally how many samples each known class contributes.
    counts = {}
    for _, _, batch_labels in train_loader:
        for lbl in batch_labels:
            idx = lbl.item()
            if idx == -1:  # skip the open-set "unknown" label
                continue
            counts[idx] = counts.get(idx, 0) + 1

    # Inverse-frequency weighting: rare classes get larger weights.
    total = sum(counts.values())
    n_classes = len(counts)
    weights = {idx: total / (n_classes * n) for idx, n in counts.items()}

    # Empirically problematic classes get additional emphasis.
    if 10 in weights:
        weights[10] *= 1.5
    if 31 in weights:
        weights[31] *= 1.3

    # Materialize as a tensor sized by the full label vocabulary; classes
    # absent from the loader keep the default weight of 1.
    weight_tensor = torch.ones(len(train_loader.dataset.dataset.label_dict))
    for idx, w in weights.items():
        weight_tensor[idx] = w

    return weight_tensor

def run_all(train_data, test_path, known_classes, use_cache=True):
    """Full training-and-evaluation pipeline for a single model.

    Trains a DualBranchModel on `train_data` with a stratified 80/20
    train/val split, tracks class centers during training, optimizes
    open-set thresholds on the validation set (updating CFG in place),
    saves the model and centers, then runs the open-set evaluation and
    threshold visualizations on the test set.

    Args:
        train_data: SteelDataset holding the training samples.
        test_path: path to the test data consumed by SteelDataset.
        known_classes: list of original labels treated as "known".
        use_cache: reuse cached preprocessed features when available.

    Returns:
        Tuple of (model, history, threshold_recommendations).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"💻 使用设备: {device}")

    # Verify the training set contains every known class; abort early if not
    unique_labels = set(train_data.raw_labels)
    missing_classes = set(known_classes) - unique_labels
    if missing_classes:
        print(f"⚠️ 警告: 训练集缺少以下类别: {missing_classes}")
        print(f"训练集唯一标签: {sorted(unique_labels)}")
        print("请检查数据或修改已知类别列表")
        raise ValueError(f"训练集缺少这些类别: {missing_classes}")

    # Print the original-label -> mapped-index table
    print("原始标签到索引映射:")
    for original_label, idx in train_data.label_dict.items():
        print(f"原始标签 {original_label} -> 索引 {idx}")

    # Convert the known original labels into their mapped indices
    known_indices = [train_data.label_dict.get(label) for label in known_classes if label in train_data.label_dict]
    print(f"已知类别原始标签: {known_classes}")
    print(f"对应的映射索引: {known_indices}")

    # Load the test data, sharing the training label mapping
    test_data = SteelDataset(
        test_path,
        augment=False,
        label_dict=train_data.label_dict,
        known_classes=known_classes,
        use_cache=use_cache
    )

    # Train/validation split
    from sklearn.model_selection import train_test_split

    # Materialize the full dataset to collect labels for stratification
    all_signals = []
    all_features = []
    all_labels = []

    for i in range(len(train_data)):
        signal, features, label = train_data[i]
        all_signals.append(signal)
        all_features.append(features)
        all_labels.append(label.item())

    # Convert to numpy arrays.
    # NOTE(review): only all_labels is used below; all_signals/all_features
    # are converted but never read afterwards.
    all_signals = np.array([s.numpy() for s in all_signals])
    all_features = np.array([f.numpy() for f in all_features])
    all_labels = np.array(all_labels)

    # Stratified 80/20 index split (seeded for reproducibility)
    train_idx, val_idx = train_test_split(
        np.arange(len(all_labels)),
        test_size=0.2,
        stratify=all_labels,
        random_state=CFG["seed"]
    )

    # Wrap the index splits as Subsets of the original dataset
    from torch.utils.data import Subset
    train_subset = Subset(train_data, train_idx)
    val_subset = Subset(train_data, val_idx)

    # Data loaders (only the training loader is shuffled)
    train_loader = DataLoader(
        train_subset,
        batch_size=CFG["batch_size"],
        shuffle=True,
        num_workers=CFG["num_workers"],
        pin_memory=True
    )

    val_loader = DataLoader(
        val_subset,
        batch_size=CFG["batch_size"],
        shuffle=False,
        num_workers=CFG["num_workers"],
        pin_memory=True
    )

    test_loader = DataLoader(
        test_data,
        batch_size=CFG["batch_size"],
        shuffle=False,
        num_workers=CFG["num_workers"],
        pin_memory=True
    )

    # The number of classes comes from the label dictionary
    num_classes = len(train_data.label_dict)
    print(f"发现 {num_classes} 个类别: {train_data.label_dict}")

    # Build the dual-branch (signal + statistical features) model
    model = DualBranchModel(
        signal_input_dim=CFG["signal_length"],
        feat_input_dim=CFG["input_dim"],
        hidden_dim=CFG["hidden_dim"],
        num_classes=num_classes
    ).to(device)

    # Focal loss to emphasize hard examples
    criterion = FocalLoss(alpha=2.5, gamma=3.0)

    # Separate the parameters not belonging to either branch by identity,
    # so they can get the base learning rate below
    signal_params = list(map(id, model.signal_branch.parameters()))
    feature_params = list(map(id, model.feature_branch.parameters()))
    base_params = filter(lambda p: id(p) not in signal_params + feature_params,
                         model.parameters())

    # Optimizer with per-component learning rates
    optimizer = torch.optim.AdamW([
        {'params': model.signal_branch.parameters(), 'lr': CFG["lr"] * 1.5},
        {'params': model.feature_branch.parameters(), 'lr': CFG["lr"] * 1.2},
        {'params': base_params, 'lr': CFG["lr"]}
    ], weight_decay=CFG["weight_decay"])

    # Train the model (also returns class centers tracked during training)
    print("🚀 开始训练...")
    model, history, dynamic_class_centers = train_model(
        model,
        train_loader,
        val_loader,
        optimizer,
        criterion,
        device,
        epochs=CFG["epochs"],
        patience=CFG["patience"],
        lambda_contrastive=0.8,
        progressive_freezing=True
    )

    # Plot the training curves
    plot_training_history(history)

    # Class centers tracked dynamically during training
    print("🧮 计算类中心...")
    class_centers = dynamic_class_centers

    if class_centers is not None:
        # Visualize distribution of distances to understand thresholds
        print("📊 Visualizing feature distributions...")
        train_full_loader = DataLoader(
            train_data,
            batch_size=CFG["batch_size"],
            shuffle=False,
            num_workers=CFG["num_workers"]
        )

        visualize_feature_distributions(
            model,
            train_full_loader,
            test_loader,
            device,
            class_centers
        )

        # Optimize thresholds based on validation data
        print("🔍 Optimizing thresholds...")
        best_conf_thresh, best_dist_thresh = optimize_open_set_thresholds(
            model,
            val_loader,
            class_centers,
            device,
            known_indices
        )

        # Update thresholds in CFG (mutates global config in place)
        print(f"⚙️ Updating thresholds to optimal values: conf={best_conf_thresh:.2f}, dist={best_dist_thresh:.2f}")
        CFG["conf_threshold"] = best_conf_thresh
        CFG["distance_threshold"] = best_dist_thresh

        # Recompute class centers with the improved (post-training) routine,
        # replacing the dynamically-tracked ones
        class_centers = calculate_class_centers(
            model, train_full_loader, device, known_indices
        )

    CFG["class_centers"] = class_centers
    # Persist the class centers to disk
    save_class_centers(class_centers, path=os.path.join(CFG["output_dir"], "class_centers.pkl"))


    # Per-class threshold object for open-set decisions
    class_thresholds = setup_class_specific_thresholds()

    # Save the model checkpoint (with history, config and centers)
    model_path = os.path.join(CFG["output_dir"], f"improved_steel_model.pth")
    save_experiment(model, history, CFG, model_path, class_centers)

    # Open-set evaluation on the test set
    print("👁️ 执行开放集评估...")
    predicted_labels, mapped_labels, probs = evaluate_open_set_improved(
        model,
        test_loader,
        device,
        known_classes,
        threshold=CFG["conf_threshold"],
        class_centers=class_centers,
        distance_threshold=CFG["distance_threshold"],
        thresholds=class_thresholds
    )

    # Generate class-specific threshold visualizations / recommendations
    print("🎨 生成类别特定阈值可视化...")
    threshold_recommendations = generate_threshold_visualizations(
        model,
        train_loader,
        val_loader,
        test_loader,
        device,
        class_centers,
        known_indices,
        class_thresholds
    )

    # Plot the confidence/distance decision boundary
    print("📊 绘制阈值决策边界...")
    confidences, distances, binary_labels = plot_threshold_visualization(
        model,
        test_loader,
        device,
        known_indices,
        class_centers
    )

    # More detailed per-class threshold analysis on the validation set
    print("📊 生成详细的类别特定阈值分析...")
    visualize_class_specific_thresholds(
        model,
        val_loader,
        device,
        class_centers,
        class_thresholds,
        known_indices
    )
    print("✨ 完成！模型训练和评估已完成。")

    return model, history, threshold_recommendations


def plot_training_history(history):
    """Plot loss, accuracy, contrastive-loss and F1 curves from a training history dict."""
    plt.figure(figsize=(15, 10))

    def _finish(xlabel, ylabel, title):
        # Shared axis decoration applied to every panel
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.legend()
        plt.title(title)
        plt.grid(alpha=0.3)

    # Panel 1: training vs validation loss
    plt.subplot(2, 2, 1)
    plt.plot(history['train_loss'], label='Train Loss')
    plt.plot(history['val_loss'], label='Val Loss')
    _finish('Epoch', 'Loss', 'Training and Validation Loss')

    # Panel 2: training vs validation accuracy
    plt.subplot(2, 2, 2)
    plt.plot(history['train_acc'], label='Train Acc')
    plt.plot(history['val_acc'], label='Val Acc')
    _finish('Epoch', 'Accuracy (%)', 'Training and Validation Accuracy')

    # Panel 3: contrastive loss
    plt.subplot(2, 2, 3)
    plt.plot(history['contrastive_loss'], label='Contrastive Loss')
    _finish('Epoch', 'Loss', 'Contrastive Loss')

    # Panel 4: macro / weighted F1 across validation epochs
    plt.subplot(2, 2, 4)
    per_epoch = history['val_metrics']
    plt.plot([m['f1_macro'] for m in per_epoch], label='F1 Macro')
    plt.plot([m['f1_weighted'] for m in per_epoch], label='F1 Weighted')
    _finish('Epoch', 'F1 Score', 'Validation F1 Scores')

    plt.tight_layout()
    plt.show()


def evaluate_trained_model(model_path, test_path, known_classes):
    """
    Load a saved model checkpoint and evaluate it, including the confusion matrix.

    Args:
        model_path: path to the saved checkpoint (expects keys
            'config', 'model_state_dict' and optionally 'class_centers').
        test_path: path to the test data consumed by SteelDataset.
        known_classes: list of original labels treated as "known".

    Returns:
        Tuple of (predicted_labels, mapped_labels, probs) from the
        open-set evaluation.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"💻 使用设备: {device}")

    # Load the checkpoint: config, weights, and (if present) class centers.
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    checkpoint = torch.load(model_path, map_location=device)
    config = checkpoint['config']
    class_centers = checkpoint.get('class_centers', None)

    # Rebuild the model with the architecture recorded in the checkpoint
    model = DualBranchModel(
        signal_input_dim=config["signal_length"],
        feat_input_dim=config["input_dim"],
        hidden_dim=config["hidden_dim"],
        num_classes=config["num_classes"]
    ).to(device)

    # Restore the trained weights
    model.load_state_dict(checkpoint['model_state_dict'])

    # Report what kind of class-center data was found in the checkpoint
    if class_centers is not None:
        print("✅ 已加载类中心信息:")
        for class_idx, center in class_centers.items():
            if isinstance(center, dict) and 'centers' in center:
                print(f"  类别 {class_idx}: {len(center['centers'])} 个中心点")
            else:
                print(f"  类别 {class_idx}: 单个中心点")
    else:
        print("⚠️ 警告: 模型中没有保存类中心信息，将使用默认阈值进行分类")

    # Load the test data (label_dict=None: the dataset builds its own mapping)
    test_dataset = SteelDataset(
        test_path,
        augment=False,
        label_dict=None,
        known_classes=known_classes,
        use_cache=True
    )

    test_loader = DataLoader(
        test_dataset,
        batch_size=config["batch_size"],
        shuffle=False,
        num_workers=config["num_workers"]
    )

    # Print the label mapping used by the test dataset
    label_dict = test_dataset.label_dict
    print("标签映射字典:")
    for original_label, idx in label_dict.items():
        print(f"  原始标签 {original_label} -> 索引 {idx}")

    # Convert the known original labels into their mapped indices
    known_indices = [label_dict.get(label) for label in known_classes if label in label_dict]
    print(f"已知类别原始标签: {known_classes}")
    print(f"对应的映射索引: {known_indices}")

    # Per-class threshold object for open-set decisions
    class_thresholds = setup_class_specific_thresholds()

    # Open-set evaluation and confusion-matrix analysis
    print("👁️‍ 执行开放集评估和混淆矩阵分析...")
    predicted_labels, mapped_labels, probs = evaluate_open_set_improved(
        model,
        test_loader,
        device,
        known_classes,
        threshold=config["conf_threshold"],
        class_centers=class_centers,  # centers loaded from the checkpoint
        distance_threshold=config["distance_threshold"],
        thresholds=class_thresholds
    )

    # Generate class-specific threshold visualizations.
    # NOTE(review): both loaders below wrap the TEST dataset as a stand-in
    # for train/val data — the resulting plots are illustrative only.
    print("🎨 生成类别特定阈值可视化...")
    train_loader = DataLoader(
        test_dataset,  # test data used as an example stand-in
        batch_size=config["batch_size"],
        shuffle=False,
        num_workers=config["num_workers"]
    )

    # Stand-in "validation" loader (a real validation set should be used)
    val_loader = DataLoader(
        test_dataset,  # test data used as an example stand-in
        batch_size=config["batch_size"],
        shuffle=False,
        num_workers=config["num_workers"]
    )

    # Threshold recommendations + visualizations
    threshold_recommendations = generate_threshold_visualizations(
        model,
        train_loader,
        val_loader,
        test_loader,
        device,
        class_centers,
        known_indices,
        class_thresholds
    )

    # Detailed per-class threshold analysis
    print("📊 生成详细的类别特定阈值分析...")
    visualize_class_specific_thresholds(
        model,
        test_loader,
        device,
        class_centers,
        class_thresholds,
        known_indices
    )

    # Plot the confidence/distance decision boundary
    print("📊 绘制阈值决策边界...")
    confidences, distances, binary_labels = plot_threshold_visualization(
        model,
        test_loader,
        device,
        known_indices,
        class_centers
    )

    return predicted_labels, mapped_labels, probs


def test_model(train_path, test_path, known_classes, use_cache=True, force_recompute=False, use_ensemble=False, n_models=3):
    """Run the improved model implementation end to end.

    Args:
        train_path: path to the training data consumed by SteelDataset.
        test_path: path to the test data.
        known_classes: list of original labels treated as "known".
        use_cache: reuse cached preprocessed features when available.
        force_recompute: ignore any cache and recompute features.
        use_ensemble: train an ensemble of `n_models` models instead of one.
        n_models: number of ensemble members when `use_ensemble` is True.

    Returns:
        (model, class_centers) when `use_ensemble` is True, otherwise
        (model, history).
    """
    print("📂 加载数据...")

    # Ensure the output directory exists
    os.makedirs(CFG["output_dir"], exist_ok=True)

    # Load training data with caching enabled
    train_data = SteelDataset(
        train_path,
        augment=True,
        use_cache=use_cache,
        force_recompute=force_recompute
    )

    # Run training and evaluation (single model or ensemble)
    if use_ensemble:
        print("🚀 使用集成模型进行训练和评估...")
        # BUG FIX: run_ensemble returns three values (ensemble, centers,
        # threshold recommendations); the old 2-value unpack raised
        # ValueError. Keep the original 2-tuple return for callers.
        model, class_centers, _threshold_recs = run_ensemble(
            train_data, test_path, known_classes, n_models=n_models,
            use_cache=use_cache)
        return model, class_centers
    else:
        print("🚀 使用单个模型进行训练和评估...")
        # BUG FIX: run_all returns (model, history, threshold_recommendations);
        # the old 2-value unpack raised ValueError.
        model, history, _threshold_recs = run_all(
            train_data, test_path, known_classes, use_cache=use_cache)
        return model, history


def run_ensemble(train_data, test_path, known_classes, n_models=5, use_cache=True):
    """Train and evaluate an ensemble of models with different seeds.

    Each member gets its own seed, stratified train/val split and slightly
    perturbed learning rate. Class centers from all members are merged by
    averaging, and the ensemble is evaluated in the open-set setting.

    Args:
        train_data: SteelDataset holding the training samples.
        test_path: path to the test data consumed by SteelDataset.
        known_classes: list of original labels treated as "known".
        n_models: number of ensemble members (capped at 8 by the seed list).
        use_cache: reuse cached preprocessed features when available.

    Returns:
        Tuple of (ensemble, final_centers, threshold_recommendations).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"💻 使用设备: {device}")

    # Set number of classes based on the label dictionary
    CFG["num_classes"] = len(train_data.label_dict)
    print(f"Setting num_classes to {CFG['num_classes']} based on dataset label dictionary")

    # One distinct seed per ensemble member
    seeds = [42, 123, 456, 789, 999, 111, 222, 333][:n_models]

    models = []
    all_class_centers = []

    # Train each ensemble member
    for i, seed in enumerate(seeds):
        print(f"\n🔄 训练模型 {i + 1}/{len(seeds)}，种子 = {seed}")

        # Reseed all RNGs for this member
        set_seed(seed)

        # Fresh model instance per member
        model = DualBranchModel(
            signal_input_dim=CFG["signal_length"],
            feat_input_dim=CFG["input_dim"],
            hidden_dim=CFG["hidden_dim"],
            num_classes=CFG["num_classes"]
        ).to(device)

        # Per-member train/validation split
        from sklearn.model_selection import train_test_split

        # Materialize the dataset to collect labels for stratification.
        # NOTE(review): this re-extracts the full dataset on every member;
        # only all_labels is used below.
        all_signals = []
        all_features = []
        all_labels = []

        for j in range(len(train_data)):
            signal, features, label = train_data[j]
            all_signals.append(signal)
            all_features.append(features)
            all_labels.append(label.item())

        # Convert to numpy arrays
        all_signals = np.array([s.numpy() for s in all_signals])
        all_features = np.array([f.numpy() for f in all_features])
        all_labels = np.array(all_labels)

        # Stratified split, seeded differently per member
        train_idx, val_idx = train_test_split(
            np.arange(len(all_labels)),
            test_size=0.2,
            stratify=all_labels,
            random_state=seed  # a different split for each member
        )

        # Wrap the index splits as Subsets
        from torch.utils.data import Subset
        train_subset = Subset(train_data, train_idx)
        val_subset = Subset(train_data, val_idx)

        # Data loaders for this member
        train_loader = DataLoader(
            train_subset,
            batch_size=CFG["batch_size"],
            shuffle=True,
            num_workers=CFG["num_workers"],
            pin_memory=True
        )

        val_loader = DataLoader(
            val_subset,
            batch_size=CFG["batch_size"],
            shuffle=False,
            num_workers=CFG["num_workers"],
            pin_memory=True
        )

        # Focal loss (slightly softer than the single-model run)
        criterion = FocalLoss(alpha=2.0, gamma=2.5)

        # Partition parameters by branch via name matching — safer than
        # the id()-based filtering used in run_all
        signal_params = []
        feature_params = []
        other_params = []

        # Classify each parameter by the branch it belongs to
        for name, param in model.named_parameters():
            if 'signal_branch' in name:
                signal_params.append(param)
            elif 'feature_branch' in name:
                feature_params.append(param)
            else:
                other_params.append(param)

        # Small per-member learning-rate perturbation for diversity
        base_lr = CFG["lr"] * (0.9 + 0.2 * i / max(1, n_models - 1))  # varies slightly across members

        # Optimizer with per-component learning rates
        optimizer = torch.optim.AdamW([
            {'params': signal_params, 'lr': base_lr * 1.3},
            {'params': feature_params, 'lr': base_lr * 1.1},
            {'params': other_params, 'lr': base_lr}
        ], weight_decay=CFG["weight_decay"])

        # Train this member (shorter schedule than the single-model run)
        model, history, dynamic_class_centers = train_model(
            model,
            train_loader,
            val_loader,
            optimizer,
            criterion,
            device,
            epochs=max(25, CFG["epochs"] // 2),  # shorter training for ensemble members
            patience=CFG["patience"],
            lambda_contrastive=0.7
        )

        # Collect the member and its class centers
        models.append(model)
        all_class_centers.append(dynamic_class_centers)

        # Save this member's checkpoint
        model_path = os.path.join(CFG["output_dir"], f"model_seed_{seed}.pth")
        save_experiment(model, history, CFG, model_path)

    # Wrap all members into one ensemble
    ensemble = ModelEnsemble(models, device)

    # Load the test data, sharing the training label mapping
    test_data = SteelDataset(
        test_path,
        augment=False,
        label_dict=train_data.label_dict,
        known_classes=known_classes,
        use_cache=use_cache
    )

    test_loader = DataLoader(
        test_data,
        batch_size=CFG["batch_size"],
        shuffle=False,
        num_workers=CFG["num_workers"],
        pin_memory=True
    )

    # Validation loader — reuses the LAST member's val_subset from the loop
    val_loader = DataLoader(
        val_subset,
        batch_size=CFG["batch_size"],
        shuffle=False,
        num_workers=CFG["num_workers"],
        pin_memory=True
    )

    # Convert the known original labels into their mapped indices
    known_indices = [train_data.label_dict.get(label) for label in known_classes if label in train_data.label_dict]

    # Merge per-member class centers into flat per-class lists
    # (flattens multi-center dict format into individual center vectors)
    merged_centers = {}
    for model_centers in all_class_centers:
        if model_centers is not None:
            for c, center_data in model_centers.items():
                if c not in merged_centers:
                    merged_centers[c] = []

                # Handle both the multi-center and single-center formats
                if isinstance(center_data, dict) and 'centers' in center_data:
                    centers = center_data['centers']
                    for center in centers:
                        merged_centers[c].append(center)
                else:
                    merged_centers[c].append(center_data)

    # Average the collected centers per class
    final_centers = {}
    for c, centers in merged_centers.items():
        if centers:
            final_centers[c] = np.mean(np.array(centers), axis=0)

    # Per-class threshold object for open-set decisions
    class_thresholds = setup_class_specific_thresholds()

    # Open-set evaluation with the ensemble
    print("🔍 使用集成模型执行开放集评估...")
    predicted_labels, mapped_labels, probs = evaluate_open_set_improved(
        ensemble,
        test_loader,
        device,
        known_classes,
        threshold=CFG["conf_threshold"],
        class_centers=final_centers,
        distance_threshold=CFG["distance_threshold"],
        thresholds=class_thresholds
    )

    # Generate class-specific threshold visualizations
    print("🎨 生成类别特定阈值可视化...")
    # Full (unshuffled) training loader for the visualizations
    train_full_loader = DataLoader(
        train_data,
        batch_size=CFG["batch_size"],
        shuffle=False,
        num_workers=CFG["num_workers"]
    )

    threshold_recommendations = generate_threshold_visualizations(
        ensemble,
        train_full_loader,
        val_loader,
        test_loader,
        device,
        final_centers,
        known_indices,
        class_thresholds
    )

    # Detailed per-class threshold analysis
    print("📊 生成详细的类别特定阈值分析...")
    visualize_class_specific_thresholds(
        ensemble,
        test_loader,
        device,
        final_centers,
        class_thresholds,
        known_indices
    )

    # Plot the confidence/distance decision boundary
    print("📊 绘制阈值决策边界...")
    confidences, distances, binary_labels = plot_threshold_visualization(
        ensemble,
        test_loader,
        device,
        known_indices,
        final_centers
    )

    # Save the ensemble checkpoint.
    # NOTE(review): per_model_class_centers built below (int-keyed copies)
    # is never used — the saved dict stores all_class_centers directly.
    # Also, a None entry in all_class_centers would crash center.items().
    ensemble_path = os.path.join(CFG["output_dir"], "ensemble_model.pth")
    per_model_class_centers = []
    if all_class_centers:
        for center in all_class_centers:
            per_model_class_centers.append({
                int(k): v for k, v in center.items()
            })
    else:
        print("⚠️ 警告：未收集到任何模型类中心，per_model_class_centers 将为空")

    # Save the ensemble (member weights, centers, config, seeds)
    ensemble_data = {
        'models': [m.state_dict() for m in models],
        'class_centers': final_centers,  # averaged class centers
        'per_model_class_centers': all_class_centers,  # each member's own class centers
        'config': CFG,
        'seeds': seeds
    }
    torch.save(ensemble_data, ensemble_path)

    print(f"✅ 模型和各模型类中心已保存到: {ensemble_path}")
    return ensemble, final_centers, threshold_recommendations


def augment_signal(signal, prob=0.9):  # Increased from 0.8
    """Randomly augment a 1-D signal.

    With probability `prob`, applies a random subset of: additive noise,
    temporal shift, amplitude scaling, and zeroing of a short segment.

    Args:
        signal: 1-D signal (the zero-segment step assumes slice assignment
            works, e.g. a numpy array — TODO confirm caller always passes one).
        prob: probability that any augmentation is attempted at all.

    Returns:
        The (possibly augmented) signal.
    """
    if random.random() < prob:
        # Additive noise with a controlled amplitude range
        if random.random() < 0.75:  # Slightly reduced from 0.8
            signal = add_noise(signal, level=random.uniform(0.015, 0.045))

        # Temporal shift with a limited maximum offset
        if random.random() < 0.65:  # Slightly reduced from 0.7
            signal = shift_signal(signal, max_shift=random.randint(5, 20))

        # Conservative amplitude scaling
        if random.random() < 0.6:  # Slightly reduced from 0.65
            signal = scale_signal(signal, scale_range=(0.8, 1.2))

        # Zero out a short segment — but not too often, and only when the
        # signal is long enough to hold it (BUG FIX: the unguarded
        # randint(0, len(signal) - zero_length - 1) raised ValueError for
        # short signals).
        if random.random() < 0.3:  # Reduced from 0.4
            zero_length = random.randint(5, 12)  # Reduced maximum length
            if len(signal) > zero_length + 1:
                start_idx = random.randint(0, len(signal) - zero_length - 1)
                signal[start_idx:start_idx + zero_length] = 0

    return signal

def update_bn_custom(loader, model, device=None):
    """
    Custom version of torch.optim.swa_utils.update_bn for dual-input models.

    Recomputes BatchNorm running statistics by forwarding every batch from
    `loader` through `model`, feeding both the signal and feature inputs
    (the stock `update_bn` only supports single-input models). Momentum is
    temporarily set to None so BN accumulates a cumulative moving average.

    Args:
        loader: DataLoader yielding (signals, features, labels) batches.
        model: Model with batch normalization layers to refresh.
        device: Device to use (defaults to the model parameters' device).
    """
    if not loader.dataset:
        return

    # BUG FIX: remember the caller's train/eval mode and restore it on every
    # exit path — torch's own update_bn does the same; the old version left
    # the model permanently in train mode.
    was_training = model.training
    model.train()

    # Reset BN statistics and switch each layer to cumulative-average mode
    momenta = {}
    for module in model.modules():
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module.reset_running_stats()
            momenta[module] = module.momentum
            module.momentum = None
            module.num_batches_tracked *= 0

    # No BN layers — nothing to update
    if not momenta:
        model.train(was_training)
        return

    # Default to wherever the model parameters live
    if device is None:
        device = next(model.parameters()).device

    # Forward every batch with both inputs so BN stats are recomputed
    with torch.no_grad():
        for signals, features, _ in loader:
            signals = signals.to(device)
            features = features.to(device)
            model(signals, features)

    # Restore the original momentum values and the caller's mode
    for module, momentum in momenta.items():
        module.momentum = momentum
    model.train(was_training)


def preprocess_data(signal_data):
    """Standardize each signal to zero mean and unit variance.

    The same transform is applied to train and test data so both share a
    consistent preprocessing. The input is copied; the original is untouched.
    """
    normalized = np.copy(signal_data)

    for idx, raw in enumerate(normalized):
        centered = raw - np.mean(raw)
        spread = np.std(raw)
        # Constant signals (std == 0) are only mean-centered to avoid a
        # division by zero.
        normalized[idx] = centered / spread if spread > 0 else centered

    return normalized


def optimize_open_set_thresholds(model, val_loader, class_centers, device, known_classes):
    """Automatically find optimal thresholds for open-set recognition.

    Runs a grid search over (confidence, distance) threshold pairs on the
    validation set. A sample is rejected as "unknown" (-1) when its max
    softmax probability falls below the confidence threshold OR its minimum
    distance to any known class center exceeds the distance threshold.
    Candidates are ranked by a combined score of macro-F1, balanced
    accuracy, and unknown-class precision/recall.

    Args:
        model: trained model exposing __call__(signal, features) and
            extract_features(signal, features).
        val_loader: validation DataLoader (plain dataset or Subset).
        class_centers: dict mapping class index to either a center vector or
            a {'centers': [...]} multi-center dict.
        device: torch device to run inference on.
        known_classes: original labels of the known classes.

    Returns:
        Tuple of (best_conf_threshold, best_dist_threshold).
    """
    # Confidence threshold candidates
    conf_thresholds = np.linspace(0.3, 0.9, 10)  # More granular, higher range

    # Distance threshold candidates — focus on lower values
    dist_thresholds = np.linspace(1.0, 6.0, 10)  # More granular

    # Resolve label mappings whether the loader wraps a Subset or not
    if hasattr(val_loader.dataset, 'dataset'):
        # This is a Subset
        label_dict = val_loader.dataset.dataset.label_dict
        reverse_dict = val_loader.dataset.dataset.reverse_dict
    else:
        # This is a regular dataset
        label_dict = val_loader.dataset.label_dict
        reverse_dict = val_loader.dataset.reverse_dict

    mapped_known_classes = [label_dict.get(c) for c in known_classes if c in label_dict]

    # Collect logits, embeddings and original labels for the whole val set
    all_true_labels = []
    all_outputs = []
    all_embeddings = []

    with torch.no_grad():
        for signal, features, labels in val_loader:
            signal, features = signal.to(device), features.to(device)

            # Logits plus the embedding used for distance computation
            outputs, _ = model(signal, features)
            embeddings = model.extract_features(signal, features)

            all_outputs.append(outputs.cpu())
            all_embeddings.append(embeddings.cpu())
            # Map indices back to original labels; unmapped ones become -1
            all_true_labels.extend([reverse_dict.get(l.item(), -1) for l in labels])

    # Concatenate all batches
    all_outputs = torch.cat(all_outputs, dim=0)
    all_embeddings = torch.cat(all_embeddings, dim=0)

    # Softmax probabilities and per-sample max confidence
    all_probs = F.softmax(all_outputs, dim=1).numpy()
    max_probs = np.max(all_probs, axis=1)

    # Minimum Euclidean distance from each embedding to any known center
    all_min_distances = []
    for embedding in all_embeddings:
        embedding_np = embedding.numpy()
        min_distance = float('inf')

        for class_idx in mapped_known_classes:
            if class_idx in class_centers:
                center = class_centers[class_idx]
                # Handle both the multi-center and single-center formats
                if isinstance(center, dict) and 'centers' in center:
                    # Multiple centers case
                    centers = center['centers']
                    class_dist = min(np.linalg.norm(embedding_np - c) for c in centers)
                else:
                    # Single center case
                    class_dist = np.linalg.norm(embedding_np - center)

                min_distance = min(min_distance, class_dist)

        all_min_distances.append(min_distance)

    # Grid search over every (confidence, distance) threshold pair
    results = []
    for conf_thresh in conf_thresholds:
        for dist_thresh in dist_thresholds:
            # Apply both rejection criteria
            predictions = []
            for i in range(len(all_probs)):
                if max_probs[i] < conf_thresh or all_min_distances[i] > dist_thresh:
                    predictions.append(-1)  # Unknown
                else:
                    # NOTE(review): argmax is an index into the model's
                    # output dimension, but is used here to index
                    # mapped_known_classes — this is only correct when the
                    # output order matches mapped_known_classes (e.g. the
                    # label_dict maps known classes to 0..n-1 in order).
                    # Confirm against DualBranchModel's output layout.
                    pred_idx = np.argmax(all_probs[i])
                    if pred_idx < len(mapped_known_classes):
                        pred_class_idx = mapped_known_classes[pred_idx]
                        predictions.append(reverse_dict.get(pred_class_idx, -1))
                    else:
                        predictions.append(-1)

            # Multiple metrics for better threshold selection
            f1_macro = f1_score(all_true_labels, predictions, average='macro', labels=[-1] + known_classes)
            f1_weighted = f1_score(all_true_labels, predictions, average='weighted', labels=[-1] + known_classes)
            accuracy = accuracy_score(all_true_labels, predictions)

            # Precision/recall of the binary "unknown vs known" decision
            unknown_precision = precision_score(
                [1 if l == -1 else 0 for l in all_true_labels],
                [1 if p == -1 else 0 for p in predictions],
                zero_division=0
            )

            unknown_recall = recall_score(
                [1 if l == -1 else 0 for l in all_true_labels],
                [1 if p == -1 else 0 for p in predictions],
                zero_division=0
            )

            # Balanced accuracy to compensate for class imbalance
            balanced_acc = balanced_accuracy_score(all_true_labels, predictions)

            # Combined score: emphasizes unknown detection while keeping
            # good known-class performance
            combined_score = (
                    f1_macro * 0.3 +
                    balanced_acc * 0.3 +
                    unknown_precision * 0.2 +
                    unknown_recall * 0.2
            )

            results.append({
                'conf_threshold': conf_thresh,
                'dist_threshold': dist_thresh,
                'f1_score': f1_macro,
                'f1_weighted': f1_weighted,
                'accuracy': accuracy,
                'balanced_accuracy': balanced_acc,
                'unknown_precision': unknown_precision,
                'unknown_recall': unknown_recall,
                'combined_score': combined_score
            })

    # Rank candidates by the combined score
    results.sort(key=lambda x: x['combined_score'], reverse=True)

    print("\n🔍 Threshold optimization results (top 5 by combined score):")
    for i, result in enumerate(results[:5]):
        print(f"{i + 1}. Conf: {result['conf_threshold']:.2f}, " +
              f"Dist: {result['dist_threshold']:.2f}, " +
              f"Combined: {result['combined_score']:.4f}, " +
              f"F1: {result['f1_score']:.4f}, " +
              f"Unknown P/R: {result['unknown_precision']:.2f}/{result['unknown_recall']:.2f}")

    # Also rank by F1 for comparison
    f1_sorted = sorted(results, key=lambda x: x['f1_score'], reverse=True)

    print("\nTop 3 by F1 score:")
    for i, result in enumerate(f1_sorted[:3]):
        print(f"{i + 1}. Conf: {result['conf_threshold']:.2f}, " +
              f"Dist: {result['dist_threshold']:.2f}, " +
              f"F1: {result['f1_score']:.4f}")

    best_result = results[0]  # Use the combined score winner
    print(f"\n✅ Best thresholds: Confidence = {best_result['conf_threshold']:.2f}, " +
          f"Distance = {best_result['dist_threshold']:.2f}")

    return best_result['conf_threshold'], best_result['dist_threshold']


def visualize_feature_distributions(model, train_loader, test_loader, device, class_centers):
    """Plot per-class histograms of embedding distances to class centers.

    For each training sample the distance to its *own* class center is
    computed; for each test sample the *minimum* distance to any known
    class center is computed.  Per-class histograms then contrast
    train vs. test-same-class vs. test-other-class distances, which helps
    in choosing an open-set distance threshold.

    Args:
        model: network exposing ``extract_features(signal, features)``.
        train_loader: loader yielding ``(signal, features, labels)`` batches.
        test_loader: loader yielding ``(signal, features, labels)`` batches;
            unknown samples are expected to carry label ``-1``.
        device: torch device used for the forward passes.
        class_centers: mapping ``label -> center`` where a center is either a
            plain embedding vector or a dict with a ``'centers'`` list
            (multi-center case).

    Returns:
        Tuple ``(train_distances, test_distances)`` as numpy arrays
        (train entries are NaN for labels without a center).
    """

    def _dist_to_center(embedding, center):
        # One place for the single-center vs. multi-center distance logic
        # (was duplicated in both loops before).
        if isinstance(center, dict) and 'centers' in center:
            return min(np.linalg.norm(embedding - c) for c in center['centers'])
        return np.linalg.norm(embedding - center)

    model.eval()

    train_labels = []
    train_distances = []
    test_labels = []
    test_distances = []

    # Training data: distance of every sample to its own class center.
    with torch.no_grad():
        for signal, features, labels in train_loader:
            signal, features = signal.to(device), features.to(device)
            embeddings = model.extract_features(signal, features).cpu().numpy()

            for embedding, label_t in zip(embeddings, labels):
                label = label_t.item()
                train_labels.append(label)
                if label in class_centers:
                    train_distances.append(_dist_to_center(embedding, class_centers[label]))
                else:
                    # No center available for this label; keep array alignment.
                    train_distances.append(float('nan'))

    # Test data: minimum distance to any known class center.
    with torch.no_grad():
        for signal, features, labels in test_loader:
            signal, features = signal.to(device), features.to(device)
            embeddings = model.extract_features(signal, features).cpu().numpy()

            for embedding, label_t in zip(embeddings, labels):
                test_labels.append(label_t.item())
                test_distances.append(
                    min(
                        (_dist_to_center(embedding, center)
                         for center in class_centers.values()),
                        default=float('inf'),
                    )
                )

    # Convert to numpy arrays for masking/percentile math below.
    train_distances = np.array(train_distances)
    test_distances = np.array(test_distances)
    train_labels = np.array(train_labels)
    test_labels = np.array(test_labels)

    # Known labels only (unknown samples are labeled -1).
    unique_labels = np.unique(np.concatenate([train_labels, test_labels]))
    unique_labels = unique_labels[unique_labels >= 0]

    # One stacked histogram subplot per known class, on a shared bin grid.
    plt.figure(figsize=(15, 10))
    bins = np.linspace(0, max(np.nanmax(train_distances), np.nanmax(test_distances)) * 1.1, 30)

    for i, label in enumerate(unique_labels):
        plt.subplot(len(unique_labels), 1, i + 1)

        # Training data distances.
        train_mask = train_labels == label
        if np.sum(train_mask) > 0:
            plt.hist(train_distances[train_mask], bins=bins, alpha=0.5,
                     label=f'Train (n={np.sum(train_mask)})', color='blue')

        # Test data distances — same class.
        test_same_mask = test_labels == label
        if np.sum(test_same_mask) > 0:
            plt.hist(test_distances[test_same_mask], bins=bins, alpha=0.5,
                     label=f'Test Same (n={np.sum(test_same_mask)})', color='green')

        # Test data distances — all other known classes (unknowns excluded).
        test_other_mask = (test_labels != label) & (test_labels != -1)
        if np.sum(test_other_mask) > 0:
            plt.hist(test_distances[test_other_mask], bins=bins, alpha=0.3,
                     label=f'Test Other (n={np.sum(test_other_mask)})', color='red')

        plt.xlabel('Distance to Class Center')
        plt.ylabel('Count')
        plt.title(f'Class {label} Distance Distribution')
        plt.legend()
        plt.grid(alpha=0.3)

    plt.tight_layout()
    plt.show()

    # Suggested thresholds derived from the training-distance distribution.
    percentile_95_train = np.nanpercentile(train_distances, 95)
    percentile_99_train = np.nanpercentile(train_distances, 99)

    print(f"Suggested distance thresholds based on training data:")
    print(f"  95th percentile: {percentile_95_train:.3f}")
    print(f"  99th percentile: {percentile_99_train:.3f}")

    return train_distances, test_distances


def analyze_test_data(model, test_loader, device, known_classes, class_centers):
    """Split the test set into known/unknown subsets and evaluate each separately.

    Builds two DataLoaders over the test dataset — one with samples whose
    label is a known class, one with unknown samples (label == -1) — then
    reports closed-set performance on the known subset and the unknown
    detection rate on the unknown subset.

    Returns:
        Tuple ``(known_loader, unknown_loader)``.
    """
    from torch.utils.data import Subset

    dataset = test_loader.dataset

    # Partition sample indices: known classes vs. unknown (label == -1).
    known_indices, unknown_indices = [], []
    for idx in range(len(dataset)):
        _, _, lbl = dataset[idx]
        (known_indices if lbl.item() != -1 else unknown_indices).append(idx)

    known_loader = DataLoader(
        Subset(dataset, known_indices),
        batch_size=CFG["batch_size"],
        shuffle=False,
        num_workers=CFG["num_workers"],
    )
    unknown_loader = DataLoader(
        Subset(dataset, unknown_indices),
        batch_size=CFG["batch_size"],
        shuffle=False,
        num_workers=CFG["num_workers"],
    )

    # Basic split statistics.
    print(f"Test data: {len(dataset)} samples")
    print(f"Known samples: {len(known_indices)}")
    print(f"Unknown samples: {len(unknown_indices)}")

    if known_indices:
        # Closed-set evaluation: unknown rejection fully disabled.
        print("\n🔍 Evaluating only on known classes (closed-set accuracy):")
        evaluate_open_set_improved(
            model,
            known_loader,
            device,
            known_classes,
            threshold=0.0,
            class_centers=class_centers,
            distance_threshold=float('inf'),
        )

    if unknown_indices:
        # Open-set evaluation on unknown samples with the configured thresholds.
        print("\n🔍 Evaluating only on unknown classes (unknown detection):")
        unknown_preds, unknown_labels, _ = evaluate_open_set_improved(
            model,
            unknown_loader,
            device,
            known_classes,
            threshold=CFG["conf_threshold"],
            class_centers=class_centers,
            distance_threshold=CFG["distance_threshold"],
        )

        # Fraction of unknown samples correctly rejected as -1.
        unknown_detection_rate = sum(p == -1 for p in unknown_preds) / len(unknown_preds)
        print(f"Unknown detection rate: {unknown_detection_rate:.2f}")

    return known_loader, unknown_loader


def visualize_datasets(model, train_data, test_data, device, known_classes):
    """Build small-batch loaders over both datasets and plot the feature space.

    Returns:
        Tuple ``(train_loader, test_loader)`` used for the visualization.
    """
    print("🔬 Visualizing feature space...")

    # Keep visualization batches small; never larger than either dataset.
    vis_batch = min(32, len(train_data), len(test_data))

    loaders = {}
    for split, data, do_shuffle in (
        ("train", train_data, True),
        ("test", test_data, False),
    ):
        loaders[split] = DataLoader(
            data,
            batch_size=vis_batch,
            shuffle=do_shuffle,
            num_workers=CFG["num_workers"],
        )

    visualize_feature_space(model, loaders["train"], loaders["test"], device)

    return loaders["train"], loaders["test"]

def save_thresholds(thresholds, path="thresholds.pkl"):
    """Persist the ``.thresholds`` mapping of a thresholds object via pickle."""
    payload = thresholds.thresholds
    with open(path, "wb") as handle:
        pickle.dump(payload, handle)

def load_thresholds(path="thresholds.pkl"):
    """Load a pickled thresholds mapping and wrap it in a fresh thresholds object."""
    with open(path, "rb") as handle:
        stored = pickle.load(handle)
    # Build the default structure first, then overwrite its values.
    holder = setup_class_specific_thresholds()
    holder.thresholds = stored
    return holder

if __name__ == "__main__":
    # Paths to the local train/test spreadsheets — adjust for your machine.
    train_path = r'F:\download_software\labview\Data_save\牌号测试\4_29牌号\train_data.xlsx'
    test_path = r'F:\download_software\labview\Data_save\牌号测试\4_29牌号\test_data.xlsx'

    # Indices of the known classes used during training (e.g. [0, 1, 2]).
    known_classes = CFG["known_classes"]

    os.makedirs(CFG["output_dir"], exist_ok=True)

    # On Windows, cap OpenMP threads to work around a known KMeans memory leak.
    if os.name == 'nt':
        os.environ['OMP_NUM_THREADS'] = '2'
        print("Windows detected, set OMP_NUM_THREADS=2 to prevent KMeans memory leak")

    use_ensemble = True    # True: ensemble of models; False: single model
    n_models = 3
    try:
        # Run model with caching enabled.
        print("🚀 Starting model training...")
        if use_ensemble:
            model, class_centers, threshold_recommendations = run_ensemble(
                SteelDataset(train_path, augment=True, use_cache=True, force_recompute=False),
                test_path,
                known_classes,
                n_models=n_models
            )
        else:
            model, history, threshold_recommendations = run_all(
                SteelDataset(train_path, augment=True, use_cache=True, force_recompute=False),
                test_path,
                known_classes
            )
            # Single-model runs report class centers through the recommendations dict.
            class_centers = threshold_recommendations.get("class_centers", None)

        print("✅ Training and evaluation completed successfully!")

        # Save the main model and class centers (single-model or ensemble mode).
        print("💾 正在保存模型和类中心...")
        model_path = os.path.join(CFG["output_dir"], "model_seed_42.pth")

        if use_ensemble:
            # Ensemble mode: persist the first sub-model plus the class centers.
            torch.save({
                "model_state_dict": model.models[0].state_dict(),
                "class_centers": class_centers
            }, model_path)
            print(f"✅ 集成模型第一个子模型已保存到: {model_path}")
        else:
            torch.save({
                "model_state_dict": model.state_dict(),
                "class_centers": class_centers
            }, model_path)
            print(f"✅ 模型已保存到: {model_path}")

        # Also persist the class centers as a standalone pickle file.
        with open(os.path.join(CFG["output_dir"], "class_centers.pkl"), "wb") as f:
            pickle.dump(class_centers, f)
        print("✅ 类中心已保存为独立文件 class_centers.pkl")

    except Exception as e:
        # Top-level boundary: report the error and print the traceback.
        # (traceback is already imported at module top — the redundant
        # in-except import was removed.)
        print(f"❌ Error: {str(e)}")
        traceback.print_exc()