import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, random_split, TensorDataset
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from gan_model import Generator as VanillaGenerator
from wgan_model import Generator as WGANGenerator

class SignalDataset(Dataset):
    """Torch dataset pairing raw signal windows with their class labels."""

    def __init__(self, data, labels):
        # Convert once up front so __getitem__ is plain tensor indexing.
        self.data = torch.FloatTensor(data)
        self.labels = torch.LongTensor(labels)

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, idx):
        return self.data[idx], self.labels[idx]

def _load_split(split_dir):
    """Load every per-class .npy file from one split directory.

    Files are iterated in sorted name order so that class indices are
    deterministic and identical between the train and test splits (plain
    ``os.listdir`` order is arbitrary and could mislabel one split).

    Returns:
        tuple: (X, y, class_names) — stacked samples, integer labels,
        and the class name (file stem) for each label index.
    """
    files = sorted(f for f in os.listdir(split_dir) if f.endswith('.npy'))
    xs, ys, names = [], [], []
    for class_idx, class_file in enumerate(files):
        data = np.load(os.path.join(split_dir, class_file))
        xs.append(data)
        ys.append(np.full(len(data), class_idx))
        names.append(class_file.replace('.npy', ''))
    return np.concatenate(xs), np.concatenate(ys), names


def _load_replacement_set(sample_dir, class_names, X_train, y_train, sample_desc):
    """Load per-class replacement samples (simulated or GAN-generated).

    For each class, loads ``{class_name}.npy`` from sample_dir and caps the
    number of replacement samples at that class's real training-set count,
    so class proportions are preserved.

    Args:
        sample_dir (str): directory holding one .npy file per class.
        class_names (list[str]): class names in label-index order.
        X_train (np.ndarray): original training samples (used for counts).
        y_train (np.ndarray): original training labels (used for counts).
        sample_desc (str): human-readable sample-type label for log messages.

    Returns:
        tuple: (X, y) stacked replacement data, or (None, None) when no
        class file was found.
    """
    rep_samples, rep_labels = [], []
    for class_idx, class_name in enumerate(class_names):
        sample_file = os.path.join(sample_dir, f"{class_name}.npy")
        if not os.path.exists(sample_file):
            print(f"警告：未找到 {class_name} 类别的{sample_desc}")
            continue
        samples = np.load(sample_file)
        # Never use more replacement samples than the real per-class count.
        n_samples_per_class = len(X_train[y_train == class_idx])
        if len(samples) > n_samples_per_class:
            indices = np.random.choice(len(samples), n_samples_per_class, replace=False)
            samples = samples[indices]
        rep_samples.append(samples)
        rep_labels.append(np.full(len(samples), class_idx))
        print(f"已加载 {class_name} 类别的{sample_desc}，形状: {samples.shape}")
    if not rep_samples:
        return None, None
    return np.concatenate(rep_samples), np.concatenate(rep_labels)


def load_and_preprocess_data(use_gan=False, gan_type="vanilla", use_sim=False, use_sim_real=False, gan_model_dir="./models", n_samples=1000, latent_dim=100, signal_length=1200):
    """Load the CWRU train/test splits, optionally replacing the training
    set with simulated or GAN-generated samples.

    Exactly one replacement source is used, with precedence
    ``use_sim_real`` > ``use_sim`` > ``use_gan``; when the chosen source
    directory is missing (or contains no class files) the original
    training data is returned unchanged.

    Args:
        use_gan (bool): replace the training set with GAN-generated samples.
        gan_type (str): GAN flavor subdirectory, 'vanilla' or 'wgan'.
        use_sim (bool): replace the training set with simulated samples.
        use_sim_real (bool): replace the training set with real simulated samples.
        gan_model_dir (str): GAN model directory (unused; kept for
            backward compatibility with existing callers).
        n_samples (int): samples per class (unused; kept for compatibility).
        latent_dim (int): latent dimension (unused; kept for compatibility).
        signal_length (int): signal length (unused; kept for compatibility).

    Returns:
        tuple: (X_train, X_test, y_train, y_test)
    """
    X_train, y_train, class_names = _load_split(os.path.join("./cwru_prepro", "train"))
    X_test, y_test, _ = _load_split(os.path.join("./cwru_prepro", "test"))

    # (directory, directory label for warnings, sample label for logs)
    if use_sim_real:
        source = ("./cwru_sim_real", "真实仿真数据目录", "真实仿真样本")
    elif use_sim:
        source = ("./cwru_sim", "仿真数据目录", "仿真样本")
    elif use_gan:
        source = (os.path.join("./cwru_gan", gan_type), "GAN数据目录", "GAN生成样本")
    else:
        source = None

    if source is not None:
        sample_dir, dir_desc, sample_desc = source
        if not os.path.exists(sample_dir):
            print(f"警告：{dir_desc} {sample_dir} 不存在")
            return X_train, X_test, y_train, y_test
        X_rep, y_rep = _load_replacement_set(sample_dir, class_names, X_train, y_train, sample_desc)
        if X_rep is not None:
            # Replace the training set with the loaded samples.
            X_train, y_train = X_rep, y_rep
            print(f"使用{sample_desc}后的训练集形状: {X_train.shape}")

    return X_train, X_test, y_train, y_test

def create_data_loaders(X_train, X_test, y_train, y_test, batch_size=64):
    """Build torch DataLoaders for the train/test splits.

    Inserts a singleton channel axis so each sample has shape
    (1, signal_length), matching what 1-D conv models expect.
    """
    train_x = torch.FloatTensor(X_train.reshape(-1, 1, X_train.shape[1]))
    test_x = torch.FloatTensor(X_test.reshape(-1, 1, X_test.shape[1]))

    train_loader = DataLoader(
        TensorDataset(train_x, torch.LongTensor(y_train)),
        batch_size=batch_size,
        shuffle=True,  # shuffle training batches only
    )
    test_loader = DataLoader(
        TensorDataset(test_x, torch.LongTensor(y_test)),
        batch_size=batch_size,
    )
    return train_loader, test_loader

def plot_training_curves(train_losses, val_losses, train_accs, val_accs, save_path):
    """Plot loss and accuracy curves side by side and save to save_path."""
    import matplotlib.pyplot as plt

    fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(12, 5))

    # Left panel: training/validation loss per epoch.
    ax_loss.plot(train_losses, label='训练损失')
    ax_loss.plot(val_losses, label='验证损失')
    ax_loss.set_title('损失曲线')
    ax_loss.set_xlabel('轮次')
    ax_loss.set_ylabel('损失')
    ax_loss.legend()

    # Right panel: training/validation accuracy per epoch.
    ax_acc.plot(train_accs, label='训练准确率')
    ax_acc.plot(val_accs, label='验证准确率')
    ax_acc.set_title('准确率曲线')
    ax_acc.set_xlabel('轮次')
    ax_acc.set_ylabel('准确率')
    ax_acc.legend()

    fig.tight_layout()
    fig.savefig(save_path)
    plt.close(fig)

def plot_confusion_matrix(cm, class_names, save_path):
    """Render a labeled confusion-matrix heatmap and save it to save_path."""
    import matplotlib.pyplot as plt
    import seaborn as sns

    fig = plt.figure(figsize=(10, 8))
    sns.heatmap(
        cm,
        annot=True,
        fmt='d',  # integer cell counts
        cmap='Blues',
        xticklabels=class_names,
        yticklabels=class_names,
    )
    plt.title('混淆矩阵')
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    fig.tight_layout()
    fig.savefig(save_path)
    plt.close(fig)

def plot_tsne(features, labels, class_names, save_path):
    """Project features to 2-D with t-SNE and save a per-class scatter plot."""
    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt

    # Fixed random_state keeps the embedding reproducible across runs.
    embedded = TSNE(n_components=2, random_state=42).fit_transform(features)

    plt.figure(figsize=(10, 8))
    for class_idx, class_name in enumerate(class_names):
        pts = embedded[labels == class_idx]
        plt.scatter(pts[:, 0], pts[:, 1], label=class_name, alpha=0.7)

    plt.title('t-SNE特征可视化')
    plt.xlabel('t-SNE维度1')
    plt.ylabel('t-SNE维度2')
    plt.legend()
    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()

def safe_divide(a, b):
    """Element-wise a / b that returns 0 where b is 0.

    Inputs are coerced to float arrays first: with integer inputs the
    original ``np.zeros_like(a)`` output buffer was integer-typed, and
    ``np.divide`` (true division) cannot cast its float result into it,
    raising a UFuncTypeError.

    Args:
        a: dividend (array-like or scalar).
        b: divisor (array-like or scalar).

    Returns:
        np.ndarray: float array of quotients, 0.0 wherever b == 0.
    """
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    return np.divide(a, b, out=np.zeros_like(a), where=b != 0)

def evaluate_model(model, data_loader, device):
    """Evaluate classification accuracy and confusion matrix on a loader.

    The model's argmax over class logits is taken as the prediction.

    Returns:
        tuple: (accuracy, confusion_matrix) — accuracy as a percentage,
        confusion matrix from sklearn over all batches.
    """
    from sklearn.metrics import confusion_matrix

    model.eval()
    preds = []
    truths = []

    # No gradients needed for evaluation.
    with torch.no_grad():
        for batch_x, batch_y in data_loader:
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)

            logits = model(batch_x)
            predicted = torch.max(logits.data, 1)[1]

            preds.extend(predicted.cpu().numpy())
            truths.extend(batch_y.cpu().numpy())

    n_correct = sum(int(p == t) for p, t in zip(preds, truths))
    accuracy = 100 * n_correct / len(truths)

    cm = confusion_matrix(truths, preds)
    return accuracy, cm