import os
import argparse
import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from cnn_model import CNNModel, get_model
from utils import load_and_preprocess_data, evaluate_model
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from gan_model import Generator as VanillaGenerator
from wgan_model import Generator as WGANGenerator
import json
import torch.nn as nn

# Configure matplotlib to render CJK labels correctly
plt.rcParams['font.sans-serif'] = ['SimHei']  # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号

def parse_args(argv=None):
    """Parse command-line arguments for the evaluation script.

    Args:
        argv (list[str] | None): Argument strings to parse. Defaults to
            ``None``, in which case argparse reads ``sys.argv[1:]`` —
            preserving the original CLI behavior while making the parser
            testable in isolation.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--use_gan", action="store_true", help="是否使用GAN生成的样本")
    parser.add_argument("--gan_type", type=str, default="vanilla", 
                       choices=["vanilla", "wgan", "cgan", "wgan_gp"], help="GAN类型")
    parser.add_argument("--model_path", type=str, default=None, help="CNN模型路径")
    parser.add_argument("--gan_model_dir", type=str, default="./models", help="GAN模型目录")
    parser.add_argument("--batch_size", type=int, default=64, help="批次大小")
    parser.add_argument("--signal_length", type=int, default=1200, help="信号长度")
    parser.add_argument("--latent_dim", type=int, default=100, help="潜在空间维度")
    parser.add_argument("--n_samples", type=int, default=1000, help="每个类别生成的样本数")
    parser.add_argument("--use_sim", action="store_true", help="是否使用仿真数据")
    return parser.parse_args(argv)

def safe_divide(a, b, fill_value=0.0):
    """Element-wise ``a / b`` that yields ``fill_value`` wherever ``b`` is zero."""
    result = np.full_like(a, fill_value, dtype=float)
    np.divide(a, b, out=result, where=(b != 0))
    return result

def plot_confusion_matrix(cm, class_names, save_path):
    """Plot a confusion-matrix heatmap with per-class counts and accuracy.

    Args:
        cm (numpy.ndarray): Square confusion matrix (rows = true labels).
        class_names (list): Class name for each row/column of ``cm``.
        save_path (str): Destination path for the saved figure.
    """
    plt.figure(figsize=(12, 10))

    # Hoist the row totals: the original recomputed cm.sum(axis=1) twice.
    row_totals = cm.sum(axis=1)

    # Per-class accuracy in percent; safe_divide keeps zero-sample rows at 0.
    class_acc = safe_divide(cm.diagonal(), row_totals) * 100

    # Tick labels carry class name, sample count and accuracy.
    labels = []
    for name, acc, total in zip(class_names, class_acc, row_totals):
        if total == 0:
            labels.append(f"{name}\n(无样本)")
        else:
            labels.append(f"{name}\n({total}样本, {acc:.1f}%)")

    # Annotated heatmap with the enriched labels on both axes.
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=labels,
                yticklabels=labels)

    plt.title('混淆矩阵 (每个类别的样本数和准确率显示在标签中)', pad=20)
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')

    # Keep the long multi-line tick labels fully visible.
    plt.tight_layout()

    # High DPI + tight bounding box for better output quality.
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()

def plot_training_curves(train_losses, val_losses, train_accs, val_accs, save_path):
    """Plot side-by-side loss and accuracy curves for training and validation.

    Args:
        train_losses (list): Per-epoch training loss history.
        val_losses (list): Per-epoch validation loss history.
        train_accs (list): Per-epoch training accuracy history.
        val_accs (list): Per-epoch validation accuracy history.
        save_path (str): Destination path for the saved figure.
    """
    plt.figure(figsize=(12, 5))

    # Both panels share the same layout; drive them from a data table.
    panels = [
        (1, train_losses, val_losses, '训练损失', '验证损失', '损失', '训练和验证损失曲线'),
        (2, train_accs, val_accs, '训练准确率', '验证准确率', '准确率 (%)', '训练和验证准确率曲线'),
    ]
    for position, train_series, val_series, train_label, val_label, y_label, title in panels:
        plt.subplot(1, 2, position)
        plt.plot(train_series, label=train_label, color='blue')
        plt.plot(val_series, label=val_label, color='red')
        plt.xlabel('轮次')
        plt.ylabel(y_label)
        plt.title(title)
        plt.legend()
        plt.grid(True)

    plt.tight_layout()
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()

def extract_features(model, data_loader, device):
    """Collect penultimate-layer features and labels over a data loader.

    Args:
        model: CNN model whose forward accepts ``return_features=True``.
        data_loader: Iterable yielding ``(inputs, targets)`` batches.
        device: Device the inputs are moved to before the forward pass.

    Returns:
        tuple: ``(features, labels)`` as numpy arrays, concatenated over
        all batches.
    """
    model.eval()
    feature_batches = []
    label_batches = []

    with torch.no_grad():
        for batch_inputs, batch_targets in data_loader:
            # return_features=True asks the model for its penultimate-layer output.
            batch_features = model(batch_inputs.to(device), return_features=True)
            feature_batches.append(batch_features.cpu().numpy())
            label_batches.append(batch_targets.numpy())

    return np.concatenate(feature_batches, axis=0), np.concatenate(label_batches)

def plot_tsne(features, labels, class_names, save_path):
    """Project features into 2-D with t-SNE and save a labeled scatter plot.

    Args:
        features (numpy.ndarray): Feature matrix, one row per sample.
        labels (numpy.ndarray): Integer class label per sample.
        class_names (list): Name for each class index.
        save_path (str): Destination path for the saved figure.
    """
    # Standardize first so t-SNE distances are not dominated by feature scale,
    # then embed into two dimensions with a fixed seed for reproducibility.
    embedded = TSNE(n_components=2, random_state=42).fit_transform(
        StandardScaler().fit_transform(features)
    )

    plt.figure(figsize=(10, 8))
    scatter = plt.scatter(
        embedded[:, 0],
        embedded[:, 1],
        c=labels,
        cmap='tab10',
        alpha=0.6
    )

    # Color legend keyed by class index, pinned so later artists keep it alive.
    plt.gca().add_artist(
        plt.legend(*scatter.legend_elements(), title="类别", loc="upper right")
    )

    # Annotate each class at the centroid of its projected points.
    for class_idx, class_name in enumerate(class_names):
        member_mask = labels == class_idx
        if not np.any(member_mask):
            continue
        centroid = np.mean(embedded[member_mask], axis=0)
        plt.annotate(
            class_name,
            centroid,
            xytext=(5, 5),
            textcoords='offset points',
            ha='left',
            va='bottom',
            bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
            arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
        )

    plt.title('t-SNE特征可视化')
    plt.xlabel('t-SNE维度1')
    plt.ylabel('t-SNE维度2')
    plt.grid(True)

    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.close()

def generate_samples(generator, n_samples, latent_dim, device):
    """Draw standard-normal latent noise and return generated samples.

    Args:
        generator: GAN generator module mapping latent vectors to samples.
        n_samples (int): Number of samples to generate.
        latent_dim (int): Dimensionality of the latent vectors.
        device: Device the noise tensor is moved to.

    Returns:
        numpy.ndarray: Generated samples, detached and moved to CPU.
    """
    generator.eval()
    with torch.no_grad():
        noise = torch.randn(n_samples, latent_dim).to(device)
        return generator(noise).cpu().numpy()

def evaluate_model(model, test_loader, criterion, device):
    """Evaluate classification loss and accuracy on a test loader.

    NOTE(review): this local definition shadows the ``evaluate_model``
    imported from ``utils`` at the top of the file — confirm that import
    is still needed, or drop it.

    Args:
        model: Classifier returning per-class logits.
        test_loader: Iterable of ``(inputs, labels)`` batches.
        criterion: Loss function, e.g. ``nn.CrossEntropyLoss``.
        device: Device inputs and labels are moved to.

    Returns:
        tuple: ``(avg_loss, accuracy_percent, all_preds, all_labels)`` where
        the prediction/label lists hold per-sample numpy scalars.
    """
    model.eval()
    total_loss = 0.0
    correct = 0
    total = 0
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            total_loss += loss.item()
            # argmax over the class dimension; the legacy `.data` indirection
            # is unnecessary inside torch.no_grad().
            predicted = outputs.argmax(dim=1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

            all_preds.extend(predicted.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    # Guard against an empty loader instead of raising ZeroDivisionError.
    if total == 0:
        return 0.0, 0.0, all_preds, all_labels

    avg_loss = total_loss / len(test_loader)
    accuracy = (correct / total) * 100  # as a percentage

    return avg_loss, accuracy, all_preds, all_labels

def main():
    """Evaluate a trained CNN checkpoint on the test split and save results.

    Writes a confusion-matrix PNG and an ``evaluation_results.json`` summary
    into a directory chosen by the data-source flags (sim / GAN / real).
    """
    args = parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Pick the save directory according to the model's training data source.
    if args.use_sim:
        save_dir = os.path.join("./diagnosis_models/cnn", "sim")
    elif args.use_gan:
        save_dir = os.path.join("./diagnosis_models/cnn", args.gan_type)
    else:
        save_dir = os.path.join("./diagnosis_models/cnn", "no_gan")
    os.makedirs(save_dir, exist_ok=True)

    # Default the checkpoint path to the best model inside that directory.
    if args.model_path is None:
        args.model_path = os.path.join(save_dir, "best_model.pth")

    # Fail early with a clear error if the checkpoint does not exist.
    if not os.path.exists(args.model_path):
        raise FileNotFoundError(f"找不到模型文件：{args.model_path}")

    # Load the train/test split via the project helper imported from utils.
    X_train, X_test, y_train, y_test = load_and_preprocess_data(
        use_gan=args.use_gan,
        gan_type=args.gan_type,
        use_sim=args.use_sim
    )

    # Derive class names from the .npy files in the preprocessed-data folder.
    # NOTE(review): os.listdir order is filesystem-dependent — confirm it
    # matches the label-index order used during training (consider sorting).
    data_path = "./cwru_prepro"
    class_files = [f for f in os.listdir(data_path) if f.endswith('.npy')]
    class_names = [f.replace('.npy', '') for f in class_files]

    # Wrap the test split as (N, 1, L) float tensors for the 1-D CNN.
    test_dataset = TensorDataset(torch.FloatTensor(X_test.reshape(-1, 1, X_test.shape[1])), torch.LongTensor(y_test))
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size)

    # Build the CNN with one output per class present in the training labels.
    model = get_model(num_classes=len(np.unique(y_train)), device=device)

    # Load the checkpoint; accept both raw state dicts and wrapped ones.
    # NOTE(review): torch.load without map_location fails on CPU-only hosts
    # for GPU-saved checkpoints — consider map_location=device.
    checkpoint = torch.load(args.model_path)
    if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
        model.load_state_dict(checkpoint['model_state_dict'])
    else:
        model.load_state_dict(checkpoint)

    # Loss is only needed to report the average test loss.
    criterion = nn.CrossEntropyLoss()

    # Run evaluation (uses the local evaluate_model defined above, which
    # shadows the one imported from utils).
    print("\n评估模型...")
    test_loss, test_acc, test_preds, test_labels = evaluate_model(model, test_loader, criterion, device)
    print(f"测试集准确率: {test_acc:.2f}%")

    # Confusion matrix over the test predictions.
    cm = confusion_matrix(test_labels, test_preds)

    # Plot and save the confusion-matrix heatmap.
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=class_names,
                yticklabels=class_names)
    plt.title('混淆矩阵')
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'confusion_matrix.png'))
    plt.close()

    # Persist the evaluation summary as JSON next to the plots.
    results = {
        'test_accuracy': test_acc,
        'test_loss': test_loss,
        'confusion_matrix': cm.tolist(),
        'class_names': class_names,
        'model_type': 'sim' if args.use_sim else 'gan' if args.use_gan else 'real',
        'gan_type': args.gan_type if args.use_gan else None,
        'use_sim': args.use_sim
    }

    with open(os.path.join(save_dir, 'evaluation_results.json'), 'w') as f:
        json.dump(results, f, indent=4)

# Script entry point: run the evaluation only when executed directly.
if __name__ == "__main__":
    main() 