import os
import torch
import torchvision
import torchvision.transforms as transforms
from torch import nn
from torch.utils.data import DataLoader, TensorDataset, ConcatDataset
import numpy as np
import matplotlib.pyplot as plt
from torchvision.utils import save_image
import shutil


# ========================= 创建目录结构 =========================
def create_directories():
    """Create the output directory layout used by the experiment.

    Directories:
        original_model/        -- weights/plots for the baseline model
        robust_model/          -- weights/plots for the adversarially trained model
        adversarial_examples/  -- saved adversarial/perturbation example images
    """
    dirs = [
        "original_model",
        "robust_model",
        "adversarial_examples",
    ]
    for d in dirs:
        if not os.path.exists(d):
            # exist_ok guards the race between the exists() check and the create.
            os.makedirs(d, exist_ok=True)
            print(f"创建目录: {d}")


# ========================= 数据加载与预处理 =========================
def load_data(batch_size=64):
    """Download MNIST (if absent) under ./data and build loaders.

    Returns (train_loader, test_loader, train_dataset, test_dataset);
    only the training loader shuffles.
    """
    to_tensor = transforms.ToTensor()
    datasets = {
        split: torchvision.datasets.MNIST(
            root='data', train=(split == 'train'), download=True, transform=to_tensor
        )
        for split in ('train', 'test')
    }
    train_loader = DataLoader(dataset=datasets['train'], batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(dataset=datasets['test'], batch_size=batch_size)
    return train_loader, test_loader, datasets['train'], datasets['test']


# ========================= 模型定义 =========================
class LeNet(nn.Module):
    """LeNet-style CNN for 1x28x28 MNIST digits, emitting 10 class logits.

    Feature path: 1x28x28 -> 6x28x28 -> pool -> 6x14x14 -> 16x12x12 -> pool
    -> 16x6x6 (576 features), then three linear layers 576 -> 120 -> 84 -> 10.
    """

    def __init__(self):
        super().__init__()
        # NOTE: keep the attribute names `conv` and `fc` so saved state_dicts load.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=6, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.fc = nn.Sequential(
            nn.Linear(576, 120),
            nn.Linear(120, 84),
            nn.Linear(84, 10),
        )

    def forward(self, x):
        features = self.conv(x)
        flat = torch.flatten(features, start_dim=1)  # keep batch dim, flatten the rest
        return self.fc(flat)


# ========================= 训练函数 =========================
def train_model(network, train_dataloader, device, loss_fn, optimizer, epochs=10, 
               model_path="model.pth", loss_plot_path="loss.png"):
    """Train `network`, save its weights and a loss curve, return per-epoch losses.

    Args:
        network: model to train (set to train mode here; left in train mode).
        train_dataloader: yields (inputs, targets) batches.
        device: torch device to move batches to.
        loss_fn: loss taking (predictions, targets); assumed to reduce to a
            per-batch mean (e.g. CrossEntropyLoss default).
        optimizer: optimizer over `network.parameters()`.
        epochs: number of passes over the data.
        model_path: where to save the final state_dict.
        loss_plot_path: where to save the loss-vs-epoch PNG.

    Returns:
        list of per-epoch mean batch losses.
    """
    network.train()
    losses = []
    for epoch in range(epochs):
        loss_sum = 0.0
        for X, y in train_dataloader:
            X, y = X.to(device), y.to(device)
            pred = network(X)
            loss = loss_fn(pred, y)
            loss_sum += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # BUG FIX: each `loss` is already a per-batch mean, so average over the
        # number of batches, not len(dataloader.dataset) (which underestimated
        # the loss by roughly a factor of batch_size).
        mean_loss = loss_sum / len(train_dataloader)
        losses.append(mean_loss)
        print(f"Epoch {epoch + 1} loss: {mean_loss:.6f}")

    # Persist the trained weights.
    torch.save(network.state_dict(), model_path)
    print(f"模型权重已保存到 {model_path}")

    # Save the loss curve.
    plt.figure(figsize=(10, 6))
    plt.plot(range(1, epochs + 1), losses, marker='o')
    plt.title("Training Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.grid(True)
    plt.savefig(loss_plot_path)
    plt.close()
    print(f"损失函数图像已保存到 {loss_plot_path}")

    return losses


# ========================= 测试函数 =========================
def evaluate_model(network, test_dataloader, device, description=""):
    """Compute and print top-1 accuracy of `network` over `test_dataloader`.

    Args:
        network: model producing per-class logits; put into eval mode here.
        test_dataloader: yields (inputs, targets) batches.
        device: torch device to move batches to.
        description: label prefixed to the printed accuracy line.

    Returns:
        accuracy in [0, 1].
    """
    network.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for X, y in test_dataloader:
            X, y = X.to(device), y.to(device)
            pred = network(X)
            # Vectorized count instead of a per-sample Python loop.
            correct += (pred.argmax(dim=1) == y).sum().item()
            total += y.size(0)
    acc = correct / total
    print(f"{description} 准确率: {acc * 100:.2f}%")
    return acc


# ========================= 生成对抗样本 =========================
def generate_adversarial_samples(network, dataloader, device, loss_fn,
                                epsilons=(0.01, 0.03, 0.05, 0.07, 0.09, 0.11, 0.13)):
    """Generate FGSM adversarial examples from `dataloader` for each epsilon.

    For each epsilon, inputs are perturbed by epsilon * sign(dLoss/dInput) and
    clamped back to [0, 1]. The first batch of each epsilon is saved as example
    images (originals once, adversarials, and 5x-amplified perturbations).

    Args:
        network: trained model used for the input gradient (set to eval mode).
        dataloader: yields (image, label) batches; pixel values assumed in [0, 1].
        device: torch device for computation.
        loss_fn: classification loss driving the FGSM gradient.
        epsilons: perturbation magnitudes. The default is a tuple on purpose --
            a mutable (list) default would be shared across calls.

    Returns:
        (adv_images, labels): CPU tensors concatenated across all epsilons.
    """
    network.eval()
    all_adv_images = []
    all_labels = []

    examples_dir = "adversarial_examples"
    # Robustness: succeed even if create_directories() was not called first.
    os.makedirs(examples_dir, exist_ok=True)

    for eps_idx, epsilon in enumerate(epsilons):
        adv_images = []
        labels = []

        for batch_idx, (X, y) in enumerate(dataloader):
            X, y = X.to(device), y.to(device)
            X.requires_grad = True
            # Forward pass
            pred = network(X)
            loss = loss_fn(pred, y)
            # Backward pass to get the gradient of the loss w.r.t. the input
            network.zero_grad()
            loss.backward()
            # FGSM step, clamped back to the valid pixel range
            X_adv = torch.clamp(X + epsilon * X.grad.sign(), 0, 1)
            adv_images.append(X_adv.detach().cpu())
            labels.append(y.cpu())

            # Save the first batch as a visual example
            if batch_idx == 0:
                # Save the clean images once (first epsilon only)
                if eps_idx == 0:
                    original_path = os.path.join(examples_dir, "original_images.png")
                    save_image(X.detach().cpu(), original_path, nrow=8)
                    print(f"原始图像示例已保存到 {original_path}")

                adv_path = os.path.join(examples_dir, f"adversarial_epsilon_{epsilon:.2f}.png")
                save_image(X_adv.detach().cpu(), adv_path, nrow=8)
                print(f"对抗样本示例(epsilon={epsilon:.2f})已保存到 {adv_path}")

                # Amplify the perturbation 5x and re-center so it is visible
                perturbation = (X_adv - X).detach().cpu() * 5
                perturbation = torch.clamp(perturbation + 0.5, 0, 1)
                pert_path = os.path.join(examples_dir, f"perturbation_epsilon_{epsilon:.2f}.png")
                save_image(perturbation, pert_path, nrow=8)
                print(f"扰动示例(epsilon={epsilon:.2f})已保存到 {pert_path}")

            # Limit work per epsilon: stop after batch index 20
            # (i.e. 21 batches are processed, indices 0..20).
            if batch_idx >= 20:
                break

        all_adv_images.extend(adv_images)
        all_labels.extend(labels)

    # Merge all epsilons' samples into single tensors.
    all_adv_images = torch.cat(all_adv_images, dim=0)
    all_labels = torch.cat(all_labels, dim=0)

    return all_adv_images, all_labels


# ========================= 创建带权重的增强数据集 =========================
def create_weighted_dataset(train_data, adv_images, adv_labels, batch_size=64, adv_weight=4.0):
    """Build an adversarially-augmented dataset with oversampled adversarial examples.

    Args:
        train_data: original training Dataset yielding (image, label).
        adv_images: adversarial example images, shape (N, C, H, W).
        adv_labels: labels matching `adv_images`.
        batch_size: batch size for both returned loaders.
        adv_weight: relative sampling weight of each adversarial sample
            (e.g. 4.0 makes an adversarial sample 4x as likely to be drawn).

    Returns:
        (weighted_dataloader, test_enhanced_dataloader): the first samples with
        replacement according to the weights (for training); the second iterates
        the combined dataset once, unshuffled (for evaluation).
    """
    # Materialize the whole training set in one pass; idiomatic single-batch
    # fetch instead of a for-loop with an immediate break. (MNIST fits in RAM.)
    full_loader = DataLoader(train_data, batch_size=len(train_data), shuffle=False)
    train_images, train_labels = next(iter(full_loader))
    # Ensure adversarial labels are integer class indices like the originals.
    if isinstance(adv_labels[0], torch.Tensor) and adv_labels[0].dim() == 0:
        adv_labels = adv_labels.long()
    # Concatenate originals and adversarials into one dataset.
    combined_images = torch.cat([train_images, adv_images], dim=0)
    combined_labels = torch.cat([train_labels, adv_labels], dim=0)
    combined_dataset = TensorDataset(combined_images, combined_labels)
    # Per-sample weights: originals 1.0, the adversarial tail gets adv_weight.
    weights = torch.ones(len(combined_dataset))
    weights[len(train_images):] = adv_weight
    sampler = torch.utils.data.WeightedRandomSampler(
        weights, len(weights), replacement=True
    )
    # Weighted loader used for training.
    weighted_dataloader = DataLoader(
        combined_dataset, batch_size=batch_size, sampler=sampler
    )
    print(f"原始数据: {len(train_images)}个样本")
    print(f"对抗样本: {len(adv_images)}个样本")
    print(f"对抗样本权重: {adv_weight}倍")
    print(f"增强数据集总计: {len(combined_dataset)}个样本")

    # Unweighted, unshuffled loader over the same data, for evaluation.
    test_enhanced_dataloader = DataLoader(
        combined_dataset, batch_size=batch_size, shuffle=False
    )

    return weighted_dataloader, test_enhanced_dataloader



# ========================= FGSM攻击测试 =========================
def test_fgsm_attack(network, test_dataloader, device, loss_fn, epsilons, 
                    output_dir, plot_filename="accuracy_vs_epsilon.png"):
    """Evaluate accuracy under FGSM for each epsilon and save an accuracy curve.

    Args:
        network: model under attack (set to eval mode; gradients w.r.t. inputs
            are still computed for the attack).
        test_dataloader: yields (image, label) batches; pixels assumed in [0, 1].
        device: torch device for computation.
        loss_fn: classification loss driving the FGSM gradient.
        epsilons: iterable of perturbation magnitudes to test.
        output_dir: directory to write the accuracy-vs-epsilon plot into.
        plot_filename: filename of the saved plot.

    Returns:
        list of accuracies, one per epsilon (same order).
    """
    network.eval()
    acc_list = []

    for epsilon in epsilons:
        correct, total = 0, 0
        for X, y in test_dataloader:
            X, y = X.to(device), y.to(device)
            X.requires_grad = True

            # Forward pass on the clean batch
            pred = network(X)
            loss = loss_fn(pred, y)

            # Backward pass for the input gradient
            network.zero_grad()
            loss.backward()

            # FGSM perturbation, clamped to the valid pixel range
            X_adv = torch.clamp(X + epsilon * X.grad.sign(), 0, 1)

            # Accuracy on the adversarial batch (vectorized count)
            with torch.no_grad():
                pred_adv = network(X_adv)
                correct += (pred_adv.argmax(dim=1) == y).sum().item()
                total += y.size(0)

        acc = correct / total
        acc_list.append(acc)
        print(f"Epsilon: {epsilon:.2f}, 准确率: {acc * 100:.2f}%")

    # Plot accuracy as a function of epsilon.
    plt.figure(figsize=(10, 6))
    plt.plot(epsilons, acc_list, marker='o')
    plt.title("Accuracy Under FGSM")
    plt.xlabel("Epsilon")
    plt.ylabel("Accuracy")
    plt.ylim(0, 1)
    plt.grid(True)
    plot_path = os.path.join(output_dir, plot_filename)
    plt.savefig(plot_path)
    plt.close()
    print(f"准确率图像已保存到 {plot_path}")

    return acc_list


# ========================= 主函数 =========================
def main():
    """Run the full experiment.

    Steps: train/load a baseline LeNet on MNIST, evaluate it under FGSM,
    generate adversarial examples, retrain on a weighted augmented dataset,
    evaluate the robust model, and save a comparison plot plus a text summary.
    """
    # Output directory layout
    create_directories()

    # Data
    batch_size = 64
    train_dataloader, test_dataloader, train_data, test_data = load_data(batch_size)

    # Device selection
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    print(f"使用设备: {device}")

    loss_fn = nn.CrossEntropyLoss()

    # Epsilon values for the FGSM attack evaluation
    epsilons = [0.01, 0.05, 0.09, 0.13, 0.17, 0.2]

    # ===== Step 1: train (or load) the original model =====
    original_model_path = os.path.join("original_model", "model.pth")
    original_loss_plot = os.path.join("original_model", "loss.png")

    original_model = LeNet().to(device)
    if os.path.exists(original_model_path):
        # map_location makes a GPU-saved checkpoint loadable on a CPU-only host.
        original_model.load_state_dict(
            torch.load(original_model_path, map_location=device))
        print(f"加载已有的原始模型: {original_model_path}")
    else:
        print("训练原始模型...")
        original_optimizer = torch.optim.SGD(original_model.parameters(), lr=0.001, momentum=0.9)
        train_model(original_model, train_dataloader, device, loss_fn, original_optimizer, 
                   model_path=original_model_path, loss_plot_path=original_loss_plot)

    # Baseline accuracy on clean test data
    print("\n===== 原始模型评估 =====")
    orig_clean_acc = evaluate_model(original_model, test_dataloader, device, 
                                   description="原始模型在干净测试数据上")

    # Baseline robustness under FGSM
    print("\n===== 原始模型在FGSM攻击下的性能 =====")
    orig_attack_accs = test_fgsm_attack(original_model, test_dataloader, device, loss_fn,
                                      epsilons, "original_model")

    # ===== Step 2: generate adversarial examples =====
    print("\n===== 生成对抗样本 =====")
    adv_images, adv_labels = generate_adversarial_samples(
        original_model, train_dataloader, device, loss_fn)
    print(f"生成的对抗样本数量: {len(adv_images)}")

    # ===== Step 3: build the weight-balanced augmented dataset =====
    print("\n===== 创建权重平衡的增强数据集 =====")
    adv_weight = 4.0  # adversarial samples are drawn 4x as often as originals
    enhanced_train_dataloader, test_enhanced_dataloader = create_weighted_dataset(
        train_data, adv_images, adv_labels, batch_size, adv_weight)

    # ===== Step 4: train the robust model =====
    robust_model_path = os.path.join("robust_model", "model.pth")
    robust_loss_plot = os.path.join("robust_model", "loss.png")

    print("\n===== 训练增强模型 =====")
    robust_model = LeNet().to(device)
    # Adam with light weight decay for stability; more epochs than the baseline.
    robust_optimizer = torch.optim.Adam(robust_model.parameters(), lr=0.001, weight_decay=1e-5)
    train_model(robust_model, enhanced_train_dataloader, device, loss_fn, robust_optimizer,
              epochs=20, model_path=robust_model_path, loss_plot_path=robust_loss_plot) 

    # ===== Step 5: evaluate both models =====
    print("\n===== 增强模型评估 =====")
    robust_clean_acc = evaluate_model(robust_model, test_dataloader, device,
                                     description="增强模型在干净测试数据上")

    print("\n===== 原始模型在增强数据集上的性能 =====")
    orig_enhanced_acc = evaluate_model(original_model, test_enhanced_dataloader, device,
                                    description="原始模型在增强数据集上")

    print("\n===== 增强模型在增强数据集上的性能 =====")
    robust_enhanced_acc = evaluate_model(robust_model, test_enhanced_dataloader, device,
                                      description="增强模型在增强数据集上")

    # Robust model under FGSM
    print("\n===== 增强模型在FGSM攻击下的性能 =====")
    robust_attack_accs = test_fgsm_attack(robust_model, test_dataloader, device, loss_fn,
                                        epsilons, "robust_model")

    # ===== Step 6: comparison plot =====
    plt.figure(figsize=(10, 6))
    plt.plot(epsilons, orig_attack_accs, 'bo-', label='original model')
    plt.plot(epsilons, robust_attack_accs, 'ro-', label='robust model')
    plt.title("Model Comparison Under FGSM Attack")
    plt.xlabel("Epsilon")
    plt.ylabel("Accuracy")
    plt.ylim(0, 1)
    plt.legend()
    plt.grid(True)
    comparison_path = "model_comparison.png"
    plt.savefig(comparison_path)
    plt.close()
    print(f"模型对比图已保存到 {comparison_path}")

    # Result summary
    print("\n===== 实验结果汇总 =====")
    print(f"原始模型在干净测试数据上的准确率: {orig_clean_acc * 100:.2f}%")
    print(f"增强模型在干净测试数据上的准确率: {robust_clean_acc * 100:.2f}%")
    print(f"原始模型在增强数据集上的准确率: {orig_enhanced_acc * 100:.2f}%")
    print(f"增强模型在增强数据集上的准确率: {robust_enhanced_acc * 100:.2f}%")

    print("\nFGSM攻击下的准确率比较:")
    for i, eps in enumerate(epsilons):
        print(f"Epsilon={eps:.2f}: 原始模型={orig_attack_accs[i]*100:.2f}%, "
              f"增强模型={robust_attack_accs[i]*100:.2f}%")


# Run the full experiment only when executed as a script (not on import).
if __name__ == "__main__":
    main()