import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from sklearn.metrics import mean_squared_error
from dataLoader import load_datasets
from model import NetMNIST, NetCIFAR
from GPDC import gpdc
import matplotlib.pyplot as plt
import os
import concurrent.futures

# Select the compute device: prefer CUDA when available, otherwise use the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Make sure the folder for saved results exists.
os.makedirs('result', exist_ok=True)

def evaluate(model, test_loader):
    """Return the top-1 accuracy (in percent) of `model` over `test_loader`.

    Moves each batch to the module-level `device`, runs inference under
    `torch.no_grad()`, and prints the resulting accuracy.
    """
    model.eval()
    num_correct = 0
    num_seen = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            # argmax over the class dimension gives the predicted label.
            predictions = model(images).argmax(dim=1)
            num_correct += (predictions == labels).sum().item()
            num_seen += labels.size(0)

    accuracy = 100 * num_correct / num_seen
    print(f'Accuracy of the network on the test images: {accuracy}%')
    return accuracy


# Attack function
def random_gradient_attack(model, test_loader, noise_scale=0.1):
    """Simulate a gradient attack: recompute per-batch gradients on
    `test_loader` and perturb each with Gaussian noise.

    Args:
        model: the network under attack (moved to `device`).
        test_loader: iterable of (inputs, targets) batches.
        noise_scale: standard-deviation multiplier for the added noise.

    Returns:
        A list with one entry per batch; each entry is a list of noisy
        gradient tensors, one per parameter that received a gradient.
    """
    model.to(device)
    attacked_gradients = []
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)

        # Clear stale gradients, then backprop the cross-entropy loss
        # for this batch to populate param.grad.
        model.zero_grad()
        loss = F.cross_entropy(model(images), labels)
        loss.backward()

        # Noisy copy of every available gradient (adding the noise tensor
        # produces a new tensor, so the real gradients are left untouched).
        noisy_batch = [
            p.grad.data + torch.randn_like(p.grad.data) * noise_scale
            for p in model.parameters()
            if p.grad is not None
        ]
        attacked_gradients.append(noisy_batch)

    return attacked_gradients


def gradient_compression(model, dataloader, epochs, compression_ratio=0.5):
    """Train `model` while sparsifying gradients before each SGD step.

    Per parameter, gradient entries whose magnitude falls below
    `compression_ratio * max(|grad|)` are zeroed out (magnitude-threshold
    compression), then the optimizer applies the sparsified gradients.

    Args:
        model: network to train (moved to `device`).
        dataloader: iterable of (inputs, targets) training batches.
        epochs: number of passes over `dataloader`.
        compression_ratio: fraction of the per-tensor max magnitude used
            as the zeroing threshold.
    """
    model.train()
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(epochs):
        total_loss = 0
        for data, target in dataloader:
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()

            # Forward pass and loss.
            output = model(data)
            loss = F.cross_entropy(output, target)
            loss.backward()

            # Sparsify each parameter's gradient in place. Iterating the
            # parameters directly (instead of zipping a filtered gradient
            # list against model.parameters()) keeps each gradient paired
            # with its own parameter even when some parameters have no
            # gradient — the previous zip silently misaligned them.
            for param in model.parameters():
                if param.grad is None:
                    continue
                grad = param.grad
                threshold = torch.max(torch.abs(grad)) * compression_ratio
                param.grad = torch.where(
                    torch.abs(grad) < threshold, torch.zeros_like(grad), grad
                )

            optimizer.step()

            total_loss += loss.item()

        print(f'Epoch {epoch + 1}, Loss: {total_loss / len(dataloader)}')


def differential_privacy(model, dataloader, epochs, noise_scale=0.1):
    """Train `model` with Gaussian noise added to every gradient before
    each SGD step (a simple noisy-gradient defense).

    Args:
        model: network to train (moved to `device`).
        dataloader: iterable of (inputs, targets) training batches.
        epochs: number of passes over `dataloader`.
        noise_scale: standard-deviation multiplier of the added noise.

    NOTE(review): no gradient clipping is applied before the noise, so this
    does not carry a formal (epsilon, delta) DP guarantee — it only
    perturbs gradients.
    """
    model.train()
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(epochs):
        total_loss = 0
        for data, target in dataloader:
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()

            # Forward pass and loss.
            output = model(data)
            loss = F.cross_entropy(output, target)
            loss.backward()

            # Add noise to each parameter's gradient in place. Iterating the
            # parameters directly avoids the previous bug where a gradient
            # list filtered of None entries was zipped against the full
            # parameter list, misaligning gradients with parameters.
            for param in model.parameters():
                if param.grad is not None:
                    param.grad.add_(torch.randn_like(param.grad) * noise_scale)

            # Apply the noisy gradients.
            optimizer.step()

            total_loss += loss.item()

        print(f'Epoch {epoch + 1}, Loss: {total_loss / len(dataloader)}')


# Defense method: representation-based defense

def defense_from_representation(model, dataloader, epochs, perturbation_scale=0.05, max_grad_norm=1.0):
    """Train `model` with perturbed, norm-clipped gradients.

    Each gradient is perturbed with Gaussian noise scaled by
    `perturbation_scale`, then the global gradient norm is clipped to
    `max_grad_norm` before the SGD step.

    Args:
        model: network to train (moved to `device`).
        dataloader: iterable of (inputs, targets) training batches.
        epochs: number of passes over `dataloader`.
        perturbation_scale: standard-deviation multiplier of the noise.
        max_grad_norm: cap for the total gradient norm (prevents blow-up).
    """
    model.train()
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    for epoch in range(epochs):
        total_loss = 0
        for data, target in dataloader:
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()

            # Forward pass and loss.
            output = model(data)
            loss = F.cross_entropy(output, target)
            loss.backward()

            # Perturb each parameter's gradient in place. Iterating the
            # parameters directly fixes the previous misalignment, where a
            # None-filtered gradient list was zipped against the full
            # parameter list.
            for param in model.parameters():
                if param.grad is not None:
                    param.grad.add_(
                        torch.randn_like(param.grad) * perturbation_scale
                    )

            # Clip the (perturbed) gradients to avoid exploding updates.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)

            # Apply the update.
            optimizer.step()
            total_loss += loss.item()

        print(f'Epoch {epoch + 1}, Loss: {total_loss / len(dataloader)}')

def plot_results(dataset, method_results, filename):
    """Plot accuracy-vs-MSE curves for several defense methods and save
    the figure.

    Args:
        dataset (str): dataset name, e.g. "MNIST" or "CIFAR10".
        method_results (dict): maps method name -> (MSE_list, accuracy_list).
        filename (str): path the figure is written to.
    """
    os.makedirs('result', exist_ok=True)

    figure, axis = plt.subplots(figsize=(10, 6))

    # One marked line per defense method.
    for name, (mse_values, accuracy_values) in method_results.items():
        axis.plot(mse_values, accuracy_values, marker='o', label=name)

    # Title, axis labels, legend, and a grid for readability.
    axis.set_title(f"{dataset} - Model Accuracy vs MSE", fontsize=14)
    axis.set_xlabel("MSE", fontsize=12)
    axis.set_ylabel("Model Accuracy (%)", fontsize=12)
    axis.legend()
    axis.grid(True)

    # Write the figure to disk and release it.
    plt.savefig(filename, bbox_inches='tight')
    plt.close()


def run_experiment(model, train_loader, test_loader, method, epochs=10):
    """Train `model` with the named defense, then repeatedly attack and
    evaluate it.

    Args:
        model: network to train and evaluate.
        train_loader: training batches handed to the defense method.
        test_loader: batches used for the gradient attack and accuracy.
        method: one of 'GPDC', 'Gradient Compression',
            'Differential Privacy', 'Defense from Representation'.
        epochs: training epochs, and also the number of attack/eval rounds.

    Returns:
        (mse_list, accuracy_list), or (None, None) for an unknown method.

    NOTE(review): the attack/eval loop below runs on the already-trained
    model, so per-round variation comes only from the attack's randomness.
    """
    defense_methods = {
        'GPDC': gpdc,
        'Gradient Compression': gradient_compression,
        'Differential Privacy': differential_privacy,
        'Defense from Representation': defense_from_representation
    }

    # Guard clause: bail out on an unknown defense method.
    if method not in defense_methods:
        print(f"Invalid defense method: {method}")
        return None, None

    print(f"Training with {method}...")
    defense_methods[method](model, train_loader, epochs=epochs)

    mse_list = []
    accuracy_list = []
    for epoch in range(epochs):
        # Attack: noisy gradients for every test batch.
        attacked_gradients = random_gradient_attack(model, test_loader)

        # MSE of each noisy gradient against a zero target, averaged.
        per_grad_mse = []
        for batch_grads in attacked_gradients:
            for grad in batch_grads:
                flat = grad.flatten().cpu().numpy()
                per_grad_mse.append(mean_squared_error(flat, np.zeros_like(flat)))
        mse = np.mean(per_grad_mse)

        # Accuracy of the defended model on the test set.
        accuracy = evaluate(model, test_loader)

        mse_list.append(mse)
        accuracy_list.append(accuracy)

        # Debug output.
        print(f"Epoch {epoch + 1} - Accuracy: {accuracy}%")
        print(f"Epoch {epoch + 1} - MSE: {mse}")

    return mse_list, accuracy_list


def run_and_plot_experiment(model, train_loader, test_loader, experiment_name, methods, save_path):
    """Run every defense method on a fresh copy of `model`'s initial
    weights, plot the combined results, and return them.

    Args:
        model: network instance; its weights at call time define the
            common starting point for every method.
        train_loader / test_loader: data for training and attack/eval.
        experiment_name: label used in log lines and the plot title.
        methods: defense-method names understood by `run_experiment`.
        save_path: where the comparison figure is saved.

    Returns:
        dict mapping method name -> (mse_list, accuracy_list); entries are
        (None, None) for methods `run_experiment` rejected.
    """
    # Snapshot the initial weights so every defense starts from the same
    # untrained state. Previously the single model instance was trained by
    # each method in turn, so later methods built on the earlier methods'
    # weights and the comparison was contaminated.
    initial_state = {k: v.detach().clone() for k, v in model.state_dict().items()}

    results = {}
    for method in methods:
        print(f"Running {experiment_name} with {method}...")
        model.load_state_dict(initial_state)
        results[method] = run_experiment(model, train_loader, test_loader, method)

    # Plot only methods that produced data; (None, None) entries from an
    # invalid method would crash inside matplotlib.
    print(f"Saving {experiment_name} performance results...")
    plottable = {m: r for m, r in results.items() if r[0] is not None}
    plot_results(experiment_name, plottable, save_path)

    return results


def experiment_and_analysis():
    """Run the full defense comparison on MNIST and CIFAR10.

    For each dataset: load the data, build a fresh model, run every
    defense method, and save a performance plot under `result/`.

    Returns:
        (mnist_results, cifar_results) — each a dict of
        method name -> (mse_list, accuracy_list), or None if that
        dataset was not run.
    """
    # Dataset name -> (loader key, model class).
    datasets = {
        'MNIST': ('mnist', NetMNIST),
        'CIFAR10': ('cifar', NetCIFAR)
    }

    methods = ['GPDC', 'Gradient Compression', 'Differential Privacy', 'Defense from Representation']

    # Make sure the output directory exists.
    os.makedirs('result', exist_ok=True)

    # Collect results keyed by dataset name instead of binding separate
    # locals inside conditionals — the old mnist_results/cifar_results
    # pattern raised NameError if a dataset was ever removed from the dict.
    all_results = {}
    for dataset_name, (dataset_type, model_class) in datasets.items():
        print(f"Loading {dataset_name} dataset...")
        # Load a (subsampled) train/test split for this dataset.
        train_loader, test_loader = load_datasets(dataset_type, subset_size=9000)

        # Fresh model per dataset.
        model = model_class().to(device)

        # Run every defense method and save the comparison plot.
        experiment_name = f"{dataset_name} Experiment"
        save_path = f"result/{dataset_name}_performance.png"
        all_results[dataset_name] = run_and_plot_experiment(
            model, train_loader, test_loader, experiment_name, methods, save_path
        )

    print("Experiments completed and results saved.")
    # Preserve the original (mnist, cifar) return order.
    return all_results.get('MNIST'), all_results.get('CIFAR10')

# Run the experiment suite only when executed as a script, so importing this
# module for its functions does not launch training as a side effect.
if __name__ == "__main__":
    experiment_and_analysis()
