import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset, Subset, TensorDataset
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
import copy


# ==================== 1. 改进的数据集准备 ====================
class CollaborativeDataset:
    """Prepare and partition a dataset for the collaborative-learning experiment.

    Currently only MNIST is supported; images are normalized to [-1, 1]
    to match the generator's tanh output range.
    """

    def __init__(self, dataset_name='mnist', data_root='./data'):
        """Download/load the dataset.

        Raises:
            ValueError: if `dataset_name` is not supported (previously an
                unsupported name silently produced a half-initialized object).
        """
        self.dataset_name = dataset_name

        if dataset_name == 'mnist':
            transform = transforms.Compose([
                transforms.Resize(28),  # keep MNIST's native size
                transforms.ToTensor(),
                transforms.Normalize((0.5,), (0.5,))  # -> [-1, 1]
            ])

            self.train_dataset = torchvision.datasets.MNIST(
                root=data_root, train=True, download=True, transform=transform
            )
            self.test_dataset = torchvision.datasets.MNIST(
                root=data_root, train=False, download=True, transform=transform
            )
            self.num_classes = 10
            self.channels = 1
            self.image_size = 28
        else:
            raise ValueError(f"Unsupported dataset: {dataset_name!r}")

    def split_dataset_by_labels(self, labels_dict):
        """Split the train set into per-participant Subsets by label membership.

        Args:
            labels_dict: mapping participant name -> iterable of class labels.

        Returns:
            dict mapping participant name -> torch.utils.data.Subset.
        """
        # Fast path: torchvision datasets expose a `targets` tensor, which
        # avoids decoding + transforming every image just to read its label.
        targets = getattr(self.train_dataset, 'targets', None)
        if targets is not None:
            all_labels = [int(t) for t in targets]
        else:
            all_labels = [label for _, label in self.train_dataset]

        datasets = {}
        for participant, labels in labels_dict.items():
            wanted = set(labels)  # O(1) membership tests
            indices = [i for i, lbl in enumerate(all_labels) if lbl in wanted]
            datasets[participant] = Subset(self.train_dataset, indices)

        return datasets


# ==================== 2. 改进的模型架构 ====================
class CNNDiscriminator(nn.Module):
    """CNN classifier/discriminator following the paper's architecture.

    Classifies 28x28 single-channel images into `num_classes` classes
    (11 = 10 real digit classes + 1 artificial "fake" class used by the
    GAN attack). Outputs log-probabilities, so pair it with NLLLoss.
    """

    def __init__(self, num_classes=11, input_channels=1):
        super(CNNDiscriminator, self).__init__()

        # Input: input_channels x 28 x 28
        self.conv1 = nn.Conv2d(input_channels, 32, kernel_size=5, stride=1, padding=2)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        # -> 32 x 14 x 14

        self.conv2 = nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # -> 64 x 7 x 7

        self.flatten_size = 64 * 7 * 7

        self.fc1 = nn.Linear(self.flatten_size, 200)
        self.fc2 = nn.Linear(200, num_classes)

        # Dropout on the penultimate layer for robustness.
        self.dropout = nn.Dropout(0.5)

    def get_features(self, x):
        """Return the 200-dim penultimate features (used for feature matching)."""
        x = self.pool1(torch.tanh(self.conv1(x)))
        x = self.pool2(torch.tanh(self.conv2(x)))
        x = x.view(x.size(0), -1)
        return torch.tanh(self.fc1(x))

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, num_classes).

        Reuses `get_features` for the shared trunk so the two code paths
        cannot drift apart (previously the trunk was duplicated).
        """
        x = self.dropout(self.get_features(x))
        return F.log_softmax(self.fc2(x), dim=1)


class DCGANGenerator(nn.Module):
    """DCGAN-style generator mapping latent noise to nc x 28 x 28 images in [-1, 1]."""

    def __init__(self, nz=100, ngf=64, nc=1):
        super(DCGANGenerator, self).__init__()

        self.nz = nz

        def up_block(c_in, c_out, kernel, stride, pad):
            # Transposed conv upsampling stage: conv-transpose + BN + ReLU.
            return [
                nn.ConvTranspose2d(c_in, c_out, kernel, stride, pad, bias=False),
                nn.BatchNorm2d(c_out),
                nn.ReLU(True),
            ]

        # Architecture sized for 28x28 output.
        layers = []
        layers += up_block(nz, ngf * 4, 7, 1, 0)       # nz x 1 x 1 -> (ngf*4) x 7 x 7
        layers += up_block(ngf * 4, ngf * 2, 4, 2, 1)  # -> (ngf*2) x 14 x 14
        layers += up_block(ngf * 2, ngf, 4, 2, 1)      # -> ngf x 28 x 28
        layers += [
            nn.Conv2d(ngf, nc, 3, 1, 1, bias=False),
            nn.Tanh(),                                 # output in [-1, 1]
        ]
        self.main = nn.Sequential(*layers)

        self._initialize_weights()

    def _initialize_weights(self):
        """DCGAN initialization: N(0, 0.02) convs, N(1, 0.02)/zero batchnorm."""
        for module in self.modules():
            if isinstance(module, (nn.ConvTranspose2d, nn.Conv2d)):
                nn.init.normal_(module.weight.data, 0.0, 0.02)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.normal_(module.weight.data, 1.0, 0.02)
                nn.init.constant_(module.bias.data, 0)

    def forward(self, z):
        """Map latent z of shape (B, nz, 1, 1) to images of shape (B, nc, 28, 28)."""
        return self.main(z)


# ==================== 3. 参数服务器 ====================
class ParameterServer:
    """Central server that stores and serves the shared model parameters."""

    def __init__(self, model):
        # Keep a model copy plus a CPU-resident snapshot of its parameters.
        self.global_model = copy.deepcopy(model)
        self.parameters = {}
        for name, param in model.named_parameters():
            self.parameters[name] = param.data.clone().cpu()

    def receive_update(self, participant_name, local_parameters,
                       upload_fraction=1.0):
        """Overwrite stored tensors with a participant's (possibly partial) update."""
        update = local_parameters
        if upload_fraction < 1.0:
            update = self._select_parameters(local_parameters, upload_fraction)

        for name, tensor in update.items():
            if name not in self.parameters:
                continue
            self.parameters[name] = tensor.clone().cpu()

    def send_update(self, download_fraction=1.0):
        """Return cloned parameters (a random subset when fraction < 1)."""
        if download_fraction >= 1.0:
            return {name: tensor.clone() for name, tensor in self.parameters.items()}

        chosen = self._select_parameters(self.parameters, download_fraction)
        return {name: tensor.clone() for name, tensor in chosen.items()}

    def _select_parameters(self, parameters, fraction):
        """Randomly pick `fraction` of the parameter tensors (at least one)."""
        names = list(parameters.keys())
        count = max(1, int(len(names) * fraction))
        picked = np.random.choice(names, count, replace=False)
        return {name: parameters[name] for name in picked}


# ==================== 4. 参与者类 ====================
class HonestParticipant:
    """A benign participant that trains only on its own local data."""

    def __init__(self, name, model, dataset, device='cpu',
                 learning_rate=0.001, batch_size=64):
        self.name = name
        self.device = device
        self.local_model = copy.deepcopy(model).to(device)
        self.dataset = dataset
        self.dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=0
        )

        # SGD with momentum and weight decay; model outputs log-probs,
        # hence NLLLoss as the criterion.
        self.optimizer = optim.SGD(
            self.local_model.parameters(),
            lr=learning_rate,
            momentum=0.9,
            weight_decay=1e-4
        )
        self.criterion = nn.NLLLoss()

    def local_train(self, epochs=1):
        """Run `epochs` passes over the local data.

        Returns:
            dict with 'loss' (mean per-batch loss) and 'accuracy' (percent).
        """
        self.local_model.train()
        running_loss = 0.0
        n_correct = 0
        n_seen = 0

        for _ in range(epochs):
            for inputs, labels in self.dataloader:
                inputs = inputs.to(self.device)
                labels = labels.to(self.device)

                self.optimizer.zero_grad()
                log_probs = self.local_model(inputs)
                batch_loss = self.criterion(log_probs, labels)
                batch_loss.backward()
                self.optimizer.step()

                running_loss += batch_loss.item()
                n_correct += (log_probs.argmax(dim=1) == labels).sum().item()
                n_seen += labels.size(0)

        acc = 100.0 * n_correct / n_seen if n_seen > 0 else 0.0
        denom = max(len(self.dataloader) * epochs, 1)
        return {'loss': running_loss / denom, 'accuracy': acc}

    def get_parameters(self):
        """Snapshot local parameters as cloned CPU tensors keyed by name."""
        snapshot = {}
        for name, param in self.local_model.named_parameters():
            snapshot[name] = param.data.clone().cpu()
        return snapshot

    def update_parameters(self, new_parameters):
        """Copy any matching tensors from `new_parameters` into the local model."""
        with torch.no_grad():
            for name, param in self.local_model.named_parameters():
                if name in new_parameters:
                    param.data.copy_(new_parameters[name].to(self.device))


class AdversarialParticipant(HonestParticipant):
    """Malicious participant mounting the GAN-based information-leakage attack.

    It trains a generator against (a frozen copy of) the shared model to
    reconstruct samples of `target_class` — a class it holds no data for —
    then injects those samples labelled as the artificial `fake_class` into
    its local training, coaxing the victim into revealing more information.
    """

    def __init__(self, name, model, dataset, target_class, fake_class,
                 device='cpu', learning_rate=0.001, batch_size=64, nz=100):
        super().__init__(name, model, dataset, device, learning_rate, batch_size)

        self.target_class = target_class  # class to reconstruct
        self.fake_class = fake_class      # label assigned to injected fakes
        self.nz = nz                      # latent dimension

        # Generator + Adam with the standard DCGAN hyperparameters.
        self.generator = DCGANGenerator(nz=nz, ngf=64, nc=1).to(device)
        self.g_optimizer = optim.Adam(
            self.generator.parameters(),
            lr=0.0002,
            betas=(0.5, 0.999)
        )

        # Fixed noise for reproducible visualizations.
        self.fixed_noise = torch.randn(64, nz, 1, 1, device=device)

        # Most recent batch of accepted fake samples / labels.
        self.generated_data = []
        self.generated_labels = []

        # Per-round average generator loss.
        self.gan_losses = []

    def train_gan(self, num_iterations=100, batch_size=64):
        """Train the generator against a frozen copy of the local model.

        Early-stops after `max_patience` iterations without loss improvement.

        Returns:
            Average generator loss over the executed iterations
            (0.0 when `num_iterations` <= 0 — previously this case raised
            NameError because the loop variable was never bound).
        """
        self.generator.train()
        discriminator = copy.deepcopy(self.local_model)
        discriminator.eval()  # frozen; gradients still flow to the generator

        total_g_loss = 0.0
        best_loss = float('inf')
        patience = 0
        max_patience = 20
        steps = 0  # explicit counter: safe even when the loop body never runs

        for _ in range(num_iterations):
            noise = torch.randn(batch_size, self.nz, 1, 1, device=self.device)
            fake_images = self.generator(noise)

            self.g_optimizer.zero_grad()
            outputs = discriminator(fake_images)

            # Objective: generated samples should be classified as target_class.
            target_labels = torch.full(
                (batch_size,),
                self.target_class,
                dtype=torch.long,
                device=self.device
            )
            g_loss = F.nll_loss(outputs, target_labels)

            # Feature-matching regularizer (trick from the paper): pull mean
            # fake features towards the mean features of a real batch.
            with torch.no_grad():
                real_features = self._get_real_features(discriminator)
            if real_features is not None:
                fake_features = discriminator.get_features(fake_images)
                feature_loss = F.mse_loss(fake_features.mean(0), real_features)
                g_loss = g_loss + 0.1 * feature_loss

            g_loss.backward()
            self.g_optimizer.step()

            total_g_loss += g_loss.item()
            steps += 1

            # Early stopping on stagnating generator loss.
            if g_loss.item() < best_loss:
                best_loss = g_loss.item()
                patience = 0
            else:
                patience += 1
                if patience >= max_patience:
                    break

        avg_loss = total_g_loss / steps if steps else 0.0
        self.gan_losses.append(avg_loss)
        return avg_loss

    def _get_real_features(self, discriminator):
        """Mean penultimate features of one real batch (own data as a proxy
        for the victim's).

        Returns None when no batch can be drawn — feature matching is an
        optional, best-effort regularizer. (The previous bare `except:` also
        swallowed KeyboardInterrupt/SystemExit.)
        """
        try:
            real_data, _ = next(iter(self.dataloader))
        except Exception:  # e.g. empty dataloader
            return None

        real_data = real_data.to(self.device)
        with torch.no_grad():
            return discriminator.get_features(real_data).mean(0)

    def generate_fake_samples(self, num_samples=1000):
        """Generate up to `num_samples` fakes the local model finds convincing.

        A sample is kept only when the local model assigns the target class a
        probability > 0.5. Batches are drawn until enough samples have been
        collected (with a retry cap), fixing the old count logic that assumed
        every appended batch held 64 samples and therefore returned fewer
        samples than requested whenever filtering dropped any.

        Returns:
            (images tensor on CPU, list of fake_class labels), or (None, None)
            when no sample passes the confidence filter.
        """
        self.generator.eval()
        kept_batches = []
        n_kept = 0
        # Cap attempts so a weak generator cannot loop forever.
        max_batches = 10 * ((num_samples + 63) // 64)

        with torch.no_grad():
            for _ in range(max_batches):
                if n_kept >= num_samples:
                    break
                batch = min(64, num_samples - n_kept)
                noise = torch.randn(batch, self.nz, 1, 1, device=self.device)
                samples = self.generator(noise)

                # Confidence of the current model that each sample belongs
                # to the target class (model outputs log-probs).
                outputs = self.local_model(samples)
                confidence = torch.exp(outputs)[:, self.target_class]
                mask = confidence > 0.5
                kept = int(mask.sum().item())
                if kept:
                    kept_batches.append(samples[mask].cpu())
                    n_kept += kept

        if not kept_batches:
            return None, None

        fake_images = torch.cat(kept_batches, dim=0)[:num_samples]
        fake_labels = [self.fake_class] * fake_images.size(0)

        self.generated_data = fake_images
        self.generated_labels = torch.tensor(fake_labels)
        return fake_images, fake_labels

    def local_train(self, epochs=1):
        """Train on real data plus the previously generated fakes (if any)."""
        self.local_model.train()
        train_loader = self._build_training_loader()

        total_loss = 0.0
        correct = 0
        total = 0

        for _ in range(epochs):
            for data, target in train_loader:
                data = data.to(self.device)
                target = target.to(self.device)

                self.optimizer.zero_grad()
                output = self.local_model(data)
                loss = self.criterion(output, target)
                loss.backward()
                self.optimizer.step()

                total_loss += loss.item()
                correct += output.argmax(dim=1).eq(target).sum().item()
                total += target.size(0)

        accuracy = 100.0 * correct / total if total > 0 else 0.0
        avg_loss = total_loss / max(len(train_loader) * epochs, 1)
        return {'loss': avg_loss, 'accuracy': accuracy}

    def _build_training_loader(self):
        """Return a loader over real + generated data, or the plain real loader
        when no fakes have been generated yet (or the real set is empty)."""
        if len(self.generated_data) == 0:
            return self.dataloader

        # Materialize the real subset (small per participant).
        real_items = [(data, label) for data, label in self.dataset]
        if not real_items:
            return self.dataloader

        real_data = torch.stack([item[0] for item in real_items])
        real_labels = torch.tensor([item[1] for item in real_items])

        all_data = torch.cat([real_data, self.generated_data], dim=0)
        all_labels = torch.cat([real_labels, self.generated_labels], dim=0)

        combined = TensorDataset(all_data, all_labels)
        return DataLoader(
            combined,
            batch_size=self.dataloader.batch_size,
            shuffle=True,
            num_workers=0
        )


# ==================== 5. 协作学习系统 ====================
class CollaborativeLearningSystem:
    """Orchestrates rounds of download -> local train -> upload for every participant."""

    def __init__(self, participants, parameter_server,
                 upload_fraction=1.0, download_fraction=1.0):
        self.participants = participants
        self.parameter_server = parameter_server
        self.upload_fraction = upload_fraction
        self.download_fraction = download_fraction

    def train_round(self, local_epochs=1, adversary_gan_iterations=100):
        """Run one full round; return a list of per-participant stat dicts."""
        round_stats = []

        for participant in self.participants:
            # Pull the latest global parameters.
            participant.update_parameters(
                self.parameter_server.send_update(self.download_fraction))

            stats = {'participant': participant.name}

            # Adversaries first refresh their GAN and fake-sample pool.
            if isinstance(participant, AdversarialParticipant):
                gan_loss = participant.train_gan(
                    num_iterations=adversary_gan_iterations)
                fake_samples, _ = participant.generate_fake_samples(
                    num_samples=1000)
                stats['gan_loss'] = gan_loss
                stats['num_fake_samples'] = (
                    len(fake_samples) if fake_samples is not None else 0)

            # Local training (real + injected fakes for the adversary).
            stats.update(participant.local_train(epochs=local_epochs))

            # Push the updated parameters back to the server.
            self.parameter_server.receive_update(
                participant.name,
                participant.get_parameters(),
                self.upload_fraction
            )

            round_stats.append(stats)

        return round_stats

    def train(self, num_rounds=20, local_epochs=1, adversary_gan_iterations=100):
        """Run the full experiment, printing per-round progress.

        Returns:
            Flat list of per-participant stat dicts, each tagged with 'round'.
        """
        print("=" * 70)
        print("协作深度学习 - GAN攻击实验")
        print("=" * 70)

        history = []

        for round_idx in range(num_rounds):
            print(f"\n{'=' * 70}")
            print(f"Round {round_idx + 1}/{num_rounds}")
            print(f"{'=' * 70}")

            for stats in self.train_round(local_epochs, adversary_gan_iterations):
                stats['round'] = round_idx + 1
                history.append(stats)

                print(f"\n[{stats['participant']}]")
                print(f"  Loss: {stats['loss']:.4f}")
                print(f"  Accuracy: {stats['accuracy']:.2f}%")

                if 'gan_loss' in stats:
                    print(f"  GAN Loss: {stats['gan_loss']:.4f}")
                    print(f"  Generated Samples: {stats['num_fake_samples']}")

        return history


# ==================== 6. 评估和可视化 ====================
def evaluate_model(model, test_loader, device='cpu'):
    """Return the top-1 accuracy (%) of `model` over `test_loader`.

    Returns 0.0 for an empty loader.
    """
    model.eval()
    n_correct = 0
    n_total = 0

    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            predictions = model(inputs).argmax(dim=1)
            n_correct += (predictions == labels).sum().item()
            n_total += labels.size(0)

    return 100.0 * n_correct / n_total if n_total > 0 else 0.0


def visualize_attack_results(test_dataset, generator, target_class=0,
                             nz=100, device='cpu', num_samples=20):
    """Save a real-vs-generated comparison grid for `target_class`.

    Writes 'gan_attack_comparison.png' in the working directory:
    top row = real test samples of the target class, bottom row = fakes.
    """
    # Collect real examples of the target class, stopping as soon as we
    # have enough (previously the whole test set was scanned every call).
    real_samples = []
    for data, label in test_dataset:
        if label == target_class:
            real_samples.append(data)
            if len(real_samples) >= num_samples:
                break

    # Draw fresh fakes from the generator.
    generator.eval()
    with torch.no_grad():
        noise = torch.randn(num_samples, nz, 1, 1, device=device)
        fake_samples = generator(noise).cpu()

    fig = plt.figure(figsize=(16, 6))

    # Top row: real samples.
    for i in range(min(10, len(real_samples))):
        ax = plt.subplot(2, 10, i + 1)
        img = real_samples[i].squeeze().numpy()
        img = np.clip((img + 1) / 2, 0, 1)  # undo the [-1, 1] normalization
        ax.imshow(img, cmap='gray')
        ax.axis('off')
        if i == 0:
            ax.set_title('Real Samples', fontsize=12, fontweight='bold', pad=10)

    # Bottom row: generated samples.
    for i in range(min(10, len(fake_samples))):
        ax = plt.subplot(2, 10, i + 11)
        img = fake_samples[i].squeeze().numpy()
        img = np.clip((img + 1) / 2, 0, 1)
        ax.imshow(img, cmap='gray')
        ax.axis('off')
        if i == 0:
            ax.set_title('Generated Samples', fontsize=12, fontweight='bold', pad=10)

    plt.tight_layout()
    plt.savefig('gan_attack_comparison.png', dpi=200, bbox_inches='tight')
    print("\n✓ 对比图已保存: gan_attack_comparison.png")
    plt.close()


def plot_comprehensive_results(history, adversary, save_prefix='results'):
    """Plot loss/accuracy curves for both participants plus the GAN loss.

    Saves '{save_prefix}_training_history.png'. `adversary` is currently
    unused but kept for interface compatibility with callers.
    """
    fig = plt.figure(figsize=(18, 5))

    victim_data = [h for h in history if h['participant'] == 'Victim']
    adversary_data = [h for h in history if h['participant'] == 'Adversary']

    def draw_curves(ax, key, title, ylabel):
        # One metric, both participants, shared styling.
        if victim_data:
            ax.plot([h['round'] for h in victim_data],
                    [h[key] for h in victim_data],
                    'b-o', label='Victim', linewidth=2, markersize=6)
        if adversary_data:
            ax.plot([h['round'] for h in adversary_data],
                    [h[key] for h in adversary_data],
                    'r-s', label='Adversary', linewidth=2, markersize=6)
        ax.set_xlabel('Round', fontsize=12, fontweight='bold')
        ax.set_ylabel(ylabel, fontsize=12, fontweight='bold')
        ax.set_title(title, fontsize=14, fontweight='bold')
        ax.legend(fontsize=10)
        ax.grid(True, alpha=0.3)

    # 1. Training loss / 2. Training accuracy.
    draw_curves(plt.subplot(1, 3, 1), 'loss', 'Training Loss', 'Loss')
    draw_curves(plt.subplot(1, 3, 2), 'accuracy', 'Training Accuracy', 'Accuracy (%)')

    # 3. GAN loss (only present on adversary entries).
    ax3 = plt.subplot(1, 3, 3)
    if adversary_data and 'gan_loss' in adversary_data[0]:
        ax3.plot([h['round'] for h in adversary_data],
                 [h['gan_loss'] for h in adversary_data],
                 'g-^', linewidth=2, markersize=6)
        ax3.set_xlabel('Round', fontsize=12, fontweight='bold')
        ax3.set_ylabel('GAN Loss', fontsize=12, fontweight='bold')
        ax3.set_title('GAN Training Loss', fontsize=14, fontweight='bold')
        ax3.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(f'{save_prefix}_training_history.png', dpi=200, bbox_inches='tight')
    print(f"✓ 训练历史已保存: {save_prefix}_training_history.png")
    plt.close()


def save_generated_samples_grid(generator, nz=100, device='cpu',
                                save_path='generated_grid.png'):
    """Render an 8x8 grid of fresh generator samples and save it to `save_path`."""
    generator.eval()

    with torch.no_grad():
        latent = torch.randn(64, nz, 1, 1, device=device)
        images = generator(latent).cpu()

    fig, axes = plt.subplots(8, 8, figsize=(12, 12))

    # One sample per cell; map tanh output [-1, 1] back to [0, 1] for display.
    for cell, image in zip(axes.flatten(), images):
        pixels = np.clip((image.squeeze().numpy() + 1) / 2, 0, 1)
        cell.imshow(pixels, cmap='gray')
        cell.axis('off')

    plt.suptitle('Generated Samples (64 images)', fontsize=16, fontweight='bold')
    plt.tight_layout()
    plt.savefig(save_path, dpi=200, bbox_inches='tight')
    print(f"✓ 生成样本网格已保存: {save_path}")
    plt.close()


# ==================== 7. 主程序 ====================
def main():
    """Run the full GAN-attack experiment end to end.

    Pipeline: prepare the MNIST label split -> build the global model and
    parameter server -> create one honest victim and one adversary ->
    run collaborative training -> evaluate both models and the quality of
    the generated samples -> save all plots.

    Returns:
        (history, victim, adversary) for post-hoc analysis by the caller.
    """
    # Seed all RNGs for reproducibility.
    torch.manual_seed(42)
    np.random.seed(42)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(42)

    # Device selection: GPU when available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}\n")

    # Experiment configuration.
    config = {
        'num_rounds': 160,
        'local_epochs': 1,
        'learning_rate': 0.01,
        'batch_size': 64,
        'upload_fraction': 1.0,
        'download_fraction': 1.0,
        'adversary_gan_iterations': 500,
        'nz': 100,
        'target_class': 4,  # attack target class: digit 4 (held only by the victim)
        'fake_class': 10,   # artificial label assigned to injected fake samples
    }

    print("=" * 70)
    print("实验配置")
    print("=" * 70)
    for key, value in config.items():
        print(f"  {key:30s}: {value}")
    print()

    # 1. Prepare the data: victim holds digits 0-4, adversary holds 5-9,
    #    so the adversary has no real samples of the target class.
    print("=" * 70)
    print("1. 数据准备")
    print("=" * 70)
    collab_data = CollaborativeDataset('mnist')

    labels_dict = {
        'victim': [0, 1, 2, 3, 4],
        'adversary': [5, 6, 7, 8, 9]
    }
    datasets = collab_data.split_dataset_by_labels(labels_dict)

    print(f"  Victim数据集大小: {len(datasets['victim'])}")
    print(f"  Adversary数据集大小: {len(datasets['adversary'])}")
    print(f"  目标类别: {config['target_class']} (数字{config['target_class']})")
    print()

    # 2. Build the shared model (11 classes = 10 digits + 1 fake class)
    #    and the parameter server that mediates updates.
    print("=" * 70)
    print("2. 模型初始化")
    print("=" * 70)
    global_model = CNNDiscriminator(num_classes=11, input_channels=1)
    parameter_server = ParameterServer(global_model)
    print(" 全局模型已创建")
    print(" 参数服务器已初始化")
    print()

    # 3. Create the participants.
    print("=" * 70)
    print("3. 参与者创建")
    print("=" * 70)

    victim = HonestParticipant(
        name='Victim',
        model=copy.deepcopy(global_model),
        dataset=datasets['victim'],
        device=device,
        learning_rate=config['learning_rate'],
        batch_size=config['batch_size']
    )
    print(" Victim参与者已创建")

    adversary = AdversarialParticipant(
        name='Adversary',
        model=copy.deepcopy(global_model),
        dataset=datasets['adversary'],
        target_class=config['target_class'],
        fake_class=config['fake_class'],
        device=device,
        learning_rate=config['learning_rate'],
        batch_size=config['batch_size'],
        nz=config['nz']
    )
    print(" Adversary参与者已创建")
    print()

    participants = [victim, adversary]

    # 4. Wire participants and server into the collaborative system.
    print("=" * 70)
    print("4. 协作学习系统初始化")
    print("=" * 70)

    system = CollaborativeLearningSystem(
        participants=participants,
        parameter_server=parameter_server,
        upload_fraction=config['upload_fraction'],
        download_fraction=config['download_fraction']
    )
    print(" 协作学习系统已就绪")
    print()

    # 5. Run the collaborative training rounds (this is the long part).
    print("=" * 70)
    print("5. 开始训练")
    print("=" * 70)
    print()

    history = system.train(
        num_rounds=config['num_rounds'],
        local_epochs=config['local_epochs'],
        adversary_gan_iterations=config['adversary_gan_iterations']
    )

    # 6. Evaluate both local models on the held-out test set.
    print("\n" + "=" * 70)
    print("6. 模型评估")
    print("=" * 70)

    test_loader = DataLoader(
        collab_data.test_dataset,
        batch_size=1000,
        shuffle=False,
        num_workers=0
    )

    victim_acc = evaluate_model(victim.local_model, test_loader, device)
    adversary_acc = evaluate_model(adversary.local_model, test_loader, device)

    print(f"\n最终结果:")
    print(f"  Victim准确率:    {victim_acc:.2f}%")
    print(f"  Adversary准确率: {adversary_acc:.2f}%")
    print()

    # 7. Judge generated-sample quality using the *victim's* model:
    #    how often fakes are classified as the target class, and how confidently.
    print("=" * 70)
    print("7. 生成样本质量评估")
    print("=" * 70)

    adversary.generator.eval()
    with torch.no_grad():
        test_noise = torch.randn(1000, config['nz'], 1, 1, device=device)
        test_samples = adversary.generator(test_noise)

        # Evaluate with the victim's model (model outputs log-probabilities).
        victim.local_model.eval()
        outputs = victim.local_model(test_samples)
        probs = torch.exp(outputs)
        predictions = outputs.argmax(dim=1)

        # Fraction of fakes classified as the target class, and mean confidence.
        target_ratio = (predictions == config['target_class']).float().mean().item()
        avg_confidence = probs[:, config['target_class']].mean().item()

    print(f"\n生成样本分析 (共1000个样本):")
    print(f"  被分类为目标类别的比例: {target_ratio * 100:.2f}%")
    print(f"  平均置信度: {avg_confidence:.4f}")
    print()

    # 8. Visualizations (all saved as PNG files in the working directory).
    print("=" * 70)
    print("8. 结果可视化")
    print("=" * 70)
    print()

    # 8.1 Training history curves.
    plot_comprehensive_results(history, adversary, save_prefix='gan_attack')

    # 8.2 Real vs. generated side-by-side comparison.
    visualize_attack_results(
        collab_data.test_dataset,
        adversary.generator,
        target_class=config['target_class'],
        nz=config['nz'],
        device=device,
        num_samples=20
    )

    # 8.3 8x8 grid of generated samples.
    save_generated_samples_grid(
        adversary.generator,
        nz=config['nz'],
        device=device,
        save_path='generated_samples_grid.png'
    )

    # 8.4 "Evolution" grid (see plot_generation_evolution's caveats).
    print("生成样本演化可视化...")
    plot_generation_evolution(adversary, config['nz'], device)
    return history, victim, adversary


def plot_generation_evolution(adversary, nz, device, num_stages=5):
    """Save a grid intended to show how generated samples evolve during training.

    NOTE(review): the generator is sampled `num_stages` times in its *final*
    state with the same fixed noise, so every row is identical. Showing true
    evolution would require generator snapshots captured during training.

    Writes 'generation_evolution.png' in the working directory.
    """
    fig, axes = plt.subplots(num_stages, 10, figsize=(15, 8))

    adversary.generator.eval()
    # One shared latent batch so rows are directly comparable.
    fixed_noise = torch.randn(10, nz, 1, 1, device=device)

    # "Simulate" different training stages (see NOTE above: state is fixed).
    with torch.no_grad():
        for stage in range(num_stages):
            samples = adversary.generator(fixed_noise).cpu()

            for idx in range(10):
                img = samples[idx].squeeze().numpy()
                img = (img + 1) / 2  # map tanh output [-1, 1] -> [0, 1]
                img = np.clip(img, 0, 1)
                axes[stage, idx].imshow(img, cmap='gray')
                axes[stage, idx].axis('off')

                if idx == 0:
                    if stage < len(adversary.gan_losses):
                        # Pick a representative recorded GAN loss for this row.
                        loss_val = adversary.gan_losses[stage * len(adversary.gan_losses) // num_stages]
                        # NOTE(review): set_ylabel after axis('off') may not render.
                        axes[stage, idx].set_ylabel(f'Loss: {loss_val:.3f}',
                                                    fontsize=9, rotation=0, ha='right', va='center')

    plt.suptitle('Generation Evolution Over Training', fontsize=14, fontweight='bold')
    plt.tight_layout()
    plt.savefig('generation_evolution.png', dpi=200, bbox_inches='tight')
    print("✓ 生成演化图已保存: generation_evolution.png")
    plt.close()


def analyze_attack_effectiveness(adversary, victim, test_dataset,
                                 target_class, nz, device):
    """Quantify the attack: how the victim's model rates generated vs. real samples.

    Previously all four metrics were computed and then silently discarded;
    they are now printed and returned.

    Returns:
        dict with keys 'fake_target_acc', 'fake_avg_conf',
        'real_target_acc', 'real_avg_conf' (percentages / mean probabilities;
        the real_* entries are 0 when the test set has no target-class sample).
    """
    print("\n" + "=" * 70)
    print("攻击效果详细分析")
    print("=" * 70)

    adversary.generator.eval()
    num_samples = 1000

    with torch.no_grad():
        # Draw fakes from the adversary's generator.
        noise = torch.randn(num_samples, nz, 1, 1, device=device)
        fake_samples = adversary.generator(noise)

        # Score them with the victim's model (outputs log-probabilities).
        victim.local_model.eval()
        fake_outputs = victim.local_model(fake_samples)
        fake_probs = torch.exp(fake_outputs)
        fake_preds = fake_outputs.argmax(dim=1)

        # Collect up to num_samples real target-class examples for comparison.
        real_samples = []
        for data, label in test_dataset:
            if label == target_class:
                real_samples.append(data)
                if len(real_samples) >= num_samples:
                    break

        if real_samples:
            real_batch = torch.stack(real_samples).to(device)
            real_outputs = victim.local_model(real_batch)
            real_probs = torch.exp(real_outputs)
            real_preds = real_outputs.argmax(dim=1)

    # Metrics: classification rate into the target class + mean confidence.
    fake_target_acc = (fake_preds == target_class).float().mean().item() * 100
    fake_avg_conf = fake_probs[:, target_class].mean().item()

    if real_samples:
        real_target_acc = (real_preds == target_class).float().mean().item() * 100
        real_avg_conf = real_probs[:, target_class].mean().item()
    else:
        real_target_acc = 0
        real_avg_conf = 0

    metrics = {
        'fake_target_acc': fake_target_acc,
        'fake_avg_conf': fake_avg_conf,
        'real_target_acc': real_target_acc,
        'real_avg_conf': real_avg_conf,
    }

    print(f"  生成样本被判为目标类别: {fake_target_acc:.2f}% (平均置信度 {fake_avg_conf:.4f})")
    print(f"  真实样本被判为目标类别: {real_target_acc:.2f}% (平均置信度 {real_avg_conf:.4f})")

    return metrics



# Run the experiment.
if __name__ == "__main__":
    try:
        history, victim, adversary = main()

        # Post-hoc detailed analysis on a fresh copy of the test set.
        # (The redundant re-import of DataLoader was removed — it is already
        # imported at the top of the file.)
        collab_data = CollaborativeDataset('mnist')
        analyze_attack_effectiveness(
            adversary,
            victim,
            collab_data.test_dataset,
            target_class=4,  # must match config['target_class'] in main() (was 3)
            nz=100,
            device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        )

        print("\n" + "=" * 70)
        print("所有结果文件:")
        print("  - gan_attack_training_history.png  (训练曲线)")
        print("  - gan_attack_comparison.png        (真实vs生成对比)")
        print("  - generated_samples_grid.png       (64个生成样本)")
        print("  - generation_evolution.png         (生成演化过程)")
        print("=" * 70)

    except KeyboardInterrupt:
        print("\n\n实验被用户中断")
    except Exception as e:
        # Top-level boundary: report the error with a full traceback.
        print(f"\n\n实验出错: {str(e)}")
        import traceback

        traceback.print_exc()