import torch  # PyTorch 主模块
import torch.nn as nn  # 包含神经网络层和功能，如全连接层、激活函数等
import torch.optim as optim  # 包含优化器，如 SGD、Adam 等
import numpy as np  # 用于数值计算和矩阵操作
import matplotlib.pyplot as plt  # 用于绘图和可视化
from collections import deque  # 双端队列，用于经验回放缓存（DRL）
import random  # 用于生成随机数、随机抽样等操作
import math  # 提供数学函数，如 sqrt、log、exp 等
from torch.utils.data import DataLoader, Subset, Dataset  # 数据加载工具，Subset 用于提取子集，Dataset 是自定义数据集的基类
from torchvision import datasets, transforms   # datasets 提供常用图像数据集，transforms 用于数据增强和预处理
from sklearn.model_selection import train_test_split  # 用于将数据集划分为训练集和测试集
import copy  # 提供对象的深拷贝和浅拷贝功能
import os  # 用于操作系统交互，例如文件路径管理、创建文件夹等
from collections import Counter

class ActorNetwork(nn.Module):
    """Policy network with a shared trunk and two action heads.

    Maps an environment state to a hybrid action:
      * continuous head — each UAV's flight angle θ and distance d,
      * discrete head  — each UAV's binary (b, x, y, z) decisions.

    Both heads end in a Sigmoid so every raw output lies in [0, 1]; the
    caller rescales the continuous part to θ∈[0,2π) and d∈[0,l_max].
    Defaults are kept from the original code (MDC2_DRL_Agent always passes
    explicitly computed dimensions, so the defaults are rarely used).
    """

    def __init__(self, state_dim=58, continuous_action_dim=6, discrete_action_dim=12):
        super().__init__()

        def make_head(out_dim):
            # Small two-layer MLP head: 128 -> 64 -> out_dim, Sigmoid output.
            return nn.Sequential(
                nn.Linear(128, 64),
                nn.ReLU(),
                nn.Linear(64, out_dim),
                nn.Sigmoid(),
            )

        # Shared feature extractor: state -> 256 -> 128.
        self.fc_shared = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
        )
        self.continuous_head = make_head(continuous_action_dim)  # (θ, d) per UAV
        self.discrete_head = make_head(discrete_action_dim)      # (b, x, y, z) per UAV

    def forward(self, state):
        """Return (continuous_actions, discrete_logits), each in [0, 1]."""
        features = self.fc_shared(state)
        return self.continuous_head(features), self.discrete_head(features)


class CriticNetwork(nn.Module):
    """Q-value network: scores a (state, action) pair with a scalar Q(s, a)."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        # MLP over the concatenated state-action vector: (s,a) -> 256 -> 128 -> 1.
        self.q_network = nn.Sequential(
            nn.Linear(state_dim + action_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 1),
        )

    def forward(self, state, action):
        """Return Q(s, a) with shape (batch, 1)."""
        return self.q_network(torch.cat((state, action), dim=1))


class MDC2_DRL_Agent:
    """DDPG-style agent for the multi-UAV problem with a hybrid action space.

    Maintains a primary actor/critic pair (``p_*``, trained by gradient
    descent) and a target pair (``t_*``) that tracks the primary via soft
    updates (paper Eqs. 29-32), plus an experience-replay buffer.
    """

    def __init__(self, M=10, N=3):
        # System parameters
        self.M = M  # number of sub-regions
        self.N = N  # number of UAVs
        self.l_max = 5.0  # maximum flight distance per slot (km)
        self.H = 0.1  # flight altitude (km)
        self.T = 100  # total number of time slots

        # State / action dimensions.
        # NOTE(review): for the default M=10, N=3 this formula gives 61,
        # while ActorNetwork's default/docstring says 58 — the explicit value
        # passed below wins, but the paper value should be double-checked.
        self.state_dim = 2 * N + M + M * N + N + N + 3 * N  # positions + AoI + connectivity + battery + harvested energy + data volumes
        self.continuous_action_dim = 2 * N  # (θ, d) per UAV
        self.discrete_action_dim = 4 * N  # (b, x, y, z) per UAV

        # Primary and target networks
        self.p_actor = ActorNetwork(self.state_dim, self.continuous_action_dim, self.discrete_action_dim)
        self.p_critic = CriticNetwork(self.state_dim, self.continuous_action_dim + self.discrete_action_dim)
        self.t_actor = ActorNetwork(self.state_dim, self.continuous_action_dim, self.discrete_action_dim)
        self.t_critic = CriticNetwork(self.state_dim, self.continuous_action_dim + self.discrete_action_dim)

        # Start the target networks as exact copies of the primaries.
        self.hard_update(self.t_actor, self.p_actor)
        self.hard_update(self.t_critic, self.p_critic)

        # Optimizers (the critic uses a larger learning rate than the actor).
        self.actor_optimizer = optim.Adam(self.p_actor.parameters(), lr=0.0001)
        self.critic_optimizer = optim.Adam(self.p_critic.parameters(), lr=0.0002)

        # Training hyper-parameters
        self.gamma = 0.95  # discount factor
        self.tau = 0.01  # soft-update coefficient
        self.memory = deque(maxlen=100000)  # experience-replay buffer
        self.batch_size = 64

    def hard_update(self, target, source):
        """Copy all parameters from *source* into *target* (full overwrite)."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(param.data)  # direct parameter copy into the target network

    def get_action(self, state, epsilon):
        """Sample a hybrid (continuous + discrete) action from the primary actor.

        The continuous sigmoid outputs are rescaled to θ∈[0,2π] and
        d∈[0,l_max]; the discrete part is ε-greedy — threshold the network
        outputs at 0.5 with probability 1-ε, otherwise pick random bits.
        """
        state_tensor = torch.FloatTensor(state).unsqueeze(0)

        with torch.no_grad():
            continuous_actions, discrete_logits = self.p_actor(state_tensor)

        # Rescale sigmoid outputs to their physical ranges.
        continuous_actions = continuous_actions.numpy()[0]
        continuous_actions[::2] *= 2 * math.pi  # even slots: θ∈[0,2π]
        continuous_actions[1::2] *= self.l_max  # odd slots: d∈[0,l_max]

        # Discrete part: ε-greedy exploration.
        if random.random() > epsilon:
            # Exploit: threshold the head outputs.
            discrete_actions = (discrete_logits.numpy()[0] > 0.5).astype(np.float32)
        else:
            # Explore: uniformly random binary decisions.
            discrete_actions = np.random.randint(0, 2, size=self.discrete_action_dim).astype(np.float32)

        return continuous_actions, discrete_actions

    def store_transition(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition to the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def update_networks(self):
        """One DDPG update step: critic, then actor, then soft target update.

        No-op until the replay buffer holds at least one full batch.
        """
        if len(self.memory) < self.batch_size:
            return

        # Sample a minibatch from the replay buffer.
        batch = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states))
        actions = torch.FloatTensor(np.array(actions))
        rewards = torch.FloatTensor(np.array(rewards)).unsqueeze(1)
        next_states = torch.FloatTensor(np.array(next_states))
        dones = torch.FloatTensor(np.array(dones)).unsqueeze(1)

        # Critic update (paper Eq. 29): TD target built from target networks,
        # detached via no_grad so only the primary critic receives gradients.
        with torch.no_grad():
            next_continuous, next_discrete = self.t_actor(next_states)
            next_actions = torch.cat([next_continuous, next_discrete], dim=1)
            next_q = self.t_critic(next_states, next_actions)
            target_q = rewards + (1 - dones) * self.gamma * next_q

        current_q = self.p_critic(states, actions)
        critic_loss = nn.MSELoss()(current_q, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.p_critic.parameters(), 1.0)
        self.critic_optimizer.step()

        # Actor update (paper Eq. 31): maximize Q of the policy's own actions.
        continuous_actions, discrete_actions = self.p_actor(states)
        policy_actions = torch.cat([continuous_actions, discrete_actions], dim=1)
        actor_loss = -self.p_critic(states, policy_actions).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.p_actor.parameters(), 1.0)
        self.actor_optimizer.step()

        # Soft update of the target networks (paper Eq. 32).
        self.soft_update(self.p_actor, self.t_actor, self.tau)
        self.soft_update(self.p_critic, self.t_critic, self.tau)

    def soft_update(self, local_model, target_model, tau):
        """Polyak averaging: target ← τ·local + (1-τ)·target."""
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)


class UAVEnvironment:
    def __init__(self, M=10, N=3, T=100):
        """UAV data-collection environment (parameters per paper Sec. 4.1).

        M: number of sub-regions (grid cells)
        N: number of UAVs (3)
        T: maximum number of global iterations (100)
        """
        # Parameters per paper Sec. 4.1.
        self.M = M
        self.N = N
        self.T = T
        self.R_max = 1.0  # coverage radius
        self.H = 0.1  # flight altitude
        self.l_max = 10.0  # maximum flight distance per step
        # NOTE(review): MDC2_DRL_Agent uses l_max=5.0 — the two values
        # disagree; the agent's value is the one that scales the actions.
        self.W = 100  # bandwidth (MHz)
        self.bs_position = np.array([-1, -1])  # base-station position

        # Federated-learning parameters (paper Sec. 4.1)
        self.N_max = 500  # max local training epochs
        self.eta = 0.01  # learning rate
        self.epsilon = 1.0  # target accuracy

        # Power-consumption parameters (paper Sec. 2.5), in watts
        self.p_mov = 10.0  # moving power
        self.p_rev = 5.0  # receiving power
        self.p_cmp = 15.0  # computing power
        self.p_U_tra = 8.0  # UAV-to-UAV transmission power
        self.p_B_tra = 12.0  # UAV-to-base-station transmission power

        # Reward weights (paper Sec. 4.1)
        self.mu1 = 1000  # global (AoI) reward weight
        self.mu2 = 0.1  # local (energy) reward weight

        self.reset()

    def reset(self):
        """Reset the episode and return the initial state vector."""
        self.current_step = 0
        # Random initial UAV positions inside the 10x10 area.
        self.drone_positions = np.random.uniform(0, 10, size=(self.N, 2))

        # Age of Information per sub-region (paper Eq. 12), starts at 0.
        self.aoi = np.zeros(self.M)

        # Energy state (paper Sec. 2.6): full batteries, plus a per-UAV
        # harvested-energy amount drawn from the uniform distribution [0, 10).
        self.battery = np.full(self.N, 100.0)  # initial charge 100%
        self.energy_harvest = np.random.uniform(0, 10, size=self.N)

        # Data volumes (paper Eq. 4): local / U2U / U2B, all start at 0.
        self.data_volume = np.zeros((self.N, 3))

        # User devices: each of the M sub-regions gets 1-3 random users.
        self.users = [np.random.randint(1, 4) for _ in range(self.M)]
        return self._get_state()

    def _get_state(self):
        """Build the normalized state vector (paper Eq. 23)."""
        state = []
        # 1. UAV positions (2*N values): the (N, 2) position matrix is
        # flattened and divided by 10 to scale coordinates into [0, 1].
        state.extend(self.drone_positions.flatten() / 10.0)

        # 2. Per-sub-region AoI (M values), normalized by 100.
        state.extend(self.aoi / 100.0)

        # 3. Connectivity matrix (M×N): entry (k, i) is 1 iff UAV i is within
        # R_max of the center of sub-region k.
        # NOTE(review): grid centers are placed at row*10+5 / col*10+5 while
        # UAV positions stay within [0, 10] — with R_max=1 most sub-regions
        # can never be covered; confirm the intended units against the paper.
        connection = np.zeros((self.M, self.N))
        grid_size = int(np.sqrt(self.M))
        for i in range(self.N):
            for k in range(self.M):
                row, col = k // grid_size, k % grid_size
                grid_center = np.array([row * 10 + 5, col * 10 + 5])
                dist = np.linalg.norm(self.drone_positions[i] - grid_center)
                connection[k, i] = 1 if dist <= self.R_max else 0
        state.extend(connection.flatten())

        # 4-6. Battery, harvested energy, and data volumes (all normalized).
        state.extend(self.battery / 100.0)
        state.extend(self.energy_harvest / 10.0)
        state.extend(self.data_volume.flatten() / 50.0)

        return np.array(state, dtype=np.float32)

    def step(self, continuous_actions, discrete_actions):
        """Advance one time slot (paper Secs. 2.1-2.6).

        Returns (next_state, reward, done, info); info carries the
        decomposed global/local rewards plus averaged AoI/energy/battery.
        """
        # 1. Move the UAVs (paper Sec. 2.1): polar displacement (θ, d),
        # with the resulting coordinates clipped to the [0, 10]^2 area.
        for i in range(self.N):
            theta = continuous_actions[i * 2]  # θ∈[0,2π]
            d = continuous_actions[i * 2 + 1]  # d∈[0,l_max]
            new_x = self.drone_positions[i, 0] + d * np.cos(theta)
            new_y = self.drone_positions[i, 1] + d * np.sin(theta)
            self.drone_positions[i] = np.clip([new_x, new_y], 0, 10)

        # 2. Energy consumption (paper Sec. 2.5).
        move_energy = np.zeros(self.N)
        comm_energy = np.zeros(self.N)
        comp_energy = np.zeros(self.N)

        for i in range(self.N):
            # Movement energy, assuming a constant speed of 5 units/s.
            d = continuous_actions[i * 2 + 1]
            move_energy[i] = self.p_mov * d / 5.0

            # Unpack UAV i's four binary decisions (b, x, y, z).
            b_offset = i * 4
            x_offset = i * 4 + 1
            y_offset = i * 4 + 2
            z_offset = i * 4 + 3

            b = discrete_actions[b_offset]
            x = discrete_actions[x_offset]
            y = discrete_actions[y_offset]
            z = discrete_actions[z_offset]

            # Communication / computation energy with fixed duty factors.
            comm_energy[i] = self.p_rev * b * 0.1 + self.p_U_tra * y * 0.2 + self.p_B_tra * z * 0.3
            comp_energy[i] = self.p_cmp * (x * 0.5 + y * 0.3 + z * 0.7)


        # 3. Battery update (paper Sec. 2.6): spend energy, add harvested
        # energy, then clip the charge into [0, 100]%.
        total_energy = move_energy + comm_energy + comp_energy
        self.battery -= total_energy
        self.battery = np.clip(self.battery + self.energy_harvest, 0, 100)

        # AoI update: every sub-region ages by one slot, then any sub-region
        # currently covered by a UAV is refreshed to 0.
        self.aoi += 1
        grid_size = int(np.sqrt(self.M))
        for i in range(self.N):
            # Map the UAV's position to a grid-cell index.
            # NOTE(review): positions lie in [0, 10] but cells are indexed by
            # // 10, so only cell 0 (or an edge case at exactly 10) can be
            # reached — confirm the intended grid scale.
            x, y = self.drone_positions[i]
            grid_x = int(x // 10)
            grid_y = int(y // 10)
            if 0 <= grid_x < grid_size and 0 <= grid_y < grid_size:
                k = grid_x * grid_size + grid_y
                if k < self.M:
                    self.aoi[k] = 0  # covered region receives fresh data

        # 4. Reward (paper Eq. 13): AoI and total energy enter as penalties.
        aoi_penalty = -np.sum(self.aoi)
        energy_penalty = -np.sum(total_energy)

        # Decomposed reward: global (freshness) + local (energy) terms.
        global_reward = self.mu1 * aoi_penalty
        local_reward = self.mu2 * energy_penalty
        reward = global_reward + local_reward

        # 5. Episode bookkeeping: the episode ends after T slots.
        self.current_step += 1
        done = self.current_step >= self.T
        next_state = self._get_state()
        return next_state, reward, done, {
            'aoi': np.mean(self.aoi),
            'energy': np.mean(total_energy),
            'battery': np.mean(self.battery),
            'global_reward': global_reward,
            'local_reward': local_reward
        }

class CNNModel(nn.Module):
    """Small CNN classifier: two conv+pool stages, a 2-layer MLP head, 10 classes.

    Handles both 28x28 single-channel inputs (MNIST / FashionMNIST) and
    32x32 three-channel inputs (CIFAR-10); after two 2x2 poolings the
    flattened feature size is 64*7*7 or 64*8*8 respectively.
    """

    def __init__(self, input_channels=1):
        super().__init__()
        self.input_channels = input_channels

        # Feature extractor: two conv blocks, each halving the spatial size.
        self.features = nn.Sequential(
            nn.Conv2d(input_channels, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Classification head sized for the dataset's spatial resolution
        # (8x8 for CIFAR-10's 32x32 inputs, 7x7 for the 28x28 datasets).
        side = 8 if input_channels == 3 else 7
        self._flat_dim = 64 * side * side
        self.classifier = nn.Sequential(
            nn.Linear(self._flat_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 10),
        )

    def forward(self, x):
        """Extract features, flatten, and classify into 10 logits."""
        x = self.features(x)
        x = x.view(-1, self._flat_dim)
        return self.classifier(x)


class DatasetComparator:
    """Compares four training strategies (Random / Centralized / FedAvg /
    MDC2-DRL) on MNIST, FashionMNIST and CIFAR-10 (paper Sec. 4.2).

    Fixes vs. the original version:
      * local training and aggregation clone the *current* global model
        instead of instantiating a hard-coded single-channel CNNModel — the
        old code crashed on CIFAR-10 (3-channel) models;
      * the three dataset loaders share one subsampling helper;
      * evaluation loops are deduplicated through _evaluate().
    """

    def __init__(self):
        # Datasets per paper Sec. 4.2 (random 70% of train / 30% of test).
        self.datasets = {
            'MNIST': self._load_mnist(),
            'FashionMNIST': self._load_fashion(),
            'CIFAR10': self._load_cifar10(),
        }

        # Models of the compared algorithms.
        self.models = {
            'Random': self._init_random_model(),
            'Centralized': self._init_centralized_model(),
            'FedAvg': self._init_fedavg_model(),
            'MDC2_DRL': self._init_mdc2_model(),
        }

    @staticmethod
    def _subsample(train, test):
        """Return {'train': ..., 'test': ...} with a random 70% subset of
        *train* and a random 30% subset of *test* (independent permutations)."""
        train_indices = torch.randperm(len(train)).tolist()
        test_indices = torch.randperm(len(test)).tolist()
        return {
            'train': Subset(train, train_indices[:int(0.7 * len(train))]),
            'test': Subset(test, test_indices[:int(0.3 * len(test))]),
        }

    def _load_mnist(self):
        """Load MNIST with standard normalization."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        train = datasets.MNIST('./data', train=True, download=True, transform=transform)
        test = datasets.MNIST('./data', train=False, transform=transform)
        return self._subsample(train, test)

    def _load_fashion(self):
        """Load FashionMNIST with standard normalization."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.2860,), (0.3530,))
        ])
        train = datasets.FashionMNIST('./data', train=True, download=True, transform=transform)
        test = datasets.FashionMNIST('./data', train=False, transform=transform)
        return self._subsample(train, test)

    def _load_cifar10(self):
        """Load CIFAR-10 with standard normalization."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))
        ])
        train = datasets.CIFAR10('./data', train=True, download=True, transform=transform)
        test = datasets.CIFAR10('./data', train=False, transform=transform)
        return self._subsample(train, test)

    def _init_random_model(self):
        """Parameter-free baseline that outputs random class scores."""

        class RandomModel(nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return torch.randn(x.size(0), 10)

        return RandomModel()

    def _init_centralized_model(self):
        """Centralized-learning baseline (channel count is adjusted per dataset
        inside run_comparison)."""
        return CNNModel(input_channels=1)

    def _init_fedavg_model(self):
        """FedAvg baseline (channel count is adjusted per dataset later)."""
        return CNNModel(input_channels=1)

    def _init_mdc2_model(self):
        """MDC2-DRL model: a CNN classifier plus a DRL agent for UAV control."""

        class MDC2DRLModel(nn.Module):
            def __init__(self):
                super().__init__()
                self.cnn = CNNModel(input_channels=1)  # global FL model
                self.agent = MDC2_DRL_Agent(M=10, N=3)  # UAV decision policy

            def forward(self, x):
                # Classification goes through the CNN only.
                return self.cnn(x)

            def get_action(self, state, epsilon):
                # UAV control is delegated to the embedded DRL agent.
                return self.agent.get_action(state, epsilon)

        return MDC2DRLModel()

    def _distribute_data(self, train_data, num_regions):
        """Split *train_data* into *num_regions* contiguous shards; the last
        shard absorbs the remainder."""
        data_per_region = len(train_data) // num_regions
        shards = []
        for i in range(num_regions):
            start = i * data_per_region
            end = (i + 1) * data_per_region if i < num_regions - 1 else len(train_data)
            shards.append(Subset(train_data, range(start, end)))
        return shards

    def _local_train(self, global_model, data, N_max, eta):
        """Run one client's local SGD training and return the local model.

        BUGFIX: the local model is a deepcopy of *global_model* rather than a
        fresh CNNModel(input_channels=1); the old hard-coded channel count
        made load_state_dict fail when the global model was the 3-channel
        CIFAR-10 variant.
        """
        local_model = copy.deepcopy(global_model)

        optimizer = optim.SGD(local_model.parameters(), lr=eta)
        criterion = nn.CrossEntropyLoss()

        dataloader = DataLoader(data, batch_size=32, shuffle=True)
        local_model.train()

        # Simplified local loop: min(N_max, #batches) epochs over the shard.
        for _ in range(min(N_max, len(dataloader))):
            for inputs, targets in dataloader:
                optimizer.zero_grad()
                loss = criterion(local_model(inputs), targets)
                loss.backward()
                optimizer.step()

        return local_model

    def _aggregate_models(self, local_models):
        """FedAvg aggregation (paper Eq. 8): parameter-wise mean.

        Assumes *local_models* is non-empty (the caller guards this).
        BUGFIX: the aggregate starts from a deepcopy of the first local model
        instead of a hard-coded CNNModel(input_channels=1), so aggregation
        works regardless of the input channel count.
        """
        global_model = copy.deepcopy(local_models[0])

        with torch.no_grad():
            state_dicts = [model.state_dict() for model in local_models]
            global_dict = global_model.state_dict()

            # Average every parameter/buffer across the local models.
            for key in global_dict.keys():
                stacked = torch.stack([sd[key].float() for sd in state_dicts], 0)
                global_dict[key] = torch.mean(stacked, 0).clone()

            global_model.load_state_dict(global_dict)

        return global_model

    def _evaluate(self, model, test_data):
        """Return the model's accuracy (%) on *test_data*."""
        model.eval()
        dataloader = DataLoader(test_data, batch_size=128)

        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, targets in dataloader:
                outputs = model(inputs)
                _, predicted = torch.max(outputs, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()

        return 100.0 * correct / total

    def run_comparison(self, epochs=10, batch_size=64):
        """Run the full comparison (paper Sec. 4.2).

        Returns {dataset_name: {model_name: accuracy_percent}}.
        """
        results = {}

        for ds_name, ds in self.datasets.items():
            results[ds_name] = {}
            print(f"===== 在 {ds_name} 数据集上进行实验 =====")

            # CIFAR-10 images are RGB; the other datasets are grayscale.
            input_channels = 3 if ds_name == 'CIFAR10' else 1

            # Rebuild the trainable models with the right channel count.
            for model_name in self.models:
                if model_name != 'Random':
                    if model_name == 'MDC2_DRL':
                        self.models[model_name].cnn = CNNModel(input_channels=input_channels)
                    else:
                        self.models[model_name] = CNNModel(input_channels=input_channels)

            for model_name, model in self.models.items():
                print(f"训练 {model_name} 模型...")

                if model_name == 'MDC2_DRL':
                    # Our method needs the federated/DRL training loop.
                    acc = self._train_mdc2(model, ds['train'], ds['test'],
                                           epochs, batch_size)
                else:
                    # All other methods use standard supervised training.
                    acc = self._train_standard(model, ds['train'], ds['test'],
                                               epochs, batch_size)

                results[ds_name][model_name] = acc
                print(f"{model_name} 在 {ds_name} 上的准确率: {acc:.2f}%")

        return results

    def _train_standard(self, model, train_data, test_data, epochs, batch_size):
        """Plain centralized training; returns the final test accuracy (%)."""
        train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)

        # Parameter-free models (e.g. RandomModel) cannot be trained.
        if not list(model.parameters()):
            print("模型没有可训练参数，直接进行评估...")
            return self._evaluate(model, test_data)

        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
        criterion = torch.nn.CrossEntropyLoss()

        for epoch in range(epochs):
            model.train()
            for x, y in train_loader:
                optimizer.zero_grad()
                loss = criterion(model(x), y)
                loss.backward()
                optimizer.step()

            # Periodic progress report every 5 epochs.
            if (epoch + 1) % 5 == 0:
                acc = self._evaluate(model, test_data)
                print(f"Epoch {epoch + 1}/{epochs}, 准确率: {acc:.2f}%")

        return self._evaluate(model, test_data)

    def _train_mdc2(self, model, train_data, test_data, epochs, batch_size):
        """MDC2-DRL training loop (paper Alg. 1).

        Returns the mean of the last recorded accuracies (0.0 when no
        evaluation was recorded).
        """
        env = UAVEnvironment(M=10, N=3, T=epochs)

        # Assign data shards to the sub-regions' user devices (Sec. 4.2).
        user_data = self._distribute_data(train_data, env.M)

        N_max = 5  # max local epochs (simplified; the paper uses 500)
        eta = 0.01  # local learning rate

        global_model = model.cnn
        acc_history = []

        for t in range(epochs):
            # 1. UAV exploration decision (paper Alg. 2).
            state = env._get_state()
            cont_act, disc_act = model.get_action(state, epsilon=0.1)

            # 2. Local training on each covered sub-region's data.
            local_models = []
            grid_size = int(np.sqrt(env.M))
            for i in range(env.N):
                x, y = env.drone_positions[i]
                grid_x = int(x // 10)
                grid_y = int(y // 10)
                if 0 <= grid_x < grid_size and 0 <= grid_y < grid_size:
                    k = grid_x * grid_size + grid_y
                    if k < env.M:
                        local_model = self._local_train(global_model, user_data[k], N_max, eta)
                        local_models.append(local_model)

            # 3. Global aggregation (paper Eq. 8), when any UAV contributed.
            if local_models:
                global_model = self._aggregate_models(local_models)
                model.cnn = global_model  # keep the MDC2 wrapper in sync

            # 4. Environment step.
            next_state, reward, done, _ = env.step(cont_act, disc_act)

            # 5. Periodic evaluation of the global model.
            if (t + 1) % 5 == 0:
                acc = self._evaluate(global_model, test_data)
                acc_history.append(acc)
                print(f"Epoch {t + 1}/{epochs}, 准确率: {acc:.2f}%")

        return np.mean(acc_history[-5:]) if acc_history else 0.0


# 训练过程
def train_drl():
    """Train the MDC2-DRL agent in the UAV environment and save its networks.

    Returns the trained agent; the primary actor/critic weights are written
    to actor_model.pth / critic_model.pth.
    """
    env = UAVEnvironment(M=10, N=3, T=100)  # T matches paper Sec. 2.1
    agent = MDC2_DRL_Agent(M=10, N=3)

    episodes = 50  # shortened run (the paper trains for many more episodes)
    epsilon, epsilon_min, epsilon_decay = 1.0, 0.01, 0.995
    rewards_history = []  # total reward per episode

    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        done = False

        while not done:
            # Act, step the environment, store the transition, then learn.
            continuous_actions, discrete_actions = agent.get_action(state, epsilon)
            next_state, reward, done, _ = env.step(continuous_actions, discrete_actions)
            agent.store_transition(
                state,
                np.concatenate([continuous_actions, discrete_actions]),
                reward,
                next_state,
                done,
            )
            agent.update_networks()
            state = next_state
            total_reward += reward

        rewards_history.append(total_reward)

        # Decay the exploration rate, bounded below by epsilon_min.
        epsilon = max(epsilon_min, epsilon * epsilon_decay)

        # Progress report every 10 episodes.
        if (episode + 1) % 10 == 0:
            avg_reward = np.mean(rewards_history[-10:])
            print(f"Episode: {episode + 1}, Total Reward: {total_reward:.2f}, "
                  f"Avg Reward (10 eps): {avg_reward:.2f}, "
                  f"Avg AoI: {np.mean(env.aoi):.2f}, "
                  f"Avg Battery: {np.mean(env.battery):.2f}%, "
                  f"Epsilon: {epsilon:.3f}")

    # Persist the trained primary networks.
    torch.save(agent.p_actor.state_dict(), "actor_model.pth")
    torch.save(agent.p_critic.state_dict(), "critic_model.pth")
    print("训练完成，模型已保存")

    return agent


def run_dataset_comparison():
    """Run the dataset comparison experiment and print a tab-separated table
    of accuracies (rows = datasets, columns = methods)."""
    comparator = DatasetComparator()
    results = comparator.run_comparison(epochs=20, batch_size=64)

    print("\n=========== 对比实验结果 ===========")

    # Column order comes from the first dataset's result dict.
    methods = list(next(iter(results.values())).keys())

    # Header row, then one row per dataset.
    print("数据集\\方法" + "".join(f"\t{method}" for method in methods))
    for dataset, dataset_results in results.items():
        print(dataset + "".join(f"\t{dataset_results[method]:.2f}%" for method in methods))

#
# if __name__ == "__main__":
#     # 1. 训练深度强化学习代理
#     print("开始训练 DRL 代理...")
#     agent = train_drl()
#
#     # 2. 运行数据集对比实验
#     print("\n开始数据集对比实验...")
#     run_dataset_comparison()
# --------------- Reproduction code for Experiment 4 (incl. comparison experiments) ---------------#

# Directory for the generated figures.
os.makedirs("results", exist_ok=True)

# Seed every RNG in use (torch, numpy, stdlib random) for reproducibility.
for _seed_fn in (torch.manual_seed, np.random.seed, random.seed):
    _seed_fn(42)
def plot_figure2_accuracy_curve():
    """Reproduce paper Fig. 2: global-model accuracy curves under different
    degrees of data non-IID-ness (D = 0, 0.5, 1, 2) on MNIST, Fashion-MNIST
    and CIFAR-10, all trained with MDC2-DRL.

    Returns {dataset_name: {D: [accuracy per evaluation]}} and saves the
    figure to results/figure2_accuracy_curves.png.
    """
    print("开始复现图2：不同数据非独立性（D）下的全局模型预测准确率曲线...")

    comparator = HeterogeneousDatasetComparator(M=10, N=3)

    dataset_names = ['MNIST', 'FashionMNIST', 'CIFAR10']
    d_values = [0, 0.5, 1, 2]
    epochs = 100  # the paper trains longer; shortened for the demo

    # accuracy_history[dataset][D] -> list of recorded accuracies.
    accuracy_history = {name: {d: [] for d in d_values} for name in dataset_names}

    for name in dataset_names:
        print(f"\n===== 在 {name} 数据集上进行实验 =====")

        for d in d_values:
            print(f"数据异质性 D = {d}")

            # Re-partition the data at heterogeneity level D, then train
            # with MDC2-DRL and keep the accuracy trajectory.
            comparator.load_dataset(name)
            comparator.distribute_data_by_heterogeneity(d)
            acc_history = comparator.train_mdc2(epochs=epochs)
            accuracy_history[name][d] = acc_history

            final_acc = acc_history[-1] if acc_history else 0
            print(f"D = {d} 的最终准确率: {final_acc:.2f}%\n")

    # One subplot per dataset, one curve per D value.
    fig, axes = plt.subplots(1, 3, figsize=(18, 5))
    for ax, name in zip(axes, dataset_names):
        for d in d_values:
            history = accuracy_history[name][d]
            ax.plot(list(range(1, len(history) + 1)), history, label=f'D = {d}')
        ax.set_xlabel('通信轮数')
        ax.set_ylabel('准确率 (%)')
        ax.set_title(f'{name} 数据集')
        ax.legend()
        ax.grid(True)

    plt.tight_layout()
    plt.savefig("results/figure2_accuracy_curves.png", dpi=300)
    plt.show()

    print("图2复现完成，结果已保存在 results/figure2_accuracy_curves.png")

    return accuracy_history

# 复现论文图3：在MNIST数据集下不同算法的奖励变化对比（D = 2）
def plot_figure3_reward_curve():
    """Reproduce Figure 3 of the paper: reward curves of different algorithms
    on MNIST with data heterogeneity D = 2.

    Three sub-plots are produced: (a) total reward, (b) global reward and
    (c) local reward, one curve per algorithm.

    Returns:
        dict: reward_history[algorithm][kind] -> list of per-episode sums,
        with kind in {'total', 'global', 'local'}.
    """
    print("开始复现图3：不同算法的奖励变化对比...")

    # Algorithms compared in the paper.
    algorithms = ['PD-MADDPG', 'P-MADDPG', 'P-DDPG', 'GREEDY', 'RANDOM']

    # reward_history[alg][kind] collects per-episode reward sums.
    reward_history = {
        alg: {
            'total': [],
            'global': [],
            'local': []
        } for alg in algorithms
    }

    # Fixed experimental setting: MNIST with D = 2.
    dataset_name = 'MNIST'
    d_value = 2
    episodes = 50  # the paper uses 500; reduced for demonstration

    print(f"\n===== 在 {dataset_name} 数据集上, D = {d_value} 时进行对比实验 =====")

    for algorithm in algorithms:
        print(f"训练算法: {algorithm}")

        # Fresh environment and agent per algorithm for a fair comparison.
        env = UAVEnvironment(M=10, N=3, T=100)
        agent = create_agent_by_name(algorithm, M=10, N=3)

        # Distribute heterogeneous data to the sub-regions.
        comparator = HeterogeneousDatasetComparator(M=10, N=3)
        comparator.load_dataset(dataset_name)
        comparator.distribute_data_by_heterogeneity(d_value)
        env.set_user_data(comparator.user_data)

        # Train the agent and collect the three reward traces.
        total_rewards, global_rewards, local_rewards = train_agent_record_rewards(
            env, agent, episodes, algorithm)

        reward_history[algorithm]['total'] = total_rewards
        reward_history[algorithm]['global'] = global_rewards
        reward_history[algorithm]['local'] = local_rewards

    # Three sub-plots: total / global / local rewards.
    fig, axes = plt.subplots(1, 3, figsize=(18, 5))
    titles = ['(a) 总奖励', '(b) 全局奖励', '(c) 局部奖励']
    reward_types = ['total', 'global', 'local']

    for i, reward_type in enumerate(reward_types):
        ax = axes[i]

        # One curve per algorithm in every sub-plot.
        for algorithm in algorithms:
            rewards = reward_history[algorithm][reward_type]
            episodes_range = list(range(1, len(rewards) + 1))
            ax.plot(episodes_range, rewards, label=algorithm)

        ax.set_xlabel('训练轮数')
        ax.set_ylabel('奖励')
        ax.set_title(titles[i])
        ax.legend()
        ax.grid(True)

    plt.tight_layout()
    # Ensure the output directory exists before saving; previously this
    # raised FileNotFoundError when "results/" was missing.
    os.makedirs("results", exist_ok=True)
    plt.savefig("results/figure3_reward_curves.png", dpi=300)
    plt.show()

    print("图3复现完成，结果已保存在 results/figure3_reward_curves.png")

    return reward_history


class HeterogeneousDatasetComparator(DatasetComparator):
    """DatasetComparator extension that distributes training data across
    sub-regions with a configurable heterogeneity level D."""

    def __init__(self, M=10, N=3):
        super().__init__()
        self.M = M                    # number of sub-regions
        self.N = N                    # number of UAVs
        self.current_dataset = None   # set by load_dataset()
        self.user_data = None         # list of per-region Subsets

    def load_dataset(self, dataset_name):
        """Select one of the datasets prepared by the parent class.

        Raises:
            ValueError: if `dataset_name` is not a known dataset.
        """
        if dataset_name in self.datasets:
            self.current_dataset = self.datasets[dataset_name]
            print(f"加载 {dataset_name} 数据集成功")
        else:
            raise ValueError(f"不支持的数据集: {dataset_name}")

    def distribute_data_by_heterogeneity(self, d_value):
        """Partition the training data into self.M region subsets.

        D=0 yields an IID split (every region may draw from all labels);
        D>0 gives each region only ceil(D) distinct labels.

        Raises:
            ValueError: if no dataset has been loaded yet.
        """
        if self.current_dataset is None:
            raise ValueError("请先使用load_dataset()加载数据集")

        train_data = self.current_dataset['train']

        # Collect every label once up front. This avoids re-iterating the
        # dataset (and re-running its transforms) once per region below,
        # which was O(M * n) dataset traversals in the original version.
        all_labels = np.array([label for _, label in train_data])

        # All distinct labels present in the dataset.
        unique_labels = np.unique(all_labels)
        num_classes = len(unique_labels)

        # Number of labels available to each region, derived from D.
        if d_value == 0:
            # IID case: every region can see all labels.
            labels_per_region = num_classes
        else:
            # Non-IID case: D may be fractional; ceil and clamp to [1, C].
            labels_per_region = min(num_classes, max(1, int(np.ceil(d_value))))

        # Randomly assign a label subset to each region.
        region_labels = [
            np.random.choice(unique_labels, size=labels_per_region, replace=False)
            for _ in range(self.M)
        ]

        # Build per-region index lists from the cached label array.
        self.user_data = []
        for i in range(self.M):
            selected_labels = region_labels[i]

            # Single vectorized pass instead of iterating the dataset again.
            indices = np.flatnonzero(np.isin(all_labels, selected_labels)).tolist()

            # Fallback: if no sample matched, draw a small random subset.
            if not indices:
                indices = np.random.choice(
                    len(train_data), size=100, replace=False).tolist()

            # Subsample to balance region sizes (cap at 500 samples).
            if len(indices) > 500:
                indices = np.random.choice(indices, size=500, replace=False).tolist()

            # Wrap the region's indices as a dataset subset.
            self.user_data.append(Subset(train_data, indices))

        print(f"已完成数据分配，异质性参数D={d_value}，每个区域有{labels_per_region}个不同标签")

    def train_mdc2(self, epochs=50):
        """Train the MDC2-DRL model and return the per-epoch accuracy history.

        Adapted from the original _train_mdc2 method; focuses on recording
        the global model's test accuracy after every round.

        Raises:
            ValueError: if the dataset is not loaded or not distributed yet.
        """
        if self.current_dataset is None or self.user_data is None:
            raise ValueError("请先加载数据集并分配数据")

        # UAV environment driving data collection / coverage.
        env = UAVEnvironment(M=10, N=3, T=epochs)
        env.set_user_data(self.user_data)

        # MDC2-DRL model (CNN + DRL agent).
        model = self._init_mdc2_model()

        test_data = self.current_dataset['test']

        # Federated-learning hyper-parameters.
        N_max = 5   # local training epochs per region
        eta = 0.01  # local learning rate

        global_model = model.cnn
        acc_history = []

        for t in range(epochs):
            # 1. UAV decision making.
            state = env._get_state()
            cont_act, disc_act = model.get_action(state, epsilon=0.1)

            # 2. Communication and local model training on covered regions.
            local_models = []
            for i in range(env.N):
                covered_regions = env.get_covered_regions(i)
                for k in covered_regions:
                    if k < env.M:
                        data = self.user_data[k]
                        # Local training from the current global weights.
                        local_model = self._local_train(global_model, data, N_max, eta)
                        local_models.append(local_model)

            # 3. Global aggregation (FedAvg-style).
            if local_models:
                global_model = self._aggregate_models(local_models)
                model.cnn = global_model

            # 4. Environment step.
            next_state, reward, done, _ = env.step(cont_act, disc_act)

            # 5. Evaluate and record the global model's accuracy.
            acc = self._evaluate(global_model, test_data)
            acc_history.append(acc)

            # Progress report every 10 rounds.
            if (t + 1) % 10 == 0:
                print(f"Epoch {t + 1}/{epochs}, 准确率: {acc:.2f}%")

        return acc_history


def create_agent_by_name(algorithm_name, M=10, N=3):
    """Instantiate the agent matching `algorithm_name`.

    Args:
        algorithm_name: one of 'PD-MADDPG', 'P-MADDPG', 'P-DDPG',
            'GREEDY', 'RANDOM'.
        M: number of sub-regions.
        N: number of UAVs.

    Raises:
        ValueError: for an unrecognized algorithm name.
    """
    def _p_maddpg():
        # MADDPG variant with distributed training switched off.
        variant = MDC2_DRL_Agent(M=M, N=N)
        variant.use_distributed = False
        return variant

    # Lazily evaluated factory table: the agent class is only touched
    # for the name that was actually requested.
    factories = {
        'PD-MADDPG': lambda: MDC2_DRL_Agent(M=M, N=N),  # paper's MDC2-DRL
        'P-MADDPG': _p_maddpg,
        'P-DDPG': lambda: DDPG_Agent(M=M, N=N),         # single-agent DDPG
        'GREEDY': lambda: GreedyAgent(M=M, N=N),
        'RANDOM': lambda: RandomAgent(M=M, N=N),
    }

    if algorithm_name not in factories:
        raise ValueError(f"不支持的算法: {algorithm_name}")

    return factories[algorithm_name]()


class DDPG_Agent:
    """Single-agent DDPG variant (the "P-DDPG" baseline).

    One centralized actor-critic pair controls all N UAVs at once: the actor
    emits 2*N continuous values (heading, distance per UAV) and 4*N discrete
    decisions (b, x, y, z per UAV). Uses target networks, experience replay
    and Polyak soft updates.
    """

    def __init__(self, M=10, N=3):
        # Simplified DDPG agent implementation.
        self.M = M  # number of sub-regions
        self.N = N  # number of UAVs
        # State layout: positions (2N) + AoI (M) + connectivity (M*N)
        # + battery (N) + energy (N) + data volumes (3N).
        # NOTE(review): for M=10, N=3 this gives 61, while ActorNetwork's
        # docstring assumes a 58-dim state — verify against the environment.
        self.state_dim = 2 * N + M + M * N + N + N + 3 * N
        self.continuous_action_dim = 2 * N
        self.discrete_action_dim = 4 * N

        # Online and target actor/critic networks.
        self.actor = ActorNetwork(self.state_dim, self.continuous_action_dim, self.discrete_action_dim)
        self.critic = CriticNetwork(self.state_dim, self.continuous_action_dim + self.discrete_action_dim)
        self.target_actor = ActorNetwork(self.state_dim, self.continuous_action_dim, self.discrete_action_dim)
        self.target_critic = CriticNetwork(self.state_dim, self.continuous_action_dim + self.discrete_action_dim)

        # Initialize targets as exact copies of the online networks.
        self.hard_update(self.target_actor, self.actor)
        self.hard_update(self.target_critic, self.critic)

        # Optimizers (critic uses a larger learning rate than the actor).
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=0.0001)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=0.0002)

        # Training hyper-parameters and replay buffer.
        self.gamma = 0.95  # discount factor
        self.tau = 0.01    # soft-update (Polyak) rate
        self.memory = deque(maxlen=100000)  # experience replay buffer
        self.batch_size = 64

    def hard_update(self, target, source):
        # Copy every source parameter into the target verbatim.
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(param.data)

    def soft_update(self, local_model, target_model, tau):
        # Polyak averaging: target <- tau * local + (1 - tau) * target.
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)

    def get_action(self, state, epsilon):
        """Return (continuous_actions, discrete_actions) for one state.

        Continuous outputs are rescaled to environment units; discrete
        outputs are epsilon-greedy over the actor's thresholded logits.
        """
        state_tensor = torch.FloatTensor(state).unsqueeze(0)

        # Inference only — no gradients needed for action selection.
        with torch.no_grad():
            continuous_actions, discrete_logits = self.actor(state_tensor)

        # Actor head outputs are in [0, 1]; rescale to environment units.
        continuous_actions = continuous_actions.numpy()[0]
        continuous_actions[::2] *= 2 * math.pi  # headings -> [0, 2π)
        continuous_actions[1::2] *= 10.0  # distances — presumably l_max = 10; TODO confirm

        # Epsilon-greedy discrete choice: threshold logits or random bits.
        if random.random() > epsilon:
            discrete_actions = (discrete_logits.numpy()[0] > 0.5).astype(np.float32)
        else:
            discrete_actions = np.random.randint(0, 2, size=self.discrete_action_dim).astype(np.float32)

        return continuous_actions, discrete_actions

    def store_transition(self, state, action, reward, next_state, done):
        # Append one transition tuple to the replay buffer.
        self.memory.append((state, action, reward, next_state, done))

    def update_networks(self):
        """Run one DDPG gradient step (critic, actor, then soft updates)."""
        # Skip updates until enough transitions have accumulated.
        if len(self.memory) < self.batch_size:
            return

        # Sample a random mini-batch from the replay buffer.
        batch = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states))
        actions = torch.FloatTensor(np.array(actions))
        rewards = torch.FloatTensor(np.array(rewards)).unsqueeze(1)
        next_states = torch.FloatTensor(np.array(next_states))
        dones = torch.FloatTensor(np.array(dones)).unsqueeze(1)

        # Critic update: TD target computed from the frozen target networks.
        with torch.no_grad():
            next_continuous, next_discrete = self.target_actor(next_states)
            next_actions = torch.cat([next_continuous, next_discrete], dim=1)
            next_q = self.target_critic(next_states, next_actions)
            target_q = rewards + (1 - dones) * self.gamma * next_q

        current_q = self.critic(states, actions)
        critic_loss = nn.MSELoss()(current_q, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        # Clip gradients to stabilize training.
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        # Actor update: ascend the critic's value of the policy's actions.
        continuous_actions, discrete_actions = self.actor(states)
        policy_actions = torch.cat([continuous_actions, discrete_actions], dim=1)
        actor_loss = -self.critic(states, policy_actions).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
        self.actor_optimizer.step()

        # Soft-update the target networks toward the online networks.
        self.soft_update(self.actor, self.target_actor, self.tau)
        self.soft_update(self.critic, self.target_critic, self.tau)


class GreedyAgent:
    """Greedy baseline: every UAV heads straight for the sub-region with the
    highest Age-of-Information (AoI), and always opts in to the FL task."""

    def __init__(self, M=10, N=3):
        self.M = M          # number of sub-regions
        self.N = N          # number of UAVs
        self.l_max = 10.0   # maximum flight distance per time step

    def get_action(self, state, epsilon=0):
        """Compute greedy actions from the normalized state vector.

        Returns:
            tuple: (continuous_actions, discrete_actions); the continuous
            part is already de-normalized (radians / distance units).
        """
        # Undo the state normalization for the slices we need.
        positions = state[:2 * self.N].reshape(self.N, 2) * 10.0
        aoi = state[2 * self.N:2 * self.N + self.M] * 100.0

        # Centre coordinates of every grid cell (row-major layout).
        side = int(np.sqrt(self.M))
        centers = np.array(
            [[(k // side + 0.5) * 10, (k % side + 0.5) * 10] for k in range(self.M)])

        normalized = np.zeros(2 * self.N)

        for i in range(self.N):
            # Greedy target: the region with the currently largest AoI.
            target_idx = int(np.argmax(aoi))
            offset = centers[target_idx] - positions[i]
            dist = np.linalg.norm(offset)

            if dist > 0:
                # Heading in [0, 2π) toward the target.
                angle = np.arctan2(offset[1], offset[0])
                if angle < 0:
                    angle += 2 * np.pi

                # Travel distance capped at l_max.
                move = min(dist, self.l_max)

                normalized[2 * i] = angle / (2 * np.pi)
                normalized[2 * i + 1] = move / self.l_max

                # Assume this region gets covered so the next UAV picks another.
                aoi[target_idx] = 0
            else:
                # Already at the target: wander randomly instead.
                angle = np.random.uniform(0, 2 * np.pi)
                move = np.random.uniform(0, self.l_max)
                normalized[2 * i] = angle / (2 * np.pi)
                normalized[2 * i + 1] = move / self.l_max

        # Discrete part: always participate in the federated-learning task.
        discrete_actions = np.ones(4 * self.N)

        # De-normalize the continuous part for the environment.
        continuous_actions = np.copy(normalized)
        continuous_actions[::2] *= 2 * np.pi
        continuous_actions[1::2] *= self.l_max

        return continuous_actions, discrete_actions

    def store_transition(self, *args):
        """No-op: the greedy baseline keeps no replay memory."""
        pass

    def update_networks(self):
        """No-op: the greedy baseline has no trainable networks."""
        pass


class RandomAgent:
    """Baseline agent that draws every action uniformly at random."""

    def __init__(self, M=10, N=3):
        self.M = M
        self.N = N
        self.continuous_action_dim = 2 * N   # (heading, distance) per UAV
        self.discrete_action_dim = 4 * N     # (b, x, y, z) per UAV
        self.l_max = 10.0                    # maximum flight distance

    def get_action(self, state, epsilon=0):
        """Return random continuous and discrete actions; the state is ignored."""
        # Headings uniform in [0, 2π), distances uniform in [0, l_max).
        cont = np.random.random(self.continuous_action_dim)
        cont[::2] *= 2 * np.pi
        cont[1::2] *= self.l_max

        # Independent fair coin flips for every discrete decision.
        disc = np.random.randint(0, 2, size=self.discrete_action_dim).astype(np.float32)

        return cont, disc

    def store_transition(self, *args):
        """No-op: the random baseline keeps no replay memory."""
        pass

    def update_networks(self):
        """No-op: the random baseline has no trainable networks."""
        pass


def train_agent_record_rewards(env, agent, episodes, algorithm_name):
    """Run `episodes` training episodes and record per-episode reward sums.

    Args:
        env: environment with reset() and step() returning
            (next_state, reward, done, info).
        agent: agent exposing get_action / store_transition / update_networks.
        episodes: number of episodes to run.
        algorithm_name: used to decide whether the agent learns; 'RANDOM'
            and 'GREEDY' neither store transitions nor update networks.

    Returns:
        tuple: (total_rewards, global_rewards, local_rewards) lists,
        one entry per episode.
    """
    # Learning is skipped for the non-trainable baselines.
    learns = algorithm_name not in ['RANDOM', 'GREEDY']

    total_rewards, global_rewards, local_rewards = [], [], []

    # Epsilon-greedy exploration schedule.
    epsilon, epsilon_min, epsilon_decay = 1.0, 0.01, 0.995

    for episode in range(episodes):
        state = env.reset()
        ep_total, ep_global, ep_local = 0, 0, 0
        done = False
        step = 0

        while not done:
            # Select and execute an action.
            cont, disc = agent.get_action(state, epsilon)
            next_state, reward, done, info = env.step(cont, disc)

            # Accumulate the three reward components.
            ep_total += reward
            ep_global += info.get('global_reward', 0)
            ep_local += info.get('local_reward', 0)

            if learns:
                # Store the transition and take a learning step.
                agent.store_transition(
                    state, np.concatenate([cont, disc]), reward, next_state, done)
                agent.update_networks()

            state = next_state
            step += 1

        # Decay exploration only for learning algorithms.
        if learns:
            epsilon = max(epsilon_min, epsilon * epsilon_decay)

        total_rewards.append(ep_total)
        global_rewards.append(ep_global)
        local_rewards.append(ep_local)

        # Periodic progress report.
        if (episode + 1) % 10 == 0:
            print(f"算法: {algorithm_name}, Episode: {episode + 1}/{episodes}, "
                  f"总奖励: {ep_total:.2f}, "
                  f"全局奖励: {ep_global:.2f}, "
                  f"局部奖励: {ep_local:.2f}")

    return total_rewards, global_rewards, local_rewards
def plot_figure2_accuracy_curve():
    """Reproduce Figure 2 of the paper: accuracy curves for different D values.

    NOTE(review): this redefines the plot_figure2_accuracy_curve declared
    earlier in this module (the version using HeterogeneousDatasetComparator);
    in Python the definition executed last binds the name, so only one
    implementation is ever callable. Consider removing or renaming the
    duplicates.

    For each dataset (MNIST, Fashion-MNIST, CIFAR-10) and each heterogeneity
    level D in {0, 0.5, 1, 2}: distribute the data non-IID, run the MDC2-DRL
    loop for `epochs` rounds, and record the global model's test accuracy.

    Returns:
        dict: results[dataset_name][d] -> list of per-epoch accuracies (%).
    """
    # Experiment parameters (epochs and d_values follow Figure 2 of the paper).
    epochs = 100
    batch_size = 64
    d_values = [0, 0.5, 1, 2]
    # Per-dataset input channel counts and normalization statistics.
    datasets_info = {
        'MNIST': {'input_channels': 1, 'normalize': ((0.1307,), (0.3081,))},
        'FashionMNIST': {'input_channels': 1, 'normalize': ((0.2860,), (0.3530,))},
        'CIFAR10': {'input_channels': 3, 'normalize': ((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))}
    }

    # results[dataset][d] -> accuracy history.
    results = {}

    for ds_name, ds_config in datasets_info.items():
        print(f"\n====== 处理数据集: {ds_name} ======")
        results[ds_name] = {}

        # Load the dataset with its standard normalization transform.
        if ds_name == 'MNIST':
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(*ds_config['normalize'])
            ])
            train_data = datasets.MNIST('./data', train=True, download=True, transform=transform)
            test_data = datasets.MNIST('./data', train=False, transform=transform)
        elif ds_name == 'FashionMNIST':
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(*ds_config['normalize'])
            ])
            train_data = datasets.FashionMNIST('./data', train=True, download=True, transform=transform)
            test_data = datasets.FashionMNIST('./data', train=False, transform=transform)
        elif ds_name == 'CIFAR10':
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(*ds_config['normalize'])
            ])
            train_data = datasets.CIFAR10('./data', train=True, download=True, transform=transform)
            test_data = datasets.CIFAR10('./data', train=False, transform=transform)

        # Run the experiment for every heterogeneity level D.
        for d in d_values:
            print(f"\n正在使用D={d}进行测试...")
            results[ds_name][d] = []

            # Non-IID split of the training data across 10 regions.
            distributor = NonIIDDataDistributor(train_data, num_regions=10, num_classes=10)
            user_data = distributor.distribute_by_d(d)

            # MDC2-DRL model (CNN + agent) and the UAV environment.
            input_channels = ds_config['input_channels']
            model = MDC2DRLModel(input_channels)
            env = UAVEnvironment(M=10, N=3, T=epochs)

            # Federated-learning hyper-parameters.
            N_max = 5  # local training epochs per region
            eta = 0.01  # local learning rate

            global_model = model.cnn
            acc_history = []

            for t in range(epochs):
                # 1. UAV exploration decision.
                # NOTE(review): reaches into env._get_state(), a private
                # method — consider exposing a public accessor on the env.
                state = env._get_state()
                cont_act, disc_act = model.get_action(state, epsilon=0.1)

                # 2. Communication and local training for covered regions.
                local_models = []
                for i in range(env.N):
                    # Map the UAV position to a grid cell (assumes 10-unit
                    # cells in row-major order — TODO confirm against env).
                    grid_size = int(np.sqrt(env.M))
                    x, y = env.drone_positions[i]
                    grid_x = int(x // 10)
                    grid_y = int(y // 10)
                    if 0 <= grid_x < grid_size and 0 <= grid_y < grid_size:
                        k = grid_x * grid_size + grid_y
                        if k < env.M:
                            data = user_data[k]
                            # Local training starting from the global weights.
                            local_model = _local_train(global_model, data, N_max, eta, input_channels)
                            local_models.append(local_model)

                # 3. Global aggregation (FedAvg).
                if local_models:
                    global_model = _aggregate_models(local_models, input_channels)
                    model.cnn = global_model

                # 4. Environment step.
                next_state, reward, done, _ = env.step(cont_act, disc_act)

                # 5. Evaluate the global model on the held-out test set.
                acc = evaluate_model(global_model, test_data, batch_size)
                acc_history.append(acc)

                # Progress report every 10 rounds.
                if (t + 1) % 10 == 0:
                    print(f"Epoch {t + 1}/{epochs}, D={d}, 准确率: {acc:.2f}%")

            # Store the full accuracy history for this D value.
            results[ds_name][d] = acc_history

    # Plot the Figure-2 style accuracy curves.
    plot_accuracy_curves(results, d_values, epochs)

    return results


def _local_train(global_model, data, N_max, eta, input_channels):
    """Train a local copy of the global model on one region's data.

    Args:
        global_model: current global CNN whose weights seed the local model.
        data: the region's dataset (typically a Subset).
        N_max: number of local training epochs.
        eta: SGD learning rate.
        input_channels: CNN input channels (1 for MNIST/Fashion-MNIST,
            3 for CIFAR-10).

    Returns:
        The locally trained model.
    """
    # Start the local model from the current global weights.
    local_model = CNNModel(input_channels=input_channels)
    local_model.load_state_dict(global_model.state_dict())

    # Plain SGD with cross-entropy, as in standard FedAvg local updates.
    optimizer = optim.SGD(local_model.parameters(), lr=eta)
    criterion = nn.CrossEntropyLoss()

    dataloader = DataLoader(data, batch_size=32, shuffle=True)
    local_model.train()

    # Run exactly N_max local epochs. The previous min(N_max, len(dataloader))
    # bound conflated epochs with batch count and silently under-trained
    # regions whose data fit in fewer than N_max batches.
    for _ in range(N_max):
        for inputs, targets in dataloader:
            optimizer.zero_grad()
            outputs = local_model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

    return local_model


def _aggregate_models(local_models, input_channels):
    """FedAvg aggregation: build a fresh model whose parameters are the
    element-wise mean of all local models' parameters.

    Args:
        local_models: list of locally trained models to average.
        input_channels: CNN input channels for the aggregated model.

    Returns:
        The aggregated global model.
    """
    global_model = CNNModel(input_channels=input_channels)

    # No gradients are needed while averaging weights.
    with torch.no_grad():
        local_dicts = [m.state_dict() for m in local_models]
        averaged = global_model.state_dict()

        # Mean over the stacked copies of every parameter tensor.
        for name in averaged.keys():
            stacked = torch.stack([sd[name].float() for sd in local_dicts], 0)
            averaged[name] = torch.mean(stacked, 0).clone()

        # Install the averaged weights into the fresh model.
        global_model.load_state_dict(averaged)

    return global_model


def evaluate_model(model, test_data, batch_size=128):
    """Return the classification accuracy (%) of `model` on `test_data`.

    Args:
        model: classifier producing per-class logits.
        test_data: dataset of (input, target) pairs.
        batch_size: evaluation batch size.
    """
    model.eval()
    loader = DataLoader(test_data, batch_size=batch_size)

    correct, total = 0, 0

    # Inference only — disable gradient tracking.
    with torch.no_grad():
        for inputs, targets in loader:
            predictions = torch.max(model(inputs), 1)[1]
            total += targets.size(0)
            correct += (predictions == targets).sum().item()

    return 100.0 * correct / total


def plot_accuracy_curves(results, d_values, epochs):
    """Plot per-dataset accuracy curves, one line per heterogeneity level D.

    Saves the figure as figure2_accuracy_curves.png and shows it.

    Args:
        results: results[dataset_name][d] -> list of per-epoch accuracies.
        d_values: heterogeneity levels to plot.
        epochs: number of rounds (x-axis length).
    """
    plt.figure(figsize=(18, 6))
    # Fixed marker/color pairing per D value, matching d_values order.
    styles = list(zip(['o', 's', '^', 'D'], ['blue', 'green', 'red', 'purple']))

    for panel, (ds_name, per_d) in enumerate(results.items(), start=1):
        plt.subplot(1, 3, panel)

        for (marker, color), d in zip(styles, d_values):
            plt.plot(range(1, epochs + 1), per_d[d],
                     marker=marker, markersize=5, markevery=10,
                     color=color, label=f'D={d}')

        plt.xlabel('Training Round', fontsize=12)
        plt.ylabel('Test Accuracy (%)', fontsize=12)
        plt.title(f'{ds_name} Dataset', fontsize=14)
        plt.grid(True, linestyle='--', alpha=0.7)
        plt.legend(loc='lower right')

    plt.tight_layout()
    plt.savefig('figure2_accuracy_curves.png', dpi=300, bbox_inches='tight')
    plt.show()


class MDC2DRLModel(nn.Module):
    """MDC2-DRL wrapper pairing a classification CNN (the federated global
    model) with an MDC2-DRL agent for UAV decision making.

    Supports datasets with different input channel counts (1 for MNIST /
    Fashion-MNIST, 3 for CIFAR-10) via `input_channels`.
    """

    def __init__(self, input_channels=1):
        super(MDC2DRLModel, self).__init__()
        # Federated global model; callers reassign self.cnn after aggregation.
        self.cnn = CNNModel(input_channels=input_channels)
        # DRL agent driving UAV decisions (fixed M=10 regions, N=3 UAVs).
        self.agent = MDC2_DRL_Agent(M=10, N=3)

    def forward(self, x):
        # Classification path: delegate to the CNN.
        return self.cnn(x)

    def get_action(self, state, epsilon):
        # Decision path: delegate to the DRL agent (epsilon-greedy).
        return self.agent.get_action(state, epsilon)

class NonIIDDataDistributor:
    """Distribute a dataset across sub-regions with controllable heterogeneity,
    following Sections 2.2 and 4.2 of the paper.

    The heterogeneity parameter D controls how many distinct labels each
    region receives.
    """

    def __init__(self, dataset, num_regions=10, num_classes=10):
        """
        Args:
            dataset: indexable dataset yielding (sample, label) pairs.
            num_regions: number of sub-regions (M).
            num_classes: number of distinct labels in the dataset.
        """
        self.dataset = dataset
        self.num_regions = num_regions
        self.num_classes = num_classes

        # Per-class sample indices, computed once up front.
        self.class_indices = self._index_by_class()

    def _index_by_class(self):
        """Group sample indices by their label."""
        class_indices = [[] for _ in range(self.num_classes)]

        # One pass over the dataset: bucket each index under its label.
        for idx, (_, label) in enumerate(self.dataset):
            class_indices[label].append(idx)

        return class_indices

    def distribute_by_d(self, d_value):
        """Split the dataset into per-region Subsets according to D.

        Label diversity grows with D:
        - D=0: exactly 1 label per region (region r gets label r % num_classes)
        - D>0: about 1 + D * num_classes / 3 labels per region
          (for 10 classes: D=0.5 -> 2, D=1 -> 4, D=2 -> 7; the previous
          docstring's ~3 / ~6 figures did not match this formula)

        Returns:
            list[Subset]: one dataset subset per region.
        """
        # Number of distinct labels each region should receive.
        if d_value == 0:
            labels_per_region = 1
        else:
            labels_per_region = int(1 + d_value * self.num_classes / 3)
            labels_per_region = min(labels_per_region, self.num_classes)

        print(f"D={d_value}，每个区域分配约{labels_per_region}类标签数据")

        # Assign a label set to every region.
        region_to_labels = {}
        for region in range(self.num_regions):
            if d_value == 0:
                # D=0: one deterministic label per region.
                region_to_labels[region] = [region % self.num_classes]
            else:
                # Random label subset of the required size.
                region_to_labels[region] = random.sample(
                    range(self.num_classes), labels_per_region)

        # Distribute sample indices label by label.
        region_data = [[] for _ in range(self.num_regions)]
        labels_used = {region: Counter() for region in range(self.num_regions)}

        # Shuffle region order so no region is systematically favored.
        regions = list(range(self.num_regions))
        random.shuffle(regions)

        for label in range(self.num_classes):
            # Regions whose label set contains this label.
            regions_with_label = [r for r in regions if label in region_to_labels[r]]

            if not regions_with_label:
                continue

            # Shuffle this label's indices before splitting them up.
            indices = self.class_indices[label].copy()
            random.shuffle(indices)

            # Even share per region; the last region absorbs the remainder.
            indices_per_region = len(indices) // len(regions_with_label)

            for i, region in enumerate(regions_with_label):
                start_idx = i * indices_per_region
                end_idx = (i + 1) * indices_per_region if i < len(regions_with_label) - 1 else len(indices)

                region_indices = indices[start_idx:end_idx]
                region_data[region].extend(region_indices)
                labels_used[region][label] += len(region_indices)

        # Wrap each region's index list as a dataset subset.
        region_datasets = [Subset(self.dataset, indices) for indices in region_data]

        # Report the resulting per-region label distribution.
        for region in range(self.num_regions):
            print(f"区域 {region} 数据分布: {dict(labels_used[region])}, "
                  f"总样本数: {sum(labels_used[region].values())}, "
                  f"标签种类: {len(labels_used[region])}")

        return region_datasets


def plot_figure2_accuracy_curve():
    """Reproduce Figure 2 of the paper: accuracy curves for different D values.

    NOTE(review): this is the third definition of plot_figure2_accuracy_curve
    in this module and is byte-for-byte a copy of the second one; in Python
    the definition executed last binds the name, so the earlier copies are
    dead code. Consider deleting or renaming the duplicates.

    For each dataset (MNIST, Fashion-MNIST, CIFAR-10) and each heterogeneity
    level D in {0, 0.5, 1, 2}: distribute the data non-IID, run the MDC2-DRL
    loop for `epochs` rounds, and record the global model's test accuracy.

    Returns:
        dict: results[dataset_name][d] -> list of per-epoch accuracies (%).
    """
    # Experiment parameters (epochs and d_values follow Figure 2 of the paper).
    epochs = 100
    batch_size = 64
    d_values = [0, 0.5, 1, 2]
    # Per-dataset input channel counts and normalization statistics.
    datasets_info = {
        'MNIST': {'input_channels': 1, 'normalize': ((0.1307,), (0.3081,))},
        'FashionMNIST': {'input_channels': 1, 'normalize': ((0.2860,), (0.3530,))},
        'CIFAR10': {'input_channels': 3, 'normalize': ((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))}
    }

    # results[dataset][d] -> accuracy history.
    results = {}

    for ds_name, ds_config in datasets_info.items():
        print(f"\n====== 处理数据集: {ds_name} ======")
        results[ds_name] = {}

        # Load the dataset with its standard normalization transform.
        if ds_name == 'MNIST':
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(*ds_config['normalize'])
            ])
            train_data = datasets.MNIST('./data', train=True, download=True, transform=transform)
            test_data = datasets.MNIST('./data', train=False, transform=transform)
        elif ds_name == 'FashionMNIST':
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(*ds_config['normalize'])
            ])
            train_data = datasets.FashionMNIST('./data', train=True, download=True, transform=transform)
            test_data = datasets.FashionMNIST('./data', train=False, transform=transform)
        elif ds_name == 'CIFAR10':
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(*ds_config['normalize'])
            ])
            train_data = datasets.CIFAR10('./data', train=True, download=True, transform=transform)
            test_data = datasets.CIFAR10('./data', train=False, transform=transform)

        # Run the experiment for every heterogeneity level D.
        for d in d_values:
            print(f"\n正在使用D={d}进行测试...")
            results[ds_name][d] = []

            # Non-IID split of the training data across 10 regions.
            distributor = NonIIDDataDistributor(train_data, num_regions=10, num_classes=10)
            user_data = distributor.distribute_by_d(d)

            # MDC2-DRL model (CNN + agent) and the UAV environment.
            input_channels = ds_config['input_channels']
            model = MDC2DRLModel(input_channels)
            env = UAVEnvironment(M=10, N=3, T=epochs)

            # Federated-learning hyper-parameters.
            N_max = 5  # local training epochs per region
            eta = 0.01  # local learning rate

            global_model = model.cnn
            acc_history = []

            for t in range(epochs):
                # 1. UAV exploration decision.
                # NOTE(review): reaches into env._get_state(), a private
                # method — consider exposing a public accessor on the env.
                state = env._get_state()
                cont_act, disc_act = model.get_action(state, epsilon=0.1)

                # 2. Communication and local training for covered regions.
                local_models = []
                for i in range(env.N):
                    # Map the UAV position to a grid cell (assumes 10-unit
                    # cells in row-major order — TODO confirm against env).
                    grid_size = int(np.sqrt(env.M))
                    x, y = env.drone_positions[i]
                    grid_x = int(x // 10)
                    grid_y = int(y // 10)
                    if 0 <= grid_x < grid_size and 0 <= grid_y < grid_size:
                        k = grid_x * grid_size + grid_y
                        if k < env.M:
                            data = user_data[k]
                            # Local training starting from the global weights.
                            local_model = _local_train(global_model, data, N_max, eta, input_channels)
                            local_models.append(local_model)

                # 3. Global aggregation (FedAvg).
                if local_models:
                    global_model = _aggregate_models(local_models, input_channels)
                    model.cnn = global_model

                # 4. Environment step.
                next_state, reward, done, _ = env.step(cont_act, disc_act)

                # 5. Evaluate the global model on the held-out test set.
                acc = evaluate_model(global_model, test_data, batch_size)
                acc_history.append(acc)

                # Progress report every 10 rounds.
                if (t + 1) % 10 == 0:
                    print(f"Epoch {t + 1}/{epochs}, D={d}, 准确率: {acc:.2f}%")

            # Store the full accuracy history for this D value.
            results[ds_name][d] = acc_history

    # Plot the Figure-2 style accuracy curves.
    plot_accuracy_curves(results, d_values, epochs)

    return results


def _local_train(global_model, data, N_max, eta, input_channels):
    """Train a local copy of the global model on one region's data.

    (Duplicate of the earlier _local_train in this module; the later
    definition binds the name.)

    Args:
        global_model: current global CNN whose weights seed the local model.
        data: the region's dataset (typically a Subset).
        N_max: number of local training epochs.
        eta: SGD learning rate.
        input_channels: CNN input channels (1 for MNIST/Fashion-MNIST,
            3 for CIFAR-10).

    Returns:
        The locally trained model.
    """
    # Start the local model from the current global weights.
    local_model = CNNModel(input_channels=input_channels)
    local_model.load_state_dict(global_model.state_dict())

    # Plain SGD with cross-entropy, as in standard FedAvg local updates.
    optimizer = optim.SGD(local_model.parameters(), lr=eta)
    criterion = nn.CrossEntropyLoss()

    dataloader = DataLoader(data, batch_size=32, shuffle=True)
    local_model.train()

    # Run exactly N_max local epochs. The previous min(N_max, len(dataloader))
    # bound conflated epochs with batch count and silently under-trained
    # regions whose data fit in fewer than N_max batches.
    for _ in range(N_max):
        for inputs, targets in dataloader:
            optimizer.zero_grad()
            outputs = local_model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

    return local_model


def _aggregate_models(local_models, input_channels):
    """FedAvg aggregation: return a fresh model holding the element-wise mean of all local models."""
    averaged = CNNModel(input_channels=input_channels)

    with torch.no_grad():
        # Snapshot every local model's parameters/buffers
        local_states = [m.state_dict() for m in local_models]
        merged = averaged.state_dict()

        # Average each tensor across the local models
        # (cast to float so integer buffers can be stacked and meaned)
        for name in merged:
            stacked = torch.stack([sd[name].float() for sd in local_states])
            merged[name] = stacked.mean(dim=0).clone()

        averaged.load_state_dict(merged)

    return averaged


def evaluate_model(model, test_data, batch_size=128):
    """Return the model's top-1 accuracy (in percent) over `test_data`."""
    model.eval()
    loader = DataLoader(test_data, batch_size=batch_size)

    hits = 0
    seen = 0

    # No gradients needed for evaluation
    with torch.no_grad():
        for batch_x, batch_y in loader:
            preds = model(batch_x).argmax(dim=1)
            hits += (preds == batch_y).sum().item()
            seen += batch_y.size(0)

    return 100.0 * hits / seen


def plot_accuracy_curves(results, d_values, epochs):
    """Plot the Figure-2 accuracy curves: one subplot per dataset, one line per D value."""
    plt.figure(figsize=(18, 6))
    marker_cycle = ['o', 's', '^', 'D']
    color_cycle = ['blue', 'green', 'red', 'purple']
    x_axis = range(1, epochs + 1)

    for idx, ds_name in enumerate(results):
        plt.subplot(1, 3, idx + 1)

        # One curve per non-IID-ness value
        for j, d in enumerate(d_values):
            plt.plot(x_axis, results[ds_name][d],
                     marker=marker_cycle[j], markersize=5, markevery=10,
                     color=color_cycle[j], label=f'D={d}')

        plt.xlabel('Training Round', fontsize=12)
        plt.ylabel('Test Accuracy (%)', fontsize=12)
        plt.title(f'{ds_name} Dataset', fontsize=14)
        plt.grid(True, linestyle='--', alpha=0.7)
        plt.legend(loc='lower right')

    plt.tight_layout()
    plt.savefig('figure2_accuracy_curves.png', dpi=300, bbox_inches='tight')
    plt.show()


class MDC2DRLModel(nn.Module):
    """MDC2-DRL model supporting different input-channel counts.

    Bundles the federated-learning task CNN with the multi-agent DRL
    controller used for UAV decisions.

    NOTE(review): a class with the same name and an identical interface is
    defined again later in this file; at import time that later definition
    is the one in effect.
    """

    def __init__(self, input_channels=1):
        super(MDC2DRLModel, self).__init__()
        self.cnn = CNNModel(input_channels=input_channels)  # task model trained via FedAvg
        self.agent = MDC2_DRL_Agent(M=10, N=3)  # DRL controller (M sub-regions, N UAVs)

    def forward(self, x):
        # Inference delegates to the CNN task model.
        return self.cnn(x)

    def get_action(self, state, epsilon):
        # Epsilon-greedy action selection delegates to the DRL agent.
        return self.agent.get_action(state, epsilon)

class AlgorithmComparator:
    """Reward comparison across algorithms, reproducing Figure 3 of the paper."""

    def __init__(self, M=10, N=3, T=100, d_value=2):
        self.M = M  # number of sub-regions
        self.N = N  # number of UAVs
        self.T = T  # number of time slots per episode
        self.d_value = d_value  # data non-IID-ness parameter (paper uses D=2)

        # Load the MNIST dataset (train/test subsets)
        self.dataset = self._load_mnist()

        # Build the non-IID data distribution across regions (D=2)
        # NOTE(review): NonIIDDataDistributor is defined elsewhere in this project.
        distributor = NonIIDDataDistributor(self.dataset['train'], num_regions=M)
        self.user_data = distributor.distribute_by_d(d_value)

        # Registry of available algorithms: name -> initializer returning (agent, env)
        self.algorithms = {
            'PD-MADDPG': self._init_pd_maddpg,
            'P-MADDPG': self._init_p_maddpg,
            'P-DDPG': self._init_p_ddpg,
            'GREEDY': self._init_greedy,
            'RANDOM': self._init_random
        }

    def _load_mnist(self):
        """Load MNIST and return random train (70%) / test (30%) subsets."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        train = datasets.MNIST('./data', train=True, download=True, transform=transform)
        test = datasets.MNIST('./data', train=False, transform=transform)

        # Independent random index permutations for the train and test subsets
        train_size = int(0.7 * len(train))
        train_indices = torch.randperm(len(train)).tolist()

        test_size = int(0.3 * len(test))
        test_indices = torch.randperm(len(test)).tolist()

        return {
            'train': Subset(train, train_indices[:train_size]),
            'test': Subset(test, test_indices[:test_size])
        }

    def _init_pd_maddpg(self):
        """Initialize PD-MADDPG (the MDC2-DRL method proposed in the paper)."""
        return MDC2_DRL_Agent(M=self.M, N=self.N, algorithm='PD-MADDPG'), UAVEnvironment(M=self.M, N=self.N, T=self.T)

    def _init_p_maddpg(self):
        """Initialize P-MADDPG (MDC2-DRL without the locking mechanism)."""
        return MDC2_DRL_Agent(M=self.M, N=self.N, algorithm='P-MADDPG'), UAVEnvironment(M=self.M, N=self.N, T=self.T)

    def _init_p_ddpg(self):
        """Initialize P-DDPG (single-agent variant of MDC2-DRL)."""
        return MDC2_DRL_Agent(M=self.M, N=self.N, algorithm='P-DDPG'), UAVEnvironment(M=self.M, N=self.N, T=self.T)

    def _init_greedy(self):
        """Initialize the greedy baseline policy."""
        return GreedyAgent(M=self.M, N=self.N), UAVEnvironment(M=self.M, N=self.N, T=self.T)

    def _init_random(self):
        """Initialize the random baseline policy."""
        return RandomAgent(M=self.M, N=self.N), UAVEnvironment(M=self.M, N=self.N, T=self.T)

    def run_comparison(self, episodes=50):
        """Train every registered algorithm, plot the comparison, and return the reward histories."""
        results = {}

        for algo_name, init_func in self.algorithms.items():
            print(f"\n==================")
            print(f"训练 {algo_name} 算法")
            print(f"==================")

            agent, env = init_func()

            # Train and collect per-episode reward histories
            rewards_history = self._train_algorithm(agent, env, episodes)

            # Store results for plotting
            results[algo_name] = rewards_history

            print(f"{algo_name} 训练完成，平均总奖励: {np.mean(rewards_history['total'][-10:]):.2f}")

        # Draw the comparison figure (paper Figure 3)
        self._plot_comparison_results(results)

        return results

    def _train_algorithm(self, agent, env, episodes):
        """Train one algorithm for `episodes` episodes and return its reward histories."""
        rewards_history = {
            'total': [],  # total reward per episode
            'global': [],  # global (AoI-related) reward per episode
            'local': []  # local (energy-related) reward per episode
        }

        # Epsilon-greedy exploration schedule
        epsilon = 1.0
        epsilon_min = 0.01
        epsilon_decay = 0.995

        for episode in range(episodes):
            state = env.reset()
            total_reward = 0
            global_reward = 0
            local_reward = 0
            done = False

            while not done:
                # Action selection: DRL agents expose get_action; other baselines expose act
                if hasattr(agent, 'get_action'):
                    continuous_actions, discrete_actions = agent.get_action(state, epsilon)
                else:
                    continuous_actions, discrete_actions = agent.act(state)

                # Step the environment
                next_state, reward, done, info = env.step(continuous_actions, discrete_actions)

                # Decomposed rewards reported by the environment
                global_r = info['global_reward']  # AoI reward
                local_r = info['local_reward']  # energy reward

                # Store the transition (only for algorithms with a replay buffer)
                if hasattr(agent, 'store_transition'):
                    action = np.concatenate([continuous_actions, discrete_actions])
                    agent.store_transition(state, action, reward, next_state, done)

                # Gradient update (only for trainable algorithms)
                if hasattr(agent, 'update_networks'):
                    agent.update_networks()

                # Advance state and accumulate rewards
                state = next_state
                total_reward += reward
                global_reward += global_r
                local_reward += local_r

            # Record per-episode totals
            rewards_history['total'].append(total_reward)
            rewards_history['global'].append(global_reward)
            rewards_history['local'].append(local_reward)

            # Decay exploration (only for epsilon-greedy agents)
            if hasattr(agent, 'get_action'):
                epsilon = max(epsilon_min, epsilon * epsilon_decay)

            # Periodic progress report
            if (episode + 1) % 10 == 0:
                avg_reward = np.mean(rewards_history['total'][-10:])
                avg_global = np.mean(rewards_history['global'][-10:])
                avg_local = np.mean(rewards_history['local'][-10:])
                print(f"Episode: {episode + 1}, "
                      f"Total: {total_reward:.2f}, "
                      f"Global: {global_reward:.2f}, "
                      f"Local: {local_reward:.2f}, "
                      f"Avg Total (10 eps): {avg_reward:.2f}")

        return rewards_history

    def _plot_comparison_results(self, results):
        """Plot the reward comparison curves shown in Figure 3."""
        plt.figure(figsize=(18, 6))
        reward_types = ['total', 'global', 'local']
        titles = ['(a) Total Reward', '(b) Global Reward', '(c) Local Reward']

        # Per-algorithm line style and color
        styles = {
            'PD-MADDPG': {'color': 'red', 'marker': 'o', 'linestyle': '-'},
            'P-MADDPG': {'color': 'blue', 'marker': 's', 'linestyle': '-'},
            'P-DDPG': {'color': 'green', 'marker': '^', 'linestyle': '-'},
            'GREEDY': {'color': 'purple', 'marker': 'D', 'linestyle': '-'},
            'RANDOM': {'color': 'orange', 'marker': 'x', 'linestyle': '-'}
        }

        for i, reward_type in enumerate(reward_types):
            plt.subplot(1, 3, i + 1)

            for algo_name, rewards in results.items():
                reward_data = rewards[reward_type]
                style = styles[algo_name]

                # Smooth the raw curve with a centered moving average
                smoothed_data = self._smooth_curve(reward_data, window=5)

                plt.plot(range(1, len(smoothed_data) + 1), smoothed_data,
                         color=style['color'], marker=style['marker'],
                         linestyle=style['linestyle'], markevery=5,
                         label=algo_name)

            plt.xlabel('Training Episode', fontsize=12)
            plt.ylabel('Reward', fontsize=12)
            plt.title(titles[i], fontsize=14)
            plt.grid(True, linestyle='--', alpha=0.7)

            # Show the legend on the first subplot only
            if i == 0:
                plt.legend(loc='lower right')

        plt.tight_layout()
        plt.savefig('figure3_reward_comparison.png', dpi=300, bbox_inches='tight')
        plt.show()

    def _smooth_curve(self, data, window=5):
        """Return a centered moving average of `data` with the given window size."""
        smoothed = []
        for i in range(len(data)):
            start = max(0, i - window // 2)
            end = min(len(data), i + window // 2 + 1)
            smoothed.append(np.mean(data[start:end]))
        return smoothed


import numpy as np
import matplotlib.pyplot as plt
import torch
import random
from torch.utils.data import Subset, ConcatDataset, Dataset, DataLoader
from collections import defaultdict
from sklearn.model_selection import train_test_split

def create_non_iid_data(dataset, num_regions, D):
    """Distribute data across regions following the paper's non-IID setting.

    Args:
        dataset: source dataset (indexable; items are (sample, label) pairs).
        num_regions: number of sub-regions to split the data into.
        D: data non-IID-ness parameter (0, 0.5, 1, 2). D == 0 means fully
           IID; larger D means each region sees fewer distinct labels.

    Returns:
        List of torch Subset objects, one per region.
    """
    all_indices = list(range(len(dataset)))
    targets = np.array([dataset[i][1] for i in all_indices])
    classes = np.unique(targets)
    num_classes = len(classes)

    # Decide how many distinct labels each region receives
    if D == 0:  # fully IID: every region sees every class
        labels_per_region = num_classes
    else:
        # Bug fix: for D < 1 (e.g. D=0.5), int(num_classes / D) exceeds
        # num_classes, which made np.random.choice(..., replace=False) raise
        # ValueError. Clamp to the number of available classes.
        labels_per_region = max(1, min(num_classes, int(num_classes / D)))

    region_data = []

    # Assign a label subset (and the matching samples) to each region
    for i in range(num_regions):
        if D == 0:  # IID case
            region_labels = classes
        else:  # non-IID case: sample this region's label subset
            region_labels = np.random.choice(classes, size=labels_per_region, replace=False)

        # Collect all indices whose label belongs to this region's label set
        region_indices = [idx for idx in all_indices if targets[idx] in region_labels]

        # Keep the region sizes roughly balanced
        if len(region_indices) > len(dataset) // num_regions:
            region_indices = random.sample(region_indices, len(dataset) // num_regions)

        # Build this region's sub-dataset
        region_dataset = Subset(dataset, region_indices)
        region_data.append(region_dataset)

    # Report the resulting distribution for sanity checking
    print(f"数据非独立性D={D}, 每个区域的标签数={labels_per_region}")
    for i, data in enumerate(region_data):
        labels = [dataset[data.indices[j]][1] for j in range(len(data))]
        unique_labels = set(labels)
        print(f"区域{i + 1}的标签分布: {unique_labels}, 数据量: {len(data)}")

    return region_data

def run_accuracy_experiment(dataset_name, d_values, epochs=100, num_regions=10, num_drones=3):
    """Run the Figure-2 experiment: track test accuracy under each non-IID value D.

    Args:
        dataset_name: one of 'MNIST', 'FashionMNIST', 'CIFAR10'.
        d_values: iterable of data non-IID-ness parameters to sweep.
        epochs: number of federated rounds (= environment time slots) per D.
        num_regions: number of sub-regions M.
        num_drones: number of UAVs N.

    Returns:
        Dict mapping each D value to its list of per-round accuracies.

    Raises:
        ValueError: if `dataset_name` is not one of the supported datasets.
    """
    # Load the requested dataset with its standard normalization constants
    if dataset_name == 'MNIST':
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        train_dataset = datasets.MNIST('./data', train=True, download=True, transform=transform)
        test_dataset = datasets.MNIST('./data', train=False, transform=transform)
        input_channels = 1
    elif dataset_name == 'FashionMNIST':
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.2860,), (0.3530,))
        ])
        train_dataset = datasets.FashionMNIST('./data', train=True, download=True, transform=transform)
        test_dataset = datasets.FashionMNIST('./data', train=False, transform=transform)
        input_channels = 1
    elif dataset_name == 'CIFAR10':
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))
        ])
        train_dataset = datasets.CIFAR10('./data', train=True, download=True, transform=transform)
        test_dataset = datasets.CIFAR10('./data', train=False, transform=transform)
        input_channels = 3
    else:
        # Bug fix: an unknown name previously fell through this chain silently
        # and caused a NameError further down; fail fast with a clear message.
        raise ValueError(f"Unsupported dataset_name: {dataset_name!r}")

    # Shared test loader used to evaluate the global model each round
    test_loader = DataLoader(test_dataset, batch_size=64)

    # Results: D value -> accuracy history
    results = {}

    # Run one full experiment per D value
    for d in d_values:
        print(f"\n========= 运行 {dataset_name} 数据集，数据非独立性 D={d} =========")

        # Create the non-IID regional data split
        region_data = create_non_iid_data(train_dataset, num_regions, d)

        # Environment and agent setup
        env = UAVEnvironment(M=num_regions, N=num_drones, T=epochs)
        env.users = [len(data) for data in region_data]  # per-region data volume

        # PD-MADDPG model (MDC2-DRL)
        model = MDC2DRLModel(input_channels=input_channels)

        # Federated training loop
        acc_history = []
        for t in range(epochs):
            # 1. UAV decision with linearly decaying exploration rate
            state = env._get_state()
            cont_act, disc_act = model.get_action(state, epsilon=max(0.1, 1.0 - t / epochs))

            # 2. Communication + local training for covered regions
            local_models = []
            for i in range(env.N):
                # Map the UAV position onto the region grid
                # NOTE(review): assumes 10x10-unit grid cells — confirm with UAVEnvironment.
                grid_size = int(np.sqrt(env.M))
                x, y = env.drone_positions[i]
                grid_x = int(x // 10)
                grid_y = int(y // 10)
                if 0 <= grid_x < grid_size and 0 <= grid_y < grid_size:
                    k = grid_x * grid_size + grid_y
                    if k < env.M:
                        data = region_data[k]
                        # Local training on the covered region's data
                        local_model = train_local_model(model.cnn, data, epochs=5, lr=0.01)
                        local_models.append(local_model)

            # 3. FedAvg aggregation of the collected local models
            if local_models:
                model.cnn = aggregate_models(local_models)

            # 4. Environment step
            next_state, reward, done, _ = env.step(cont_act, disc_act)

            # 5. Evaluate the current global model
            acc = evaluate_model(model.cnn, test_loader)
            acc_history.append(acc)

            if (t + 1) % 10 == 0:
                print(f"Epoch {t + 1}/{epochs}, 准确率: {acc:.2f}%")

        results[d] = acc_history

    return results

def train_local_model(global_model, data, epochs=5, lr=0.01):
    """Federated local training: clone the global model and fit it on one region's data."""
    # NOTE(review): assumes CNNModel exposes an `input_channels` attribute — confirm.
    replica = CNNModel(input_channels=global_model.input_channels)
    replica.load_state_dict(global_model.state_dict())

    opt = optim.SGD(replica.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()

    loader = DataLoader(data, batch_size=32, shuffle=True)
    replica.train()

    # Standard SGD epochs over the local data
    for _ in range(epochs):
        for batch_x, batch_y in loader:
            opt.zero_grad()
            loss = loss_fn(replica(batch_x), batch_y)
            loss.backward()
            opt.step()

    return replica

def aggregate_models(local_models):
    """FedAvg: build a fresh global model from the element-wise mean of the local models."""
    # NOTE(review): assumes CNNModel exposes an `input_channels` attribute — confirm.
    merged_model = CNNModel(input_channels=local_models[0].input_channels)

    with torch.no_grad():
        local_states = [m.state_dict() for m in local_models]
        merged_state = merged_model.state_dict()

        # Average every parameter/buffer tensor across the local models
        for name in merged_state:
            stacked = torch.stack([sd[name].float() for sd in local_states])
            merged_state[name] = stacked.mean(dim=0).clone()

        merged_model.load_state_dict(merged_state)

    return merged_model

def evaluate_model(model, dataloader, batch_size=128):
    """Return the model's top-1 accuracy (in percent).

    Args:
        model: the model to evaluate (set to eval mode inside).
        dataloader: either a ready DataLoader or a Dataset. Bug fix: this
            definition shadows an earlier 3-argument `evaluate_model` in this
            file, so call sites that pass a dataset plus a batch size crashed
            with TypeError. Accept both calling styles: wrap a plain dataset
            in a DataLoader on demand.
        batch_size: batch size used only when a plain dataset is passed.
    """
    if not isinstance(dataloader, DataLoader):
        dataloader = DataLoader(dataloader, batch_size=batch_size)

    model.eval()
    correct = 0
    total = 0

    # No gradients needed for evaluation
    with torch.no_grad():
        for inputs, targets in dataloader:
            outputs = model(inputs)
            _, predicted = torch.max(outputs, 1)
            total += targets.size(0)
            correct += (predicted == targets).sum().item()

    return 100.0 * correct / total

def plot_figure2_accuracy(results, dataset_name):
    """Plot Figure 2: accuracy curves under different data non-IID-ness values."""
    plt.figure(figsize=(10, 6))

    marker_cycle = ['o', 's', '^', 'd']
    color_cycle = ['blue', 'orange', 'green', 'red']

    # One curve per D value
    for idx, (d, history) in enumerate(results.items()):
        rounds = list(range(1, len(history) + 1))
        plt.plot(rounds, history, label=f'D={d}', marker=marker_cycle[idx],
                 markevery=10, color=color_cycle[idx], linewidth=2)

    plt.xlabel('训练轮数', fontsize=14)
    plt.ylabel('测试准确率 (%)', fontsize=14)
    plt.title(f'MDC²-DRL在{dataset_name}数据集上的准确率曲线', fontsize=16)
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.legend(fontsize=12)
    plt.tight_layout()
    plt.savefig(f'figure2_{dataset_name}.png', dpi=300)
    plt.show()

# 运行图2实验
def reproduce_figure2():
    """Reproduce the paper's Figure 2: accuracy curves under different non-IID-ness values."""
    d_values = [0, 0.5, 1, 2]  # non-IID settings used in the paper
    # Renamed local (was `datasets`, which shadowed the torchvision module import)
    dataset_names = ['MNIST', 'FashionMNIST', 'CIFAR10']

    for name in dataset_names:
        curves = run_accuracy_experiment(
            dataset_name=name,
            d_values=d_values,
            epochs=100,  # epoch count used in the paper
            num_regions=10,
            num_drones=3
        )
        plot_figure2_accuracy(curves, name)
# =========================================
class MDC2DRLModel(nn.Module):
    """PD-MADDPG algorithm from the original paper.

    NOTE(review): this re-defines the MDC2DRLModel declared earlier in the
    file with an identical body; this later definition is the one in effect
    at runtime.
    """

    def __init__(self, input_channels=1):
        super(MDC2DRLModel, self).__init__()
        self.cnn = CNNModel(input_channels=input_channels)  # federated task model
        self.agent = MDC2_DRL_Agent(M=10, N=3)  # DRL controller (M sub-regions, N UAVs)

    def forward(self, x):
        # Inference delegates to the CNN task model.
        return self.cnn(x)

    def get_action(self, state, epsilon):
        # Epsilon-greedy action selection delegates to the agent.
        return self.agent.get_action(state, epsilon)

class PMDDPGModel(nn.Module):
    """P-MDDPG variant: no reward-decomposition mechanism."""

    def __init__(self, input_channels=1):
        super(PMDDPGModel, self).__init__()
        # Same federated task CNN as the other variants
        self.cnn = CNNModel(input_channels=input_channels)
        # A single flat policy network replaces the per-objective actors
        self.agent = SinglePolicyAgent(M=10, N=3)

    def forward(self, x):
        """Delegate inference to the task CNN."""
        return self.cnn(x)

    def get_action(self, state, epsilon):
        """Return the single-policy agent's epsilon-greedy action."""
        return self.agent.get_action(state, epsilon)

class SinglePolicyAgent:
    """单一策略智能体，用于P-DDPG算法"""

    def __init__(self, M=10, N=3):
        self.M = M
        self.N = N
        self.continuous_action_dim = 2 * N
        self.discrete_action_dim = 4 * N

        # 简化为单一网络
        self.actor = nn.Sequential(
            nn.Linear(2 * N + M + M * N + N + N + 3 * N, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, self.continuous_action_dim + self.discrete_action_dim),
            nn.Sigmoid()
        )

        self.optimizer = optim.Adam(self.actor.parameters(), lr=0.001)

    def get_action(self, state, epsilon):
        state_tensor = torch.FloatTensor(state).unsqueeze(0)

        with torch.no_grad():
            actions = self.actor(state_tensor).numpy()[0]

        # 分离连续和离散动作
        continuous_actions = actions[:self.continuous_action_dim]
        discrete_actions = (actions[self.continuous_action_dim:] > 0.5).astype(np.float32)

        # 随机探索
        if random.random() < epsilon:
            continuous_actions = np.random.random(size=self.continuous_action_dim)
            discrete_actions = np.random.randint(0, 2, size=self.discrete_action_dim).astype(np.float32)

        # 转换θ到[0,2π]和d到[0,l_max]
        continuous_actions[::2] *= 2 * np.pi
        continuous_actions[1::2] *= 5.0  # l_max

        return continuous_actions, discrete_actions

class GreedyAlgorithm:
    """Greedy baseline: each UAV always moves toward the region with the highest AoI."""

    def __init__(self, M=10, N=3):
        self.M = M  # number of sub-regions
        self.N = N  # number of UAVs
        self.continuous_action_dim = 2 * N  # (theta, d) per UAV
        self.discrete_action_dim = 4 * N    # (b, x, y, z) per UAV

    def get_action(self, state, epsilon=0):
        """Return (continuous_actions, discrete_actions) for the current state.

        State layout (matching the agents in this file): the first 2*N entries
        are normalized UAV positions, followed by M AoI values. `epsilon` is
        accepted only for interface compatibility and is ignored.
        """
        # Bug fix: take an explicit copy of the AoI slice. When `state` is a
        # numpy array, the old slice was a VIEW, so zeroing the chosen
        # region's AoI below silently mutated the caller's state.
        aoi = np.array(state[2 * self.N:2 * self.N + self.M], dtype=float)

        continuous_actions = np.zeros(self.continuous_action_dim)
        discrete_actions = np.zeros(self.discrete_action_dim)

        # Loop-invariant: hoisted out of the per-UAV loop
        grid_size = int(np.sqrt(self.M))

        # Each UAV heads toward the region with the highest remaining AoI
        for i in range(self.N):
            # Current UAV position (de-normalized; assumes 10x10-unit cells)
            pos_x = state[i * 2] * 10
            pos_y = state[i * 2 + 1] * 10

            # Pick the region with the highest remaining AoI
            max_aoi_idx = np.argmax(aoi)
            target_x = (max_aoi_idx // grid_size) * 10 + 5
            target_y = (max_aoi_idx % grid_size) * 10 + 5

            # Heading and (capped) travel distance toward the target center
            dx = target_x - pos_x
            dy = target_y - pos_y
            theta = np.arctan2(dy, dx)
            d = min(5.0, np.sqrt(dx * dx + dy * dy))

            continuous_actions[i * 2] = theta
            continuous_actions[i * 2 + 1] = d

            # Discrete decisions: always collect, process locally,
            # skip UAV-to-UAV relay, and transmit to the base station
            discrete_actions[i * 4] = 1
            discrete_actions[i * 4 + 1] = 1
            discrete_actions[i * 4 + 2] = 0
            discrete_actions[i * 4 + 3] = 1

            # Zero this region's AoI (in the local copy) so UAVs spread out
            aoi[max_aoi_idx] = 0

        return continuous_actions, discrete_actions

class RandomAlgorithm:
    """Random baseline: samples every action uniformly at random."""

    def __init__(self, M=10, N=3):
        self.M = M
        self.N = N
        self.continuous_action_dim = 2 * N  # (theta, d) per UAV
        self.discrete_action_dim = 4 * N    # (b, x, y, z) per UAV

    def get_action(self, state, epsilon=0):
        """Return random (continuous, discrete) actions; state and epsilon are ignored."""
        cont = np.random.random(size=self.continuous_action_dim)
        cont[0::2] *= 2 * np.pi  # theta in [0, 2*pi)
        cont[1::2] *= 5.0        # d in [0, l_max)

        disc = np.random.randint(0, 2, size=self.discrete_action_dim).astype(np.float32)
        return cont, disc

class ModifiedUAVEnvironment(UAVEnvironment):
    """UAV environment extended with global/local reward decomposition."""

    def __init__(self, M=10, N=3, T=400):
        super().__init__(M, N, T)

    def step(self, continuous_actions, discrete_actions):
        """Step the base environment, then attach decomposed rewards to `info`.

        The transition and total reward are untouched; only two extra keys
        are added to the info dict.
        """
        next_state, total_reward, done, info = super().step(continuous_actions, discrete_actions)

        # Global component: negative sum of Age-of-Information across regions.
        # NOTE(review): assumes the base environment maintains `self.aoi` — confirm.
        info['global_reward'] = -np.sum(self.aoi)

        # Local component: negative energy consumption reported by the base
        # step (defaults to 0 when the base env does not report 'energy').
        info['local_reward'] = -np.sum(info.get('energy', 0))

        return next_state, total_reward, done, info

def run_algorithm_comparison(algorithm_models, epochs=100, D=2.0):
    """Run the comparison experiment across the given algorithm models.

    Args:
        algorithm_models: dict mapping algorithm name -> model/agent that
            exposes get_action(state, epsilon).
        epochs: number of environment steps per algorithm.
        D: data non-IID-ness parameter (paper Figure 3 uses D=2).

    Returns:
        Dict mapping algorithm name -> dict of reward histories with keys
        'total_rewards', 'global_rewards', 'local_rewards'.
    """
    # Load MNIST with its standard normalization
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_dataset = datasets.MNIST('./data', train=True, download=True, transform=transform)
    test_dataset = datasets.MNIST('./data', train=False, transform=transform)

    # Build the non-IID regional data split (D=2)
    region_data = create_non_iid_data(train_dataset, num_regions=10, D=D)

    # Reward histories, one entry per algorithm
    results = {name: {'total_rewards': [], 'global_rewards': [], 'local_rewards': []}
               for name in algorithm_models.keys()}

    # Run the experiment once per algorithm
    for name, model in algorithm_models.items():
        print(f"\n====== 运行算法 {name} ======")

        # Fresh environment with reward decomposition
        env = ModifiedUAVEnvironment(M=10, N=3, T=epochs)
        env.users = [len(data) for data in region_data]

        # Training loop
        # NOTE(review): the `done` flag from env.step is ignored below — the
        # loop always runs exactly `epochs` steps; confirm that is intended.
        state = env.reset()

        for t in range(epochs):
            # 1. Select actions with a linearly decaying exploration rate
            cont_act, disc_act = model.get_action(state, epsilon=max(0.1, 1.0 - t / epochs))

            # 2. Step the environment
            next_state, reward, done, info = env.step(cont_act, disc_act)

            # 3. Record total and decomposed rewards
            results[name]['total_rewards'].append(reward)
            results[name]['global_rewards'].append(info['global_reward'])
            results[name]['local_rewards'].append(info['local_reward'])

            # 4. Advance the state
            state = next_state

            # 5. Periodic progress report
            if (t + 1) % 10 == 0:
                print(f"Epoch {t + 1}/{epochs}, 总奖励: {reward:.2f}, "
                      f"全局奖励: {info['global_reward']:.2f}, "
                      f"局部奖励: {info['local_reward']:.2f}")

    return results

def plot_figure3_rewards(results):
    """Plot Figure 3: reward comparison curves for the different algorithms."""
    reward_types = ['total_rewards', 'global_rewards', 'local_rewards']
    titles = ['(a) 总奖励', '(b) 全局奖励', '(c) 局部奖励']

    fig, axes = plt.subplots(1, 3, figsize=(18, 6))

    colors = {
        'PD-MADDPG': 'red',
        'P-MADDPG': 'blue',
        'P-DDPG': 'green',
        'GREEDY': 'orange',
        'RANDOM': 'purple'
    }

    for panel, (kind, title) in enumerate(zip(reward_types, titles)):
        ax = axes[panel]

        for algo, line_color in colors.items():
            if algo not in results:
                continue
            # 5-point moving average to smooth the raw reward trace
            raw = results[algo][kind]
            smooth = np.convolve(raw, np.ones(5) / 5, mode='valid')
            ax.plot(list(range(1, len(smooth) + 1)), smooth,
                    label=algo, color=line_color, linewidth=2)

        ax.set_xlabel('训练轮数', fontsize=12)
        ax.set_ylabel('奖励', fontsize=12)
        ax.set_title(title, fontsize=14)
        ax.grid(True, linestyle='--', alpha=0.7)
        ax.legend()

    plt.tight_layout()
    plt.savefig('figure3_rewards.png', dpi=300)
    plt.show()

# 运行图3实验
def reproduce_figure3():
    """Reproduce the paper's Figure 3: reward comparison across algorithms."""
    # Instantiate one model/agent per compared algorithm
    models = {
        'PD-MADDPG': MDC2DRLModel(input_channels=1),
        'P-MADDPG': MDC2DRLModel(input_channels=1),  # same model as PD-MADDPG, but rewards are not decomposed
        'P-DDPG': PMDDPGModel(input_channels=1),
        'GREEDY': GreedyAlgorithm(M=10, N=3),
        'RANDOM': RandomAlgorithm(M=10, N=3)
    }

    # Run the experiment (paper Figure 3 uses D=2)
    comparison = run_algorithm_comparison(
        algorithm_models=models,
        epochs=100,
        D=2.0
    )

    # Plot the resulting reward curves
    plot_figure3_rewards(comparison)
# MNIST数据集下不同算法的奖励变化对比（D=2）
#             主函数：运行两个实验
def main():
    """Run both reproduction experiments (paper Figures 2 and 3)."""
    print("开始复现论文实验结果...")

    # Figure 2: accuracy under different data non-IID-ness values
    print("\n========= 复现图2：不同数据非独立性下的准确率曲线 =========")
    reproduce_figure2()

    # Figure 3: reward comparison across algorithms
    print("\n========= 复现图3：不同算法的奖励对比 =========")
    reproduce_figure3()

    print("\n实验复现完成！")

# Script entry point: run the full reproduction pipeline when executed directly.
if __name__ == "__main__":
    main()