import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from collections import deque
import random
import math
from torch.utils.data import DataLoader, Subset
from torchvision import datasets, transforms


class ActorNetwork(nn.Module):
    """Hybrid-action actor: a shared trunk feeding one continuous head and
    one discrete head (state/action definition from paper Eq. 23).

    With M=10 sub-regions and N=3 UAVs the defaults correspond to:
      - state: 3 (position) + 10 (AoI) + 30 (connection) + 3 (battery)
        + 3 (energy) + 9 (data volume) = 58
      - continuous actions: (theta, d) per UAV -> 3 * 2 = 6
      - discrete actions: (b, x, y, z) per UAV -> 3 * 4 = 12
    """

    def __init__(self, state_dim=58, continuous_action_dim=6, discrete_action_dim=12):
        super(ActorNetwork, self).__init__()
        # Shared feature extractor.
        self.fc_shared = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
        )
        # Continuous head (theta and d); Sigmoid keeps outputs in [0, 1]
        # so the caller can rescale to [0, 2*pi) and [0, l_max].
        self.continuous_head = nn.Sequential(
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, continuous_action_dim),
            nn.Sigmoid(),
        )
        # Discrete head (b, x, y, z); each decision value lies in [0, 1].
        self.discrete_head = nn.Sequential(
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, discrete_action_dim),
            nn.Sigmoid(),
        )

    def forward(self, state):
        """Return (continuous_actions, discrete_logits) for a state batch."""
        features = self.fc_shared(state)
        return self.continuous_head(features), self.discrete_head(features)


class CriticNetwork(nn.Module):
    """State-action value network: maps (state, action) to a scalar Q-value."""

    def __init__(self, state_dim, action_dim):
        super(CriticNetwork, self).__init__()
        self.q_network = nn.Sequential(
            nn.Linear(state_dim + action_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 1),
        )

    def forward(self, state, action):
        # Concatenate along the feature axis, then score the pair.
        return self.q_network(torch.cat([state, action], dim=1))


class MDC2_DRL_Agent:
    """DDPG-style agent with a hybrid (continuous + discrete) action space.

    Maintains primary ("p_") and target ("t_") actor/critic pairs, an
    experience-replay buffer, and performs soft target updates
    (paper Eqs. 29-32).
    """

    def __init__(self, M=10, N=3):
        # System parameters.
        self.M = M  # number of sub-regions
        self.N = N  # number of UAVs
        self.l_max = 5.0  # maximum flight distance (km)
        self.H = 0.1  # flight altitude (km)
        self.T = 100  # total number of time slots

        # State and action dimensions.
        # NOTE(review): with M=10, N=3 this evaluates to 61, while the
        # ActorNetwork default (58) and its docstring assume 3 position
        # entries. The explicit dims passed below keep the networks
        # consistent, but confirm which layout the paper intends.
        self.state_dim = 2 * N + M + M * N + N + N + 3 * N  # position + AoI + connection + battery + energy + data volume
        self.continuous_action_dim = 2 * N  # theta and d
        self.discrete_action_dim = 4 * N  # b, x, y, z

        # Network initialization (primary and target copies).
        self.p_actor = ActorNetwork(self.state_dim, self.continuous_action_dim, self.discrete_action_dim)
        self.p_critic = CriticNetwork(self.state_dim, self.continuous_action_dim + self.discrete_action_dim)
        self.t_actor = ActorNetwork(self.state_dim, self.continuous_action_dim, self.discrete_action_dim)
        self.t_critic = CriticNetwork(self.state_dim, self.continuous_action_dim + self.discrete_action_dim)

        # Hard-copy primary weights into the target networks.
        self.hard_update(self.t_actor, self.p_actor)
        self.hard_update(self.t_critic, self.p_critic)

        # Optimizers (primary networks only; targets track via soft update).
        self.actor_optimizer = optim.Adam(self.p_actor.parameters(), lr=0.0001)
        self.critic_optimizer = optim.Adam(self.p_critic.parameters(), lr=0.0002)

        # Training hyperparameters.
        self.gamma = 0.95  # discount factor
        self.tau = 0.01  # soft-update coefficient
        self.memory = deque(maxlen=100000)
        self.batch_size = 64

    def hard_update(self, target, source):
        """Copy every parameter of *source* into *target*."""
        for target_param, param in zip(target.parameters(), source.parameters()):
            target_param.data.copy_(param.data)

    def get_action(self, state, epsilon):
        """Return (continuous_actions, discrete_actions) for *state*.

        Continuous outputs are rescaled from the actor's [0, 1] range to
        their physical ranges; discrete outputs use epsilon-greedy
        exploration.
        """
        state_tensor = torch.FloatTensor(state).unsqueeze(0)

        with torch.no_grad():
            continuous_actions, discrete_logits = self.p_actor(state_tensor)

        # Rescale: even indices are heading angles, odd indices distances.
        continuous_actions = continuous_actions.numpy()[0]
        continuous_actions[::2] *= 2 * math.pi  # theta in [0, 2*pi]
        continuous_actions[1::2] *= self.l_max  # d in [0, l_max]

        # Discrete action selection (epsilon-greedy).
        if random.random() > epsilon:
            # Deterministic policy: threshold sigmoid outputs at 0.5.
            discrete_actions = (discrete_logits.numpy()[0] > 0.5).astype(np.float32)
        else:
            # Uniform random exploration over {0, 1} decisions.
            discrete_actions = np.random.randint(0, 2, size=self.discrete_action_dim).astype(np.float32)

        return continuous_actions, discrete_actions

    def store_transition(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition to the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def update_networks(self):
        """One gradient step for critic and actor from a replay minibatch.

        No-op until the buffer holds at least one full batch.
        """
        if len(self.memory) < self.batch_size:
            return

        # Sample a minibatch from the replay buffer.
        batch = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states))
        actions = torch.FloatTensor(np.array(actions))
        rewards = torch.FloatTensor(np.array(rewards)).unsqueeze(1)
        next_states = torch.FloatTensor(np.array(next_states))
        dones = torch.FloatTensor(np.array(dones)).unsqueeze(1)

        # Critic update (paper Eq. 29): TD target built from target nets.
        with torch.no_grad():
            next_continuous, next_discrete = self.t_actor(next_states)
            next_actions = torch.cat([next_continuous, next_discrete], dim=1)
            next_q = self.t_critic(next_states, next_actions)
            target_q = rewards + (1 - dones) * self.gamma * next_q

        current_q = self.p_critic(states, actions)
        critic_loss = nn.MSELoss()(current_q, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.p_critic.parameters(), 1.0)
        self.critic_optimizer.step()

        # Actor update (paper Eq. 31): maximize Q of the policy's actions.
        continuous_actions, discrete_actions = self.p_actor(states)
        policy_actions = torch.cat([continuous_actions, discrete_actions], dim=1)
        actor_loss = -self.p_critic(states, policy_actions).mean()

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.p_actor.parameters(), 1.0)
        self.actor_optimizer.step()

        # Soft update of the target networks (paper Eq. 32).
        self.soft_update(self.p_actor, self.t_actor, self.tau)
        self.soft_update(self.p_critic, self.t_critic, self.tau)

    def soft_update(self, local_model, target_model, tau):
        """Polyak averaging: target <- tau * local + (1 - tau) * target."""
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)


class UAVEnvironment:
    """Grid-world UAV environment (paper Secs. 2.1-2.6 and 4.1).

    Tracks UAV positions, per-region AoI, battery levels, harvested
    energy and data volumes over T discrete time slots.
    """

    def __init__(self, M=10, N=3, T=400):
        """
        Parameter settings follow paper Sec. 4.1:
        M: number of sub-regions (10m x 10m grid)
        N: number of UAVs (3)
        T: maximum number of global iterations (400)
        """
        # Exact parameter values from paper Sec. 4.1.
        self.M = M
        self.N = N
        self.T = T
        self.R_max = 1.0  # coverage radius (1 m)
        self.H = 0.1  # flight altitude (0.1 m)
        self.l_max = 10.0  # maximum flight distance (10 m)
        self.W = 100  # bandwidth (100 MHz)
        self.bs_position = np.array([-1, -1])  # base-station position [-1, -1]

        # Federated-learning parameters (paper Sec. 4.1).
        self.N_max = 500  # maximum local training rounds
        self.eta = 0.01  # learning rate
        self.epsilon = 1.0  # target accuracy

        # Power-consumption parameters (paper Sec. 2.5).
        self.p_mov = 10.0  # movement power (W)
        self.p_rev = 5.0  # receiving power
        self.p_cmp = 15.0  # computation power
        self.p_U_tra = 8.0  # UAV-to-UAV transmission power
        self.p_B_tra = 12.0  # UAV-to-base-station transmission power

        self.reset()

    def reset(self):
        """Re-initialize the episode and return the initial state vector."""
        self.current_step = 0
        # Random initial UAV positions on the 10m x 10m area.
        self.drone_positions = np.random.uniform(0, 10, size=(self.N, 2))

        # AoI per sub-region (paper Eq. 12), starting fresh.
        self.aoi = np.zeros(self.M)

        # Energy bookkeeping (paper Sec. 2.6).
        self.battery = np.full(self.N, 100.0)  # start at 100% charge
        self.energy_harvest = np.random.uniform(0, 10, size=self.N)

        # Data volumes (paper Eq. 4): local / U2U / U2B per UAV.
        self.data_volume = np.zeros((self.N, 3))

        # User devices: 1-3 users per sub-region.
        self.users = [np.random.randint(1, 4) for _ in range(self.M)]

        return self._get_state()

    def _get_state(self):
        """Build the flat state vector per paper Eq. 23 (entries normalized)."""
        state = []
        # 1. UAV positions (2*N coordinates).
        state.extend(self.drone_positions.flatten() / 10.0)  # normalized

        # 2. Per-region AoI (M entries).
        state.extend(self.aoi / 100.0)  # normalized

        # 3. Connection matrix (M x N entries).
        # NOTE(review): grid centers at row*10+5 / col*10+5 lie outside
        # the [0, 10] flight area for most cells, and grid_size =
        # int(sqrt(M)) covers only grid_size**2 of the M regions when M
        # is not a perfect square — verify the intended geometry.
        connection = np.zeros((self.M, self.N))
        grid_size = int(np.sqrt(self.M))
        for i in range(self.N):
            for k in range(self.M):
                row, col = k // grid_size, k % grid_size
                grid_center = np.array([row * 10 + 5, col * 10 + 5])
                dist = np.linalg.norm(self.drone_positions[i] - grid_center)
                connection[k, i] = 1 if dist <= self.R_max else 0
        state.extend(connection.flatten())

        # 4-6. Battery, harvested energy, data volume (all normalized).
        state.extend(self.battery / 100.0)
        state.extend(self.energy_harvest / 10.0)
        state.extend(self.data_volume.flatten() / 50.0)

        return np.array(state, dtype=np.float32)

    def step(self, continuous_actions, discrete_actions):
        """Advance one time slot (paper Secs. 2.1-2.6).

        Returns (next_state, reward, done, info) where info carries the
        mean AoI, mean energy spent this slot, and mean battery level.
        """
        # 1. Move the UAVs (paper Sec. 2.1), clipped to the flight area.
        for i in range(self.N):
            theta = continuous_actions[i * 2]  # theta in [0, 2*pi]
            d = continuous_actions[i * 2 + 1]  # d in [0, l_max]
            new_x = self.drone_positions[i, 0] + d * np.cos(theta)
            new_y = self.drone_positions[i, 1] + d * np.sin(theta)
            self.drone_positions[i] = np.clip([new_x, new_y], 0, 10)

        # 2. Energy consumption (paper Sec. 2.5).
        move_energy = np.zeros(self.N)
        comm_energy = np.zeros(self.N)
        comp_energy = np.zeros(self.N)

        for i in range(self.N):
            # Movement energy; the divisor assumes a speed of 5 m/s.
            d = continuous_actions[i * 2 + 1]
            move_energy[i] = self.p_mov * d / 5.0  # assumed speed 5 m/s

            # Unpack this UAV's four discrete decisions.
            b_offset = i * 4
            x_offset = i * 4 + 1
            y_offset = i * 4 + 2
            z_offset = i * 4 + 3

            b = discrete_actions[b_offset]
            x = discrete_actions[x_offset]
            y = discrete_actions[y_offset]
            z = discrete_actions[z_offset]

            # Communication / computation energy (fixed duty-factor weights).
            comm_energy[i] = self.p_rev * b * 0.1 + self.p_U_tra * y * 0.2 + self.p_B_tra * z * 0.3
            comp_energy[i] = self.p_cmp * (x * 0.5 + y * 0.3 + z * 0.7)

        # 3. Battery update (paper Sec. 2.6): drain, harvest, clip to [0, 100].
        total_energy = move_energy + comm_energy + comp_energy
        self.battery -= total_energy
        self.battery = np.clip(self.battery + self.energy_harvest, 0, 100)

        # Age every region's AoI, then reset regions covered by a UAV.
        # NOTE(review): int(x // 10) maps positions in [0, 10] to {0, 1}
        # only, while grid_size is int(sqrt(M)) — confirm the intended
        # cell size against the paper.
        self.aoi += 1
        grid_size = int(np.sqrt(self.M))
        for i in range(self.N):
            # Determine which region this UAV currently covers.
            x, y = self.drone_positions[i]
            grid_x = int(x // 10)
            grid_y = int(y // 10)
            if 0 <= grid_x < grid_size and 0 <= grid_y < grid_size:
                k = grid_x * grid_size + grid_y
                if k < self.M:
                    self.aoi[k] = 0  # covered region's AoI resets to 0

        # 4. Reward (paper Eq. 13) with mu1=1000, mu2=0.1 per Sec. 4.1.
        aoi_penalty = -np.sum(self.aoi)
        energy_penalty = -np.sum(total_energy)
        reward = 1000 * aoi_penalty + 0.1 * energy_penalty  # mu1=1000, mu2=0.1 (paper Sec. 4.1)

        # 5. Step bookkeeping.
        self.current_step += 1
        done = self.current_step >= self.T
        next_state = self._get_state()

        return next_state, reward, done, {
            'aoi': np.mean(self.aoi),
            'energy': np.mean(total_energy),
            'battery': np.mean(self.battery)
        }


class CNNModel(nn.Module):
    """Small two-stage CNN classifier for 10-class image datasets.

    Supports both 28x28 single-channel inputs (MNIST / FashionMNIST) and
    32x32 three-channel inputs (CIFAR10); the flattened feature size is
    selected from the channel count.
    """

    def __init__(self, input_channels=1):
        super(CNNModel, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(input_channels, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Two 2x poolings shrink 28 -> 7 (grayscale) or 32 -> 8 (CIFAR10).
        flat_dim = 64 * 8 * 8 if input_channels == 3 else 64 * 7 * 7
        self.classifier = nn.Sequential(
            nn.Linear(flat_dim, 128),
            nn.ReLU(),
            nn.Linear(128, 10),
        )

        self.input_channels = input_channels

    def forward(self, x):
        """Extract conv features, flatten, and classify into 10 logits."""
        feats = self.features(x)
        flat_dim = 64 * 8 * 8 if self.input_channels == 3 else 64 * 7 * 7
        return self.classifier(feats.view(-1, flat_dim))


class DatasetComparator:
    """Benchmark harness comparing Random / Centralized / FedAvg / MDC2-DRL
    training across MNIST, FashionMNIST and CIFAR10 (paper Sec. 4.2).

    Construction downloads the datasets into ./data on first use.
    """

    def __init__(self):
        # The three benchmark datasets (paper Sec. 4.2).
        self.datasets = {
            'MNIST': self._load_mnist(),
            'FashionMNIST': self._load_fashion(),
            'CIFAR10': self._load_cifar10()
        }

        # One model per compared algorithm.
        self.models = {
            'Random': self._init_random_model(),
            'Centralized': self._init_centralized_model(),
            'FedAvg': self._init_fedavg_model(),
            'MDC2_DRL': self._init_mdc2_model()
        }

    @staticmethod
    def _subsample(train, test):
        """Randomly keep 70% of *train* and 30% of *test*.

        Note: each split is subsampled independently; this is not a
        70/30 re-partition of the combined data.
        """
        train_size = int(0.7 * len(train))
        train_indices = torch.randperm(len(train)).tolist()

        test_size = int(0.3 * len(test))
        test_indices = torch.randperm(len(test)).tolist()

        return {
            'train': Subset(train, train_indices[:train_size]),
            'test': Subset(test, test_indices[:test_size])
        }

    def _load_mnist(self):
        """Load MNIST and subsample (70% of train, 30% of test)."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        train = datasets.MNIST('./data', train=True, download=True, transform=transform)
        test = datasets.MNIST('./data', train=False, transform=transform)
        return self._subsample(train, test)

    def _load_fashion(self):
        """Load FashionMNIST and subsample (70% of train, 30% of test)."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.2860,), (0.3530,))
        ])
        train = datasets.FashionMNIST('./data', train=True, download=True, transform=transform)
        test = datasets.FashionMNIST('./data', train=False, transform=transform)
        return self._subsample(train, test)

    def _load_cifar10(self):
        """Load CIFAR-10 and subsample (70% of train, 30% of test)."""
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))
        ])
        train = datasets.CIFAR10('./data', train=True, download=True, transform=transform)
        test = datasets.CIFAR10('./data', train=False, transform=transform)
        return self._subsample(train, test)

    def _init_random_model(self):
        """Baseline that outputs random logits (no trainable parameters)."""

        class RandomModel(nn.Module):
            def __init__(self):
                super(RandomModel, self).__init__()

            def forward(self, x):
                return torch.randn(x.size(0), 10)

        return RandomModel()

    def _init_centralized_model(self):
        """Centralized-learning model (defaults to single-channel MNIST)."""
        return CNNModel(input_channels=1)

    def _init_fedavg_model(self):
        """FedAvg model (defaults to single-channel MNIST)."""
        return CNNModel(input_channels=1)

    def _init_mdc2_model(self):
        """MDC2-DRL model: a CNN classifier plus the DRL agent wrapper."""

        class MDC2DRLModel(nn.Module):
            def __init__(self):
                super(MDC2DRLModel, self).__init__()
                self.cnn = CNNModel(input_channels=1)
                self.agent = MDC2_DRL_Agent(M=10, N=3)

            def forward(self, x):
                return self.cnn(x)

            def get_action(self, state, epsilon):
                return self.agent.get_action(state, epsilon)

        return MDC2DRLModel()

    def _distribute_data(self, train_data, num_regions):
        """Split *train_data* into contiguous, near-equal region subsets;
        the last region absorbs any remainder."""
        data_per_region = len(train_data) // num_regions
        distributed_data = []

        for i in range(num_regions):
            start_idx = i * data_per_region
            end_idx = (i + 1) * data_per_region if i < num_regions - 1 else len(train_data)
            distributed_data.append(Subset(train_data, range(start_idx, end_idx)))

        return distributed_data

    def _local_train(self, global_model, data, N_max, eta):
        """Train a copy of *global_model* on one region's data and return it.

        *global_model* itself is left untouched.
        """
        # The local copy must match the global architecture. Deriving the
        # channel count from global_model fixes a crash on CIFAR10, where
        # the previous hard-coded single-channel copy could not load the
        # 3-channel global state_dict.
        local_model = CNNModel(input_channels=global_model.input_channels)
        local_model.load_state_dict(global_model.state_dict())

        optimizer = optim.SGD(local_model.parameters(), lr=eta)
        criterion = nn.CrossEntropyLoss()

        dataloader = DataLoader(data, batch_size=32, shuffle=True)
        local_model.train()

        # Each outer iteration is a full pass over the local data, so this
        # performs min(N_max, num_batches) local epochs.
        for _ in range(min(N_max, len(dataloader))):
            for inputs, targets in dataloader:
                optimizer.zero_grad()
                outputs = local_model(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()

        return local_model

    def _aggregate_models(self, local_models):
        """FedAvg aggregation (paper Eq. 8): parameter-wise mean.

        *local_models* must be non-empty and share one architecture.
        """
        # Match the local models' architecture; the previous hard-coded
        # single-channel model could not hold averaged CIFAR10 weights.
        global_model = CNNModel(input_channels=local_models[0].input_channels)

        with torch.no_grad():
            # Gather every local state dict once.
            state_dicts = [model.state_dict() for model in local_models]

            global_dict = global_model.state_dict()

            # Average each parameter tensor across all local models.
            for key in global_dict.keys():
                stacked = torch.stack([state_dict[key].float() for state_dict in state_dicts], 0)
                global_dict[key] = torch.mean(stacked, 0).clone()

            # Load the averaged parameters into the fresh global model.
            global_model.load_state_dict(global_dict)

        return global_model

    def _evaluate(self, model, test_data):
        """Return top-1 accuracy (%) of *model* on *test_data*."""
        model.eval()
        dataloader = DataLoader(test_data, batch_size=128)

        correct = 0
        total = 0

        with torch.no_grad():
            for inputs, targets in dataloader:
                outputs = model(inputs)
                _, predicted = torch.max(outputs, 1)
                total += targets.size(0)
                correct += (predicted == targets).sum().item()

        return 100.0 * correct / total

    def run_comparison(self, epochs=10, batch_size=64):
        """Train and evaluate every model on every dataset (paper Sec. 4.2).

        Returns {dataset_name: {model_name: accuracy_percent}}.
        """
        results = {}

        for ds_name, ds in self.datasets.items():
            results[ds_name] = {}
            print(f"===== 在 {ds_name} 数据集上进行实验 =====")

            # Match CNN input channels to the dataset.
            input_channels = 3 if ds_name == 'CIFAR10' else 1

            # Rebuild trainable models with the right channel count (the
            # Random baseline has no parameters and is kept as-is).
            for model_name in self.models:
                if model_name != 'Random':
                    if model_name == 'MDC2_DRL':
                        self.models[model_name].cnn = CNNModel(input_channels=input_channels)
                    else:
                        self.models[model_name] = CNNModel(input_channels=input_channels)

            for model_name, model in self.models.items():
                print(f"训练 {model_name} 模型...")

                if model_name == 'MDC2_DRL':
                    # Our method uses the federated UAV training loop.
                    acc = self._train_mdc2(model, ds['train'], ds['test'],
                                           epochs, batch_size)
                else:
                    # All other baselines use standard supervised training.
                    acc = self._train_standard(model, ds['train'], ds['test'],
                                               epochs, batch_size)

                results[ds_name][model_name] = acc
                print(f"{model_name} 在 {ds_name} 上的准确率: {acc:.2f}%")

        return results

    def _train_standard(self, model, train_data, test_data, epochs, batch_size):
        """Standard supervised training; returns final test accuracy (%)."""
        train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)

        # Parameter-less models (e.g. RandomModel) cannot be trained;
        # evaluate them directly.
        has_params = len(list(model.parameters())) > 0
        if not has_params:
            print("模型没有可训练参数，直接进行评估...")
            return self._evaluate(model, test_data)

        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
        criterion = torch.nn.CrossEntropyLoss()

        for epoch in range(epochs):
            model.train()
            for x, y in train_loader:
                optimizer.zero_grad()
                output = model(x)
                loss = criterion(output, y)
                loss.backward()
                optimizer.step()

            # Report intermediate accuracy every 5 epochs.
            if (epoch + 1) % 5 == 0:
                acc = self._evaluate(model, test_data)
                print(f"Epoch {epoch + 1}/{epochs}, 准确率: {acc:.2f}%")

        # Final test accuracy.
        return self._evaluate(model, test_data)

    def _train_mdc2(self, model, train_data, test_data, epochs, batch_size):
        """MDC2-DRL training loop (paper Algorithm 1).

        Returns the mean of the last (up to 5) recorded accuracies (%),
        or 0.0 if the model was never evaluated.
        """
        # UAV environment driving data collection.
        env = UAVEnvironment(M=10, N=3, T=epochs)

        # Assign data shards to user regions (paper Sec. 4.2).
        user_data = self._distribute_data(train_data, env.M)

        # Federated-learning parameters (paper Sec. 4.1).
        N_max = 5  # maximum local training rounds (simplified; paper uses 500)
        eta = 0.01  # learning rate

        global_model = model.cnn
        acc_history = []

        for t in range(epochs):
            # 1. UAV exploration decision (paper Algorithm 2).
            state = env._get_state()
            cont_act, disc_act = model.get_action(state, epsilon=0.1)

            # 2. Local training on each covered region's data.
            local_models = []
            for i in range(env.N):
                # Map the UAV position to a grid region.
                # NOTE(review): int(x // 10) with positions in [0, 10]
                # mirrors the environment's geometry — verify cell size.
                grid_size = int(np.sqrt(env.M))
                x, y = env.drone_positions[i]
                grid_x = int(x // 10)
                grid_y = int(y // 10)
                if 0 <= grid_x < grid_size and 0 <= grid_y < grid_size:
                    k = grid_x * grid_size + grid_y
                    if k < env.M:
                        data = user_data[k]
                        # Local training (paper Algorithm 1).
                        local_model = self._local_train(global_model, data, N_max, eta)
                        local_models.append(local_model)

            # 3. Global aggregation (paper Eq. 8), only if any UAV trained.
            if local_models:
                global_model = self._aggregate_models(local_models)
                model.cnn = global_model  # keep the wrapper in sync

            # 4. Environment transition.
            next_state, reward, done, _ = env.step(cont_act, disc_act)

            # 5. Periodic evaluation of the global model.
            if (t + 1) % 5 == 0:
                acc = self._evaluate(global_model, test_data)
                acc_history.append(acc)
                print(f"Epoch {t + 1}/{epochs}, 准确率: {acc:.2f}%")

        # Mean of the last 5 recorded accuracies (0.0 if never evaluated).
        return np.mean(acc_history[-5:]) if acc_history else 0.0


# 训练过程
def train_drl():
    """Train the MDC2-DRL agent on the UAV environment.

    Runs 50 episodes with epsilon-greedy exploration decay, saves the
    primary actor/critic weights to disk, and returns the trained agent.
    """
    # T=100 matches paper Sec. 2.1.
    env = UAVEnvironment(M=10, N=3, T=100)
    agent = MDC2_DRL_Agent(M=10, N=3)

    num_episodes = 50  # shortened training run
    epsilon = 1.0
    epsilon_min = 0.01
    epsilon_decay = 0.995

    # Per-episode total rewards, for the rolling average below.
    rewards_history = []

    for episode in range(num_episodes):
        state = env.reset()
        episode_reward = 0
        done = False

        while not done:
            # Select actions, step the environment, store and learn.
            cont_act, disc_act = agent.get_action(state, epsilon)
            next_state, reward, done, _ = env.step(cont_act, disc_act)
            agent.store_transition(
                state, np.concatenate([cont_act, disc_act]), reward, next_state, done
            )
            agent.update_networks()

            state = next_state
            episode_reward += reward

        rewards_history.append(episode_reward)

        # Decay the exploration rate, bounded below by epsilon_min.
        epsilon = max(epsilon_min, epsilon * epsilon_decay)

        # Progress report every 10 episodes.
        if (episode + 1) % 10 == 0:
            avg_reward = np.mean(rewards_history[-10:])
            print(f"Episode: {episode + 1}, Total Reward: {episode_reward:.2f}, "
                  f"Avg Reward (10 eps): {avg_reward:.2f}, "
                  f"Avg AoI: {np.mean(env.aoi):.2f}, "
                  f"Avg Battery: {np.mean(env.battery):.2f}%, "
                  f"Epsilon: {epsilon:.3f}")

    # Persist the trained primary networks.
    torch.save(agent.p_actor.state_dict(), "actor_model.pth")
    torch.save(agent.p_critic.state_dict(), "critic_model.pth")
    print("训练完成，模型已保存")

    return agent


def run_dataset_comparison():
    """Run the dataset comparison experiment and print a result table."""
    comparator = DatasetComparator()
    results = comparator.run_comparison(epochs=20, batch_size=64)

    print("\n=========== 对比实验结果 ===========")
    methods = list(results[list(results.keys())[0]].keys())

    # Header row: corner label followed by one column per method.
    print("\t".join(["数据集\\方法"] + methods))

    # One row per dataset, accuracy formatted to two decimals.
    for dataset, dataset_results in results.items():
        cells = [dataset] + [f"{dataset_results[m]:.2f}%" for m in methods]
        print("\t".join(cells))


if __name__ == "__main__":
    # 1. Train the deep reinforcement learning agent.
    print("开始训练 DRL 代理...")
    agent = train_drl()

    # 2. Run the dataset comparison experiment.
    print("\n开始数据集对比实验...")
    run_dataset_comparison()
