import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time
import matplotlib
import copy

matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # render CJK glyphs correctly in figures
matplotlib.rcParams['axes.unicode_minus'] = False

# Fix all random seeds for reproducibility
SEED = 42
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

# Select GPU when available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# CJK-capable font (duplicates the matplotlib.rcParams settings above)
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Environment parameters
AREA_SIZE = 100  # square service area, 100m x 100m
NUM_USERS = 10  # total number of ground users
ACTIVE_USERS = 8  # number of users active at any one time
MAX_STEPS = 200  # maximum steps per episode
MAX_DISTANCE_COLLECT = 15  # max distance (m) at which the UAV can collect a task

# UAV parameters
UAV_SPEED = 5.0  # UAV speed (m/s)
UAV_ENERGY_PER_METER = 0.1  # energy cost per meter travelled
UAV_HOVER_ENERGY = 0.5  # energy cost of hovering to collect one task

# Task parameters
TASK_SIZE = [10, 50]  # task size range (MB)
TASK_DELAY_WEIGHT = 0.6  # delay weight (unused in the visible code)
ENERGY_WEIGHT = 0.4  # energy weight (unused in the visible code)

# TD3 hyper-parameters
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99
TAU = 0.005
BUFFER_SIZE = 200000
BATCH_SIZE = 256
EXPLORATION_NOISE_START = 0.4  # initial exploration noise scale
EXPLORATION_NOISE_END = 0.05  # final exploration noise scale
REWARD_SCALE = 0.1  # reward scaling factor

# EWC (Elastic Weight Consolidation) parameters
EWC_LAMBDA = 100  # EWC regularization strength
FISHER_SAMPLE_SIZE = 1000  # number of samples used to estimate the Fisher information


class Environment:
    """UAV data-collection environment on a 2-D square area.

    A single UAV moves in continuous 2-D steps and collects tasks from the
    currently active subset of fixed ground users. An episode ends after
    MAX_STEPS steps or once every active user's task has been collected.
    """

    def __init__(self):
        # Fixed user positions, sampled once for the lifetime of the object
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))

        # Fixed task sizes (MB), one per user
        self.task_sizes = np.random.uniform(TASK_SIZE[0], TASK_SIZE[1], size=NUM_USERS)

        # UAV starting position at the center of the area (float dtype so
        # in-place += with float movements works)
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Boolean mask of active users (only active users generate tasks)
        self.active_users = np.zeros(NUM_USERS, dtype=bool)
        self.update_active_users()

        # Per-user task collection status
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # Step counter within the current episode
        self.step_count = 0

        # Accumulated delay and energy over the episode
        self.total_delay = 0
        self.total_energy = 0

        # Visited UAV positions (used for rendering)
        self.trajectory = [self.uav_position.copy()]

        # Previous UAV-user distances, used by the proximity shaping reward
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

    def update_active_users(self):
        """Randomly mark ACTIVE_USERS users as active (all others inactive)."""
        active_indices = np.random.choice(NUM_USERS, ACTIVE_USERS, replace=False)
        self.active_users = np.zeros(NUM_USERS, dtype=bool)
        self.active_users[active_indices] = True

    def reset(self):
        """Reset per-episode state and return the initial state vector.

        Note: the active-user set is NOT re-sampled here; call
        update_active_users() separately to switch tasks.
        """
        # Reset UAV position to the center (a reasonable starting point)
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Reset task collection status
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # Reset step counter
        self.step_count = 0

        # Reset accumulated delay and energy
        self.total_delay = 0
        self.total_energy = 0

        # Reset trajectory
        self.trajectory = [self.uav_position.copy()]

        # Reset previous distances
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        return self._get_state()

    def step(self, action):
        """Apply a 2-D relative movement action and advance one time step.

        Args:
            action: array-like of shape (2,), clipped to [-1, 1] then scaled
                by UAV_SPEED.

        Returns:
            (state, reward, done, info) in the classic gym style; `info`
            carries collection counts, energy and delay totals.
        """
        # Update UAV position (action is a relative move in [-1, 1])
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)

        # Record trajectory
        self.trajectory.append(self.uav_position.copy())

        # Movement distance and corresponding energy cost
        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        energy_consumed = distance_moved * UAV_ENERGY_PER_METER

        # New distances to every user
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect any in-range tasks
        newly_collected = 0
        collected_indices = []

        for i in range(NUM_USERS):
            # Only active users' uncollected tasks are considered
            if self.active_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    collected_indices.append(i)

                    # Task delay grows with distance and task size
                    delay = new_distances[i] * self.task_sizes[i] / 10
                    self.total_delay += delay

                    # Hover energy cost per collected task
                    energy_consumed += UAV_HOVER_ENERGY

        # Accumulate total energy
        self.total_energy += energy_consumed

        # Advance step counter
        self.step_count += 1

        # Shaped reward
        reward = self._calculate_reward(newly_collected, energy_consumed, collected_indices,
                                        new_distances, self.last_distances)

        # Remember distances for the next proximity computation
        self.last_distances = new_distances

        # Termination: step limit reached or every active task collected
        active_tasks_collected = sum(self.collected_tasks[i] for i in range(NUM_USERS) if self.active_users[i])
        active_tasks_total = sum(self.active_users)

        done = (self.step_count >= MAX_STEPS) or (active_tasks_collected == active_tasks_total)

        return self._get_state(), reward, done, {
            "collected": active_tasks_collected,
            "active_users": active_tasks_total,
            "energy": self.total_energy,
            "delay": self.total_delay,
            "newly_collected": newly_collected,
            "total_users": NUM_USERS
        }

    def _get_state(self):
        """Build the observation vector.

        Layout: UAV position (2, normalized) + per-user triplets of
        (normalized distance, collected flag, active flag) + normalized step
        count — total length 2 + NUM_USERS * 3 + 1.
        """
        state = np.zeros(2 + NUM_USERS * 3 + 1)

        # Normalized UAV position
        state[0:2] = self.uav_position / AREA_SIZE

        # Per-user distance, collection flag and active flag
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            state[2 + i * 3] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # normalize by the area diagonal
            state[2 + i * 3 + 1] = float(self.collected_tasks[i])
            state[2 + i * 3 + 2] = float(self.active_users[i])

        # Normalized step count
        state[-1] = self.step_count / MAX_STEPS

        return state

    def _calculate_reward(self, newly_collected, energy_consumed, collected_indices,
                          new_distances, old_distances):
        """Shaped reward: collection + progress + proximity + completion
        bonuses minus energy and efficiency penalties, scaled by REWARD_SCALE."""
        # Base reward per newly collected task
        collection_reward = newly_collected * 20

        # Moderate energy penalty (deliberately not too large)
        energy_penalty = energy_consumed * 0.8

        # Progress reward encouraging collecting more of the active tasks
        active_tasks_collected = sum(self.collected_tasks[i] for i in range(NUM_USERS) if self.active_users[i])
        active_tasks_total = sum(self.active_users)
        progress_reward = active_tasks_collected / active_tasks_total * 10 if active_tasks_total > 0 else 0

        # Proximity shaping: reward moving toward uncollected active tasks
        proximity_reward = 0
        for i in range(NUM_USERS):
            # Only active users' uncollected tasks matter
            if self.active_users[i] and not self.collected_tasks[i]:
                # Positive when distance shrank, negative when it grew
                dist_diff = old_distances[i] - new_distances[i]

                # The closer the task, the stronger the shaping signal
                proximity_factor = max(0, 1 - new_distances[i] / (MAX_DISTANCE_COLLECT * 3))
                proximity_reward += dist_diff * 2 * proximity_factor

        # Large bonus once every active task has been collected
        completion_reward = 100 if (active_tasks_collected == active_tasks_total and active_tasks_total > 0) else 0

        # Penalize long episodes with low collection progress
        efficiency_penalty = 0
        if self.step_count > 50 and active_tasks_collected / active_tasks_total < 0.5:
            efficiency_penalty = self.step_count * 0.05

        # Combine all terms
        reward = collection_reward + progress_reward + proximity_reward + completion_reward - energy_penalty - efficiency_penalty

        # Scale for training stability
        reward = reward * REWARD_SCALE

        return reward

    def render(self, episode=0, clear_output=True):
        """Save a snapshot of the current environment state to results/.

        Note: `clear_output` is accepted but unused in the visible code;
        assumes the results/ directory already exists (created in train()).
        """
        plt.figure(figsize=(10, 10))

        # Users: green = collected, red = pending, gray x = inactive
        for i, pos in enumerate(self.user_positions):
            if self.active_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
                marker = 'o'
            else:
                color = 'gray'  # inactive users rendered in gray
                marker = 'x'
            plt.scatter(pos[0], pos[1], s=100, c=color, marker=marker)
            plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=12)

        # UAV current position and past trajectory
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')

        # Collection radius around the UAV
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]),
                            MAX_DISTANCE_COLLECT, color='blue', fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)

        active_tasks_collected = sum(self.collected_tasks[i] for i in range(NUM_USERS) if self.active_users[i])
        active_tasks_total = sum(self.active_users)

        plt.title(f"Episode {episode}, Step {self.step_count}, Collected {active_tasks_collected}/{active_tasks_total}")
        plt.grid(True)

        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


# Actor (policy) network
class Actor(nn.Module):
    """Deterministic policy network mapping a state to a bounded action.

    The output is tanh-squashed and scaled to [-max_action, max_action].
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()

        self.layer1 = nn.Linear(state_dim, 512)
        self.layer2 = nn.Linear(512, 256)
        self.layer3 = nn.Linear(256, 128)
        self.layer4 = nn.Linear(128, action_dim)

        self.max_action = max_action

        self.ln1 = nn.LayerNorm(512)
        self.ln2 = nn.LayerNorm(256)
        self.ln3 = nn.LayerNorm(128)

        # Initialize network weights
        self._init_weights()

    def _init_weights(self):
        """Xavier-initialize every linear layer and zero its bias."""
        linear_layers = [m for m in self.modules() if isinstance(m, nn.Linear)]
        for layer in linear_layers:
            nn.init.xavier_uniform_(layer.weight)
            nn.init.constant_(layer.bias, 0.0)

    def forward(self, state):
        """Return actions in [-max_action, max_action] for a batch of states."""
        hidden = state
        for linear, norm in ((self.layer1, self.ln1),
                             (self.layer2, self.ln2),
                             (self.layer3, self.ln3)):
            hidden = norm(torch.relu(linear(hidden)))
        return self.max_action * torch.tanh(self.layer4(hidden))


# Critic (value) network
class Critic(nn.Module):
    """Twin Q-networks (TD3): two independent critics over (state, action).

    `forward` returns both Q-estimates; `Q1` returns only the first head,
    which is the one used for the delayed policy (actor) update. The two
    heads share only the concatenated input, never parameters.

    Attribute names (q1_layer1, ...) are kept identical to prior checkpoints
    so saved state_dicts remain loadable.
    """

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()

        # Q1 architecture
        self.q1_layer1 = nn.Linear(state_dim + action_dim, 512)
        self.q1_layer2 = nn.Linear(512, 256)
        self.q1_layer3 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, 1)

        self.q1_ln1 = nn.LayerNorm(512)
        self.q1_ln2 = nn.LayerNorm(256)
        self.q1_ln3 = nn.LayerNorm(128)

        # Q2 architecture (twin network for clipped double-Q stability)
        self.q2_layer1 = nn.Linear(state_dim + action_dim, 512)
        self.q2_layer2 = nn.Linear(512, 256)
        self.q2_layer3 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, 1)

        self.q2_ln1 = nn.LayerNorm(512)
        self.q2_ln2 = nn.LayerNorm(256)
        self.q2_ln3 = nn.LayerNorm(128)

        # Initialize network weights
        self._init_weights()

    def _init_weights(self):
        """Xavier-initialize every linear layer; small constant bias."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)

    def _q1_forward(self, x):
        """Run the Q1 head on an already-concatenated (state, action) batch."""
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1 = self.q1_ln3(torch.relu(self.q1_layer3(q1)))
        return self.q1_output(q1)

    def _q2_forward(self, x):
        """Run the Q2 head on an already-concatenated (state, action) batch."""
        q2 = self.q2_ln1(torch.relu(self.q2_layer1(x)))
        q2 = self.q2_ln2(torch.relu(self.q2_layer2(q2)))
        q2 = self.q2_ln3(torch.relu(self.q2_layer3(q2)))
        return self.q2_output(q2)

    def forward(self, state, action):
        """Return (Q1, Q2) estimates for a batch of state-action pairs."""
        x = torch.cat([state, action], dim=1)
        return self._q1_forward(x), self._q2_forward(x)

    def Q1(self, state, action):
        """Return only the Q1 estimate (used for the actor loss)."""
        x = torch.cat([state, action], dim=1)
        return self._q1_forward(x)


class ReplayBuffer:
    """Fixed-capacity FIFO experience buffer for off-policy training."""

    def __init__(self, max_size=BUFFER_SIZE):
        # deque silently drops the oldest transition once full
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Return a uniformly sampled batch as stacked numpy arrays.

        If fewer transitions are stored than batch_size, the entire buffer
        population is sampled instead.
        """
        count = min(len(self.buffer), batch_size)
        transitions = random.sample(self.buffer, count)
        states, actions, rewards, next_states, dones = zip(*transitions)
        return (np.stack(states), np.stack(actions), np.stack(rewards),
                np.stack(next_states), np.stack(dones))

    def __len__(self):
        return len(self.buffer)


class ContinualTD3:
    """Twin Delayed DDPG (TD3) with EWC for Continual RL"""

    def __init__(self, state_dim, action_dim, max_action):
        # Actor network and its Polyak-averaged target copy
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin critic and its target copy
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()

        # TD3-specific parameters: target policy smoothing noise, its clip
        # range, and the delayed policy update frequency
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2
        self.total_it = 0

        # LR schedulers driven by the episode reward (mode='max')
        # NOTE(review): the `verbose` kwarg was deprecated and later removed
        # in recent PyTorch releases — confirm the installed version accepts it.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )

        # EWC state: diagonal Fisher estimates and parameter anchors
        self.fisher_actor = {}
        self.fisher_critic = {}
        self.optimal_actor_params = {}
        self.optimal_critic_params = {}
        self.ewc_lambda = EWC_LAMBDA
        self.use_ewc = False

    def calculate_fisher_information(self):
        """Estimate diagonal Fisher information for actor and critic.

        Averages squared gradients of the respective losses over up to
        FISHER_SAMPLE_SIZE replayed transitions. Returns the pair
        (fisher_actor, fisher_critic), dicts keyed by parameter name; both
        are empty if the replay buffer holds no samples.
        """
        # Zero-initialized accumulators, one entry per parameter tensor
        fisher_actor = {}
        fisher_critic = {}

        for name, param in self.actor.named_parameters():
            fisher_actor[name] = torch.zeros_like(param.data).to(device)

        for name, param in self.critic.named_parameters():
            fisher_critic[name] = torch.zeros_like(param.data).to(device)

        # Sample transitions from the replay buffer
        samples = min(len(self.memory), FISHER_SAMPLE_SIZE)
        if samples == 0:
            return {}, {}

        # Batch sample once, then iterate one transition at a time below
        state, action, reward, next_state, done = self.memory.sample(samples)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Actor Fisher: squared gradients of the deterministic policy loss
        for i in range(samples):
            self.actor.zero_grad()
            s = state[i:i + 1]
            a = self.actor(s)
            loss = -self.critic.Q1(s, a).mean()
            loss.backward()

            # Accumulate squared gradients (running mean over samples)
            for name, param in self.actor.named_parameters():
                if param.grad is not None:
                    fisher_actor[name] += param.grad.data ** 2 / samples

        # Critic Fisher: squared gradients of the TD loss. The backward
        # passes above also left gradients in the critic; they are cleared
        # by critic.zero_grad() at the top of each iteration here.
        for i in range(samples):
            self.critic.zero_grad()
            s = state[i:i + 1]
            a = action[i:i + 1]
            r = reward[i:i + 1]
            ns = next_state[i:i + 1]
            d = done[i:i + 1]

            with torch.no_grad():
                # Target policy smoothing, mirroring the TD3 update
                noise = torch.FloatTensor(a.shape).data.normal_(0, self.policy_noise).to(device)
                noise = noise.clamp(-self.noise_clip, self.noise_clip)
                next_a = (self.actor_target(ns) + noise).clamp(-self.max_action, self.max_action)
                target_q1, target_q2 = self.critic_target(ns, next_a)
                target_q = torch.min(target_q1, target_q2)
                target_q = r + (1 - d) * GAMMA * target_q

            current_q1, current_q2 = self.critic(s, a)
            loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)
            loss.backward()

            # Accumulate squared gradients (running mean over samples)
            for name, param in self.critic.named_parameters():
                if param.grad is not None:
                    fisher_critic[name] += param.grad.data ** 2 / samples

        return fisher_actor, fisher_critic

    def store_current_parameters(self):
        """Snapshot current actor/critic parameters as the EWC anchors."""
        for name, param in self.actor.named_parameters():
            self.optimal_actor_params[name] = param.data.clone()

        for name, param in self.critic.named_parameters():
            self.optimal_critic_params[name] = param.data.clone()

    def enable_ewc(self):
        """Compute Fisher information, anchor current weights, turn EWC on."""
        # Estimate Fisher information from replayed experience
        self.fisher_actor, self.fisher_critic = self.calculate_fisher_information()
        # Anchor the current parameters
        self.store_current_parameters()
        # Activate the EWC penalty for subsequent training steps
        self.use_ewc = True
        print("\nEWC正则化已启用\n")

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Return the policy action for `state`, optionally adding Gaussian
        exploration noise; the result is clipped to the action bounds."""
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        action = self.actor(state).cpu().data.numpy().flatten()

        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise

        return np.clip(action, -self.max_action, self.max_action)

    def train(self):
        """Run one TD3 update (critic every call, actor every `policy_freq`
        calls). Returns a loss dict, or None while the buffer is smaller
        than one batch."""
        self.total_it += 1

        if len(self.memory) < BATCH_SIZE:
            return None

        # Sample a batch of transitions from the replay buffer
        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        with torch.no_grad():
            # Target policy smoothing: clipped Gaussian noise on target actions
            noise = torch.FloatTensor(action.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)

            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q target
            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q = reward + (1 - done) * GAMMA * target_q

        # Current Q estimates
        current_q1, current_q2 = self.critic(state, action)

        # Critic TD loss over both heads
        critic_loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)

        # EWC penalty on the critic (Fisher-weighted quadratic anchor)
        if self.use_ewc:
            ewc_loss_critic = 0
            for name, param in self.critic.named_parameters():
                if name in self.fisher_critic and name in self.optimal_critic_params:
                    ewc_loss_critic += (
                                self.fisher_critic[name] * (param - self.optimal_critic_params[name]) ** 2).sum()
            critic_loss += self.ewc_lambda * ewc_loss_critic

        # Critic optimization step
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)  # gradient clipping
        self.critic_optimizer.step()

        # Delayed policy update
        actor_loss = 0
        if self.total_it % self.policy_freq == 0:
            # Deterministic policy gradient loss through Q1
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            # EWC penalty on the actor (Fisher-weighted quadratic anchor)
            if self.use_ewc:
                ewc_loss_actor = 0
                for name, param in self.actor.named_parameters():
                    if name in self.fisher_actor and name in self.optimal_actor_params:
                        ewc_loss_actor += (
                                    self.fisher_actor[name] * (param - self.optimal_actor_params[name]) ** 2).sum()
                actor_loss += self.ewc_lambda * ewc_loss_actor

            # Actor optimization step
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)  # gradient clipping
            self.actor_optimizer.step()

            # Polyak-average both target networks
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            # actor_loss stays the int 0 on non-policy-update iterations
            "actor_loss": actor_loss if isinstance(actor_loss, (int, float)) else actor_loss.item()
        }

    def update_lr_schedulers(self, reward):
        """Step both ReduceLROnPlateau schedulers with the episode reward."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


def train():
    """Train the ContinualTD3 agent over a sequence of tasks.

    The agent learns three consecutive tasks (active-user sets); at each
    task switch, EWC regularization is re-anchored to the parameters learned
    so far. Training curves, per-task best actors and full checkpoints are
    written to the results/ directory.

    Returns:
        (agent, env): the trained agent and the environment used.
    """

    # Directory for plots, models and checkpoints
    os.makedirs("results", exist_ok=True)

    # Environment
    env = Environment()

    # State / action dimensions
    state_dim = 2 + NUM_USERS * 3 + 1  # UAV pos (2) + per-user (distance, collected, active) + step (1)
    action_dim = 2
    max_action = 1

    # ContinualTD3 agent
    agent = ContinualTD3(state_dim, action_dim, max_action)

    # Training schedule
    max_episodes = 6000
    task_switch_episodes = [2000, 4000]  # episodes at which the task changes
    eval_freq = 100  # evaluation / plotting frequency

    # Bookkeeping
    rewards_history = []
    smoothed_rewards = []
    collection_history = []
    energy_history = []
    best_reward = -float('inf')
    best_collection = 0
    losses = {"critic": [], "actor": []}

    # Linear decay of the exploration noise over training
    noise_schedule = np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, max_episodes)

    start_time = time.time()
    current_task = 1

    for episode in range(1, max_episodes + 1):
        # Task switch: anchor EWC and re-randomize the active users
        if episode in task_switch_episodes:
            current_task += 1
            print(f"\n=== 切换到任务 {current_task} ===")

            # Enable EWC regularization anchored at the current parameters
            agent.enable_ewc()

            # Draw a new set of active users for the new task
            env.update_active_users()

        state = env.reset()
        episode_reward = 0
        last_collection = 0
        episode_losses = {"critic": [], "actor": []}

        # Exploration noise for this episode
        current_noise = noise_schedule[min(episode - 1, len(noise_schedule) - 1)]

        for step in range(1, MAX_STEPS + 1):
            # Act
            action = agent.select_action(state, noise_scale=current_noise)

            # Step the environment
            next_state, reward, done, info = env.step(action)

            # Store the transition
            agent.memory.add(state, action, reward, next_state, done)

            # One gradient update
            loss_info = agent.train()
            if loss_info:
                episode_losses["critic"].append(loss_info["critic_loss"])
                episode_losses["actor"].append(loss_info["actor_loss"])

            # Advance
            state = next_state
            episode_reward += reward
            last_collection = info["collected"]

            # Periodically render the environment
            if episode % eval_freq == 0 and step % 20 == 0:
                env.render(episode)

            if done:
                break

        # Record episode history
        rewards_history.append(episode_reward)
        collection_history.append(last_collection)
        energy_history.append(info["energy"])

        # 10-episode moving average of the reward
        if len(rewards_history) >= 10:
            smoothed_rewards.append(np.mean(rewards_history[-10:]))
        else:
            smoothed_rewards.append(episode_reward)

        # Mean losses of the episode
        if episode_losses["critic"]:
            losses["critic"].append(np.mean(episode_losses["critic"]))
        if episode_losses["actor"]:
            losses["actor"].append(np.mean(episode_losses["actor"]))

        # LR schedulers track the raw episode reward
        agent.update_lr_schedulers(episode_reward)

        # Track the best result (more tasks collected wins; reward breaks ties)
        active_tasks_total = info["active_users"]
        if last_collection > best_collection or (last_collection == best_collection and episode_reward > best_reward):
            best_reward = episode_reward
            best_collection = last_collection

            # Save the best model for the current task
            torch.save(agent.actor.state_dict(), f"results/best_actor_task_{current_task}.pth")

        # Progress log
        elapsed_time = time.time() - start_time
        print(f"Episode: {episode}/{max_episodes} (Task {current_task}) | "
              f"Tasks: {last_collection}/{active_tasks_total} | "
              f"Reward: {episode_reward:.2f} | "
              f"Energy: {info['energy']:.2f} | "
              f"Steps: {env.step_count} | "
              f"Noise: {current_noise:.3f} | "
              f"Time: {elapsed_time:.2f}s")

        # Periodic plots and checkpoint
        if episode % eval_freq == 0 or episode == max_episodes:
            plt.figure(figsize=(20, 5))

            # Reward curve
            plt.subplot(1, 4, 1)
            plt.plot(rewards_history, alpha=0.3, color='blue', label='Raw')
            plt.plot(smoothed_rewards, color='red', label='Smoothed')
            # Mark task switches (label only the first line so the legend
            # does not repeat 'Task Switch' for every switch point)
            for idx, task_ep in enumerate(task_switch_episodes):
                plt.axvline(x=task_ep, color='green', linestyle='--',
                            label='Task Switch' if idx == 0 else None)
            plt.title("Reward")
            plt.xlabel("Episode")
            plt.ylabel("Reward")
            plt.legend()
            plt.grid(True)

            # Collected-tasks curve
            plt.subplot(1, 4, 2)
            plt.plot(collection_history)
            # Mark task switches
            for task_ep in task_switch_episodes:
                plt.axvline(x=task_ep, color='green', linestyle='--')
            plt.title("Collected Tasks")
            plt.xlabel("Episode")
            plt.ylabel("Number of Tasks")
            plt.ylim(0, NUM_USERS + 1)
            plt.grid(True)

            # Energy curve
            plt.subplot(1, 4, 3)
            plt.plot(energy_history)
            # Mark task switches
            for task_ep in task_switch_episodes:
                plt.axvline(x=task_ep, color='green', linestyle='--')
            plt.title("Total Energy")
            plt.xlabel("Episode")
            plt.ylabel("Energy")
            plt.grid(True)
            # Loss curves
            plt.subplot(1, 4, 4)
            if losses["critic"]:
                plt.plot(losses["critic"], label='Critic Loss')
            if losses["actor"]:
                plt.plot(losses["actor"], label='Actor Loss')
            # Mark task switches
            for task_ep in task_switch_episodes:
                plt.axvline(x=task_ep, color='green', linestyle='--')
            plt.title("Training Loss")
            plt.xlabel("Episode")
            plt.ylabel("Loss")
            plt.legend()
            plt.grid(True)

            plt.tight_layout()
            plt.savefig(f"results/training_curves_episode_{episode}.png")
            plt.close()

            # Save a full checkpoint (networks, optimizers and EWC state)
            torch.save({
                'actor_state_dict': agent.actor.state_dict(),
                'critic_state_dict': agent.critic.state_dict(),
                'actor_optimizer': agent.actor_optimizer.state_dict(),
                'critic_optimizer': agent.critic_optimizer.state_dict(),
                'episode': episode,
                'rewards_history': rewards_history,
                'collection_history': collection_history,
                'best_reward': best_reward,
                'best_collection': best_collection,
                'current_task': current_task,
                'fisher_actor': agent.fisher_actor,
                'fisher_critic': agent.fisher_critic,
                'optimal_actor_params': agent.optimal_actor_params,
                'optimal_critic_params': agent.optimal_critic_params,
            }, f"results/checkpoint_episode_{episode}.pt")

    print(f"Training completed! Best result: {best_collection}/{ACTIVE_USERS} tasks, Reward: {best_reward:.2f}")
    return agent, env


def test_and_visualize(agent, env, model_path=None, task=3):
    """Run one greedy (noise-free) episode with a saved actor and plot results.

    Loads `model_path` if given, otherwise the best actor saved for `task`.
    Saves a trajectory figure and reward curves under results/ and prints a
    per-user collection summary.
    """

    # Load the requested model (an explicit path takes precedence)
    if model_path:
        agent.actor.load_state_dict(torch.load(model_path))
    else:
        # Fall back to the best model saved for the given task
        agent.actor.load_state_dict(torch.load(f"results/best_actor_task_{task}.pth"))
    agent.actor.eval()

    # Reset the environment
    state = env.reset()

    total_reward = 0
    step_rewards = []

    # Test-episode records.
    # NOTE(review): positions are appended BEFORE env.step() below, so
    # trajectory[k] holds the UAV position before step k's move, and the
    # final post-move position is never appended — the collection
    # connection lines drawn from trajectory[step] therefore lag the
    # actual pickup position by one move.
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS)
    collection_order = []

    for step in range(1, MAX_STEPS + 1):
        # Greedy action (no exploration noise)
        action = agent.select_action(state, noise_scale=0)

        # Record the UAV position before the move
        trajectory.append(env.uav_position.copy())

        # Snapshot collection state to detect pickups made this step
        collected_before = env.collected_tasks.copy()

        # Step the environment
        next_state, reward, done, info = env.step(action)

        # Record when each task was collected
        for i in range(NUM_USERS):
            if env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step
                collection_order.append(i)

        # Accumulate reward
        total_reward += reward
        step_rewards.append(reward)

        # Advance
        state = next_state

        # Periodic snapshot. NOTE(review): `step` is passed as render()'s
        # `episode` argument, so snapshot filenames encode the step number.
        if step % 5 == 0 or done:
            env.render(step)

        if done:
            break

    # Convert to an array for plotting
    trajectory = np.array(trajectory)

    # Full trajectory figure
    plt.figure(figsize=(12, 10))

    # User positions
    for i, (x, y) in enumerate(env.user_positions):
        if env.active_users[i]:
            color = 'green' if env.collected_tasks[i] else 'red'
            marker = 'o'
            label = "活跃用户" if i == 0 else None  # label only the first for the legend
        else:
            color = 'gray'  # inactive users rendered in gray
            marker = 'x'
            label = "非活跃用户" if i == 0 else None  # label only the first for the legend

        plt.scatter(x, y, s=150, c=color, marker=marker, label=label)

        # Annotate user id and collection time
        if env.active_users[i]:
            if env.collected_tasks[i]:
                plt.annotate(f"用户 {i + 1}\n(步骤 {int(collection_times[i])})",
                             (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
            else:
                plt.annotate(f"用户 {i + 1}\n(未收集)",
                             (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
        else:
            plt.annotate(f"用户 {i + 1}\n(非活跃)",
                         (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)

    # UAV trajectory
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)

    # Start and end markers
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    # Mark the UAV position every 10 steps
    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]),
                     fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Connection lines for collected tasks
    for i in range(NUM_USERS):
        if env.active_users[i] and env.collected_tasks[i]:
            # UAV position recorded for the collection step (see NOTE above)
            step = int(collection_times[i])
            if step < len(trajectory):
                uav_pos = trajectory[step]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]],
                         'g--', alpha=0.5)

    active_tasks_collected = sum(env.collected_tasks[i] for i in range(NUM_USERS) if env.active_users[i])
    active_tasks_total = sum(env.active_users)

    plt.title(
        f"UAV任务收集轨迹 (收集 {active_tasks_collected}/{active_tasks_total} 个任务, 步数: {env.step_count})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.legend()
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig("results/final_uav_trajectory.png")
    plt.close()

    # Reward curves
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 2, 1)
    plt.plot(step_rewards)
    plt.title("每步奖励")
    plt.xlabel("步数")
    plt.ylabel("奖励")
    plt.grid(True)

    plt.subplot(1, 2, 2)
    plt.plot(np.cumsum(step_rewards))
    plt.title("累计奖励")
    plt.xlabel("步数")
    plt.ylabel("累计奖励")
    plt.grid(True)

    plt.tight_layout()
    plt.savefig("results/test_rewards.png")
    plt.close()

    active_tasks_collected = sum(env.collected_tasks[i] for i in range(NUM_USERS) if env.active_users[i])
    active_tasks_total = sum(env.active_users)

    print("\n测试结果:")
    print(
        f"收集任务: {active_tasks_collected}/{active_tasks_total} ({active_tasks_collected / active_tasks_total * 100:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

    # Per-user collection details
    print("\n任务收集详情:")
    for i in range(NUM_USERS):
        if env.active_users[i]:
            status = f"步骤 {int(collection_times[i])}" if env.collected_tasks[i] else "未收集"
            print(f"用户 {i + 1}: {status} (活跃)")
        else:
            print(f"用户 {i + 1}: 非活跃")


def test_on_different_tasks(agent, env, tasks=3):
    """Evaluate each task's saved best actor on a freshly randomized task set.

    For every task index, loads results/best_actor_task_{task}.pth, runs one
    greedy episode, prints a summary, and collects the metrics.

    Args:
        agent: trained ContinualTD3 agent (its actor weights are overwritten).
        env: Environment to evaluate in; active users are re-sampled per task.
        tasks: number of task checkpoints to evaluate.

    Returns:
        list of per-task dicts with keys task/collected/total/reward/steps.
    """
    print("\n=== 在所有任务上测试模型性能 ===")

    results = []

    for task in range(1, tasks + 1):
        print(f"\n测试任务 {task}:")

        # Load this task's best model; skip the task if the checkpoint file
        # is missing or its weights don't fit the current network. (Narrowed
        # from a bare `except:` that swallowed every exception.)
        try:
            agent.actor.load_state_dict(torch.load(f"results/best_actor_task_{task}.pth"))
            agent.actor.eval()
        except (FileNotFoundError, RuntimeError):
            print(f"找不到任务 {task} 的模型，跳过")
            continue

        # Re-randomize the active users; this is a simplified stand-in for
        # restoring the exact user set the task was trained on
        env.update_active_users()

        # Reset the environment
        state = env.reset()

        total_reward = 0

        for step in range(1, MAX_STEPS + 1):
            # Greedy action (no exploration noise)
            action = agent.select_action(state, noise_scale=0)

            # Step the environment
            next_state, reward, done, info = env.step(action)

            # Accumulate reward
            total_reward += reward

            # Advance
            state = next_state

            if done:
                break

        active_tasks_collected = sum(env.collected_tasks[i] for i in range(NUM_USERS) if env.active_users[i])
        active_tasks_total = sum(env.active_users)

        print(
            f"收集任务: {active_tasks_collected}/{active_tasks_total} ({active_tasks_collected / active_tasks_total * 100:.1f}%)")
        print(f"总奖励: {total_reward:.2f}")
        print(f"总步数: {env.step_count}")

        results.append({
            "task": task,
            "collected": active_tasks_collected,
            "total": active_tasks_total,
            "reward": total_reward,
            "steps": env.step_count
        })

    return results


if __name__ == "__main__":
    # Train the agent across all sequential tasks
    agent, env = train()

    # Test and visualize the final policy
    test_and_visualize(agent, env)

    # Evaluate each task's saved model
    test_on_different_tasks(agent, env)


