import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque, namedtuple
import os
import time
import matplotlib
'''
奖励波动严重：奖励曲线显示大幅度波动，没有稳定上升趋势，表明学习过程不稳定。可能原因：

探索噪声衰减过快（EXPLORATION_NOISE_START=0.4 到 EXPLORATION_NOISE_END=0.05）

奖励尺度较小（REWARD_SCALE=0.1），导致信号微弱

奖励函数过于复杂，多个奖励组件间权重不平衡

任务收集效率低：任务收集曲线显示间歇性成功，没有稳定提高。可能原因：

环境参数设置问题，如 MAX_DISTANCE_COLLECT=15 可能偏小

对任务收集的奖励激励不足，无法引导智能体形成有效的收集策略

能耗管理不佳：能耗曲线波动大，表明UAV移动策略不高效。可能原因：

能耗惩罚系数（energy_penalty = energy_consumed * 0.5）可能偏低

在_calculate_reward中对能耗的惩罚权重不足

训练损失不稳定：

Actor损失持续上升，表明策略网络更新方向不正确

Critic损失波动大，表明值函数估计不准确

相变问题处理不当：

相变时（第1阶段→第2阶段→第3阶段）奖励和收集任务数量有明显下降

EWC正则化强度（EWC_LAMBDA=100）可能不合适

网络结构问题：

GRU隐藏状态可能没有正确地跨批次和跨回合保持或重置

在train方法中，隐藏状态的重置可能导致表示不连贯

建议改进：

奖励函数简化：减少奖励组件数量，增加核心任务（收集任务）的权重

调整探索策略：降低初始探索噪声，延缓衰减速率

重新平衡能耗惩罚：增加能耗惩罚权重，引导更高效移动

优化网络更新：降低学习率，增加批次大小，尝试梯度裁剪值调整

增强记忆缓冲：可考虑增加优先经验回放

重新设计相变策略：在任务切换时使用更平滑的过渡机制

简化状态表示：当前状态包含大量信息，可能导致学习困难

'''
# Set random seeds so results are reproducible
SEED = 30
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

# Use the GPU when one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font for plot labels
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with that font

# Training parameters
EPISODES_PER_TASK = 1500

# Environment parameters - tuned
AREA_SIZE = 100  # side length of the square area (100m x 100m)
NUM_USERS = 10  # number of ground users
MAX_STEPS = 200  # maximum steps per episode
MAX_DISTANCE_COLLECT = 20  # max UAV-user distance for task collection (increased)

# UAV parameters - tuned
UAV_SPEED = 10.0  # UAV speed (m/s)
UAV_ENERGY_PER_METER = 0.08  # energy cost per meter traveled (lowered)
UAV_HOVER_ENERGY = 0.4  # hover energy per collection (lowered)

# Task parameters
TASK_SIZE = [10, 50]  # task size range (MB)

# TD3 parameters - tuned
ACTOR_LR = 1e-4  # lowered learning rate
CRITIC_LR = 1e-4  # lowered learning rate
GAMMA = 0.99
TAU = 0.005
BUFFER_SIZE = 500000  # enlarged replay capacity
BATCH_SIZE = 512  # enlarged batch size
EXPLORATION_NOISE_START = 0.3  # lowered initial exploration noise
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.5  # enlarged reward scale

# EWC parameters - tuned
EWC_LAMBDA = 50  # weakened EWC regularization strength
FISHER_SAMPLE_SIZE = 2000  # more samples for the Fisher information estimate

# GRU parameters
SEQUENCE_LENGTH = 10  # GRU sequence length
HIDDEN_SIZE = 128  # GRU hidden-layer size

# Prioritized experience replay parameters
PER_ALPHA = 0.6  # priority exponent
PER_BETA_START = 0.4  # initial importance-sampling exponent
PER_BETA_END = 1.0  # final importance-sampling exponent
PER_EPSILON = 1e-6  # keeps priorities strictly positive


class Environment:
    """UAV data-collection environment on a square AREA_SIZE x AREA_SIZE map.

    A single UAV moves in 2D and collects a task from a user whenever it comes
    within MAX_DISTANCE_COLLECT of that user. Which users generate tasks
    depends on the curriculum phase (see update_task_generating_users). The
    agent observes a sliding SEQUENCE_LENGTH window of fixed-size state
    vectors, which is fed to the GRU-based networks.

    Bug fix: _get_state now always returns a fixed-length vector of
    2 + 2*NUM_USERS + 2 elements. The previous version only emitted slots for
    task-generating users, so in phases 2/3 the state shrank to 22/20 dims and
    no longer matched the GRU's fixed input_size (24), crashing training.
    Output is unchanged in phase 1 (all users active).
    """

    def __init__(self):
        # Fixed user positions, sampled once per environment instance
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))

        # Fixed task sizes in MB
        self.task_sizes = np.random.uniform(TASK_SIZE[0], TASK_SIZE[1], size=NUM_USERS)

        # Which users currently generate tasks - initially all of them
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)

        # UAV starts at the center of the area
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Per-user "task collected" flags
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # Step counter within the episode
        self.step_count = 0

        # Cumulative delay and energy for the episode
        self.total_delay = 0
        self.total_energy = 0

        # Visited positions, kept for rendering
        self.trajectory = [self.uav_position.copy()]

        # Previous UAV-user distances, used by the approach-shaping reward
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Sliding window of observations for the GRU
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        # Current curriculum phase (1, 2 or 3)
        self.current_phase = 1

        # Step index at which each task was collected (-1 = not collected yet)
        self.collection_times = np.zeros(NUM_USERS) - 1

    def update_task_generating_users(self, phase):
        """Update which users generate tasks according to the training phase."""
        self.current_phase = phase

        if phase == 1:  # phase 1: every user generates a task
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 2:  # phase 2: a random subset of 9 users
            indices = np.random.choice(NUM_USERS, 9, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
        else:  # phase 3: a random subset of 8 users
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True

        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Reset per-episode state and return the initial GRU observation window."""
        # UAV back to the center
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Clear collection flags
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # Reset step counter
        self.step_count = 0

        # Reset episode accumulators
        self.total_delay = 0
        self.total_energy = 0

        # Restart the trajectory
        self.trajectory = [self.uav_position.copy()]

        # Recompute baseline distances for reward shaping
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Clear the observation window
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        # Reset collection timestamps (-1 = not collected)
        self.collection_times = np.zeros(NUM_USERS) - 1

        # Initial observation
        state = self._get_state()

        # Fill the window with the initial observation
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(state)

        return self._get_gru_state()

    def step(self, action):
        """Advance one timestep.

        Parameters
        ----------
        action : array-like of shape (2,)
            Relative movement per axis in [-1, 1], scaled by UAV_SPEED.

        Returns
        -------
        tuple
            (gru_state, reward, done, info) where info carries collection,
            energy and delay statistics for logging.
        """
        # Move the UAV (action is a relative displacement in [-1, 1])
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)

        # Record trajectory for rendering
        self.trajectory.append(self.uav_position.copy())

        # Movement energy cost
        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        energy_consumed = distance_moved * UAV_ENERGY_PER_METER

        # Fresh distances to every user
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect any in-range, still-pending tasks from task-generating users
        newly_collected = 0
        collected_indices = []

        for i in range(NUM_USERS):
            # Only users flagged as task-generating can be collected from
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    self.collection_times[i] = self.step_count  # record collection step
                    newly_collected += 1
                    collected_indices.append(i)

                    # Transmission delay grows with distance and task size
                    delay = new_distances[i] * self.task_sizes[i] / 10
                    self.total_delay += delay

                    # Hover energy spent while collecting
                    energy_consumed += UAV_HOVER_ENERGY

        # Accumulate total energy
        self.total_energy += energy_consumed

        # Advance the step counter
        self.step_count += 1

        # Reward from the tuned reward function
        reward = self._calculate_reward_optimized(newly_collected, energy_consumed,
                                                  collected_indices, new_distances, self.last_distances)

        # Keep distances for next step's shaping term
        self.last_distances = new_distances

        # Episode ends when all required tasks are collected or on timeout
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        # Next observation
        state = self._get_state()

        # Slide the observation window
        self.observation_history.append(state)

        return self._get_gru_state(), reward, done, {
            "collected": sum(self.collected_tasks),
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": self.total_energy,
            "delay": self.total_delay,
            "newly_collected": newly_collected,
            "total_users": NUM_USERS
        }

    def _get_state(self):
        """Build a fixed-length observation vector.

        Layout (always 2 + 2*NUM_USERS + 2 elements, matching the network's
        input size across all phases):
        [uav_x, uav_y,
         dist_0 .. dist_{N-1},          normalized UAV-user distances
         collected_0 .. collected_{N-1}, per-user collected flags
         norm_step, progress]

        Slots for users that do NOT generate tasks are padded with the maximum
        normalized distance (1.0) and a collected flag of 1.0, i.e. "nothing
        left to collect there", so the vector length never changes.
        """
        # State 1: normalized UAV position (2 dims)
        uav_pos = self.uav_position / AREA_SIZE

        # States 2+3: per-user distance and collected flag, padded for
        # inactive users so the layout is identical in every phase
        max_norm = np.sqrt(2 * AREA_SIZE ** 2)  # area diagonal, for normalization
        distances = np.ones(NUM_USERS)  # pad: maximum normalized distance
        collected = np.ones(NUM_USERS)  # pad: treated as already collected

        for i in range(NUM_USERS):
            if self.task_generating_users[i]:
                distances[i] = np.linalg.norm(self.uav_position - self.user_positions[i]) / max_norm
                collected[i] = float(self.collected_tasks[i])

        # State 4: normalized step count (1 dim)
        norm_step = self.step_count / MAX_STEPS

        # State 5: collection progress (1 dim)
        total_required = sum(self.task_generating_users)
        if total_required > 0:
            progress = sum(self.collected_tasks & self.task_generating_users) / total_required
        else:
            progress = 1.0  # no tasks at all counts as fully done

        # Concatenate into the final fixed-size vector
        state = np.concatenate([
            uav_pos,  # UAV position (2 dims)
            distances,  # per-user distances (NUM_USERS dims)
            collected,  # per-user collected flags (NUM_USERS dims)
            np.array([norm_step, progress])  # step and progress (2 dims)
        ])

        return state

    def _get_gru_state(self):
        """Return the stacked observation window for the GRU, shape (seq_len, state_dim)."""
        # Top up the window in case it is not full yet
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())

        return np.array(list(self.observation_history))

    def _calculate_reward_optimized(self, newly_collected, energy_consumed, collected_indices, new_distances,
                                    old_distances):
        """Tuned reward: dominated by task collection, with shaping terms.

        Components: per-collection bonus, energy penalty, super-linear
        progress bonus (plus completion bonus), and proximity shaping toward
        the nearest uncollected task.
        """

        # 1. Main reward: per-task collection bonus (large)
        collection_reward = newly_collected * 30.0

        # 2. Moderate energy penalty
        energy_penalty = energy_consumed * 0.8

        # 3. Progress bonus
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users)

        if total_required > 0:
            # Super-linear in progress: later tasks are worth more
            progress_ratio = collected_required / total_required
            progress_reward = 10.0 * (progress_ratio ** 1.5)

            # Extra bonus for finishing everything
            if collected_required == total_required:
                progress_reward += 30.0
        else:
            progress_reward = 0

        # 4. Proximity shaping toward uncollected tasks (simplified)
        proximity_reward = 0
        uncollected_indices = []

        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                uncollected_indices.append(i)

        if uncollected_indices:
            # Distance to the closest uncollected task
            min_distance = min(new_distances[uncollected_indices])
            # Smoothly larger reward the closer the UAV gets
            proximity_factor = 1.0 - min(1.0, min_distance / (2 * MAX_DISTANCE_COLLECT))
            proximity_reward = 5.0 * proximity_factor

            # Additional bonus for reducing distance to any uncollected task
            for i in uncollected_indices:
                dist_diff = old_distances[i] - new_distances[i]
                if dist_diff > 0:  # only reward getting closer
                    proximity_reward += dist_diff * 0.3

        # Total reward
        reward = collection_reward + progress_reward + proximity_reward - energy_penalty

        # Global reward scaling
        reward = reward * REWARD_SCALE

        return reward

    def render(self, episode=0, clear_output=True):
        """Save a PNG visualization of the current environment state."""
        plt.figure(figsize=(10, 10))

        # Users, colored by task status
        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                # Task-generating user
                if self.collected_tasks[i]:
                    color = 'green'  # collected
                else:
                    color = 'red'  # not yet collected
            else:
                # User that generates no task
                color = 'gray'

            plt.scatter(pos[0], pos[1], s=100, c=color)
            plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=12)

        # UAV position and trajectory
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')

        # Collection radius
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]),
                            MAX_DISTANCE_COLLECT, color='blue', fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)

        # Title with collection summary (Chinese labels kept for the SimHei font setup)
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        plt.title(title)
        plt.grid(True)

        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


class GRUActor(nn.Module):
    """Recurrent (GRU) deterministic policy network for TD3.

    Consumes an observation sequence of shape [batch, seq_len, state_dim] and
    outputs a continuous action in [-max_action, max_action]. The GRU hidden
    state persists across calls until explicitly reset (or until the incoming
    batch size differs from the stored state's).
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()

        self.max_state_dim = state_dim  # largest state dimension the net accepts
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # GRU encoder over the observation sequence
        self.gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        # Fully connected head
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)

        self.max_action = max_action

        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)

        # Persistent GRU hidden state (None until first use/reset)
        self.hidden = None

        # Improved weight initialization
        self._init_weights()

    def _init_weights(self):
        """Xavier init for linear/GRU-input weights, orthogonal for recurrent weights."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)
            elif isinstance(m, nn.GRU):
                for name, param in m.named_parameters():
                    if 'weight_ih' in name:
                        nn.init.xavier_uniform_(param.data)
                    elif 'weight_hh' in name:
                        nn.init.orthogonal_(param.data)
                    elif 'bias' in name:
                        param.data.fill_(0)

    def forward(self, state, hidden=None):
        """Return (action, hidden) for a state sequence [batch, seq_len, state_dim]."""
        # Use the caller-provided hidden state when given
        if hidden is not None:
            self.hidden = hidden

        # (Re)initialize the hidden state when absent or when its batch
        # dimension does not match the incoming batch. (The previous version
        # repeated this check a second time right after the reset; that branch
        # was dead code and has been removed.)
        if self.hidden is None or self.hidden.size(1) != state.size(0):
            self.reset_hidden(state.size(0))

        # Encode the sequence
        gru_out, self.hidden = self.gru(state, self.hidden)

        # Only the last timestep's representation feeds the head
        x = gru_out[:, -1]

        # MLP head with layer norm
        x = F.relu(self.ln1(self.layer1(x)))
        x = F.relu(self.ln2(self.layer2(x)))
        action = torch.tanh(self.layer3(x))

        return self.max_action * action, self.hidden

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for the given batch size and return it."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)
        return self.hidden


class GRUCritic(nn.Module):
    """Twin Q-network (TD3 critic) with independent GRU encoders.

    Each Q-head has its own GRU over the observation sequence; the last
    timestep's encoding is concatenated with the action before an MLP head.
    Hidden states persist across calls until explicitly reset, so the exact
    call order (forward vs. Q1 vs. reset_*) matters to callers in train().
    """

    def __init__(self, state_dim, action_dim):
        super(GRUCritic, self).__init__()

        self.max_state_dim = state_dim  # largest state dimension the net accepts
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # Two independent GRUs encode the state sequence (one per Q-head)
        self.q1_gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        self.q2_gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        # Q1 head
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, 1)

        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Q2 head
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, 1)

        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        # Persistent GRU hidden states (None until first use/reset)
        self.q1_hidden = None
        self.q2_hidden = None

        # Improved weight initialization
        self._init_weights()

    def _init_weights(self):
        """Xavier init for linear/GRU-input weights, orthogonal for recurrent weights."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)
            elif isinstance(m, nn.GRU):
                for name, param in m.named_parameters():
                    if 'weight_ih' in name:
                        nn.init.xavier_uniform_(param.data)
                    elif 'weight_hh' in name:
                        nn.init.orthogonal_(param.data)
                    elif 'bias' in name:
                        param.data.fill_(0)

    def forward(self, state, action, q1_hidden=None, q2_hidden=None):
        """Return (q1, q2, q1_hidden, q2_hidden).

        state: [batch_size, seq_len, state_dim]; action: [batch_size, action_dim].
        """
        # Use caller-provided hidden states when given
        if q1_hidden is not None:
            self.q1_hidden = q1_hidden
        if q2_hidden is not None:
            self.q2_hidden = q2_hidden

        # (Re)initialize hidden states when absent or batch size changed
        if self.q1_hidden is None or self.q1_hidden.size(1) != state.size(0):
            self.reset_q1_hidden(state.size(0))
        if self.q2_hidden is None or self.q2_hidden.size(1) != state.size(0):
            self.reset_q2_hidden(state.size(0))

        # Encode the sequence with each head's GRU
        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden)
        q2_gru_out, self.q2_hidden = self.q2_gru(state, self.q2_hidden)

        # Only the last timestep's representation feeds the heads
        q1_state = q1_gru_out[:, -1]
        q2_state = q2_gru_out[:, -1]

        # Concatenate state encoding with the action
        q1_x = torch.cat([q1_state, action], dim=1)
        q2_x = torch.cat([q2_state, action], dim=1)

        # Q1 head
        q1 = F.relu(self.q1_ln1(self.q1_layer1(q1_x)))
        q1 = F.relu(self.q1_ln2(self.q1_layer2(q1)))
        q1 = self.q1_output(q1)

        # Q2 head
        q2 = F.relu(self.q2_ln1(self.q2_layer1(q2_x)))
        q2 = F.relu(self.q2_ln2(self.q2_layer2(q2)))
        q2 = self.q2_output(q2)

        return q1, q2, self.q1_hidden, self.q2_hidden

    def Q1(self, state, action, q1_hidden=None):
        """Q1-only forward pass, used for the actor's policy-gradient update."""
        if q1_hidden is not None:
            self.q1_hidden = q1_hidden

        if self.q1_hidden is None or self.q1_hidden.size(1) != state.size(0):
            self.reset_q1_hidden(state.size(0))

        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden)
        q1_state = q1_gru_out[:, -1]
        q1_x = torch.cat([q1_state, action], dim=1)

        q1 = F.relu(self.q1_ln1(self.q1_layer1(q1_x)))
        q1 = F.relu(self.q1_ln2(self.q1_layer2(q1)))
        q1 = self.q1_output(q1)

        return q1, self.q1_hidden

    def reset_hidden(self, batch_size=1):
        """Reset both heads' GRU hidden states; returns (q1_hidden, q2_hidden)."""
        self.q1_hidden = self.reset_q1_hidden(batch_size)
        self.q2_hidden = self.reset_q2_hidden(batch_size)
        return self.q1_hidden, self.q2_hidden

    def reset_q1_hidden(self, batch_size=1):
        # Zero-initialized hidden state on the module-level device
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)
        return self.q1_hidden

    def reset_q2_hidden(self, batch_size=1):
        # Zero-initialized hidden state on the module-level device
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)
        return self.q2_hidden


class PrioritizedReplayBuffer:
    """Replay buffer with proportional prioritization (PER).

    Transitions are sampled with probability proportional to priority**alpha,
    and sampling bias is corrected via importance-sampling weights normalized
    to a maximum of 1.
    """

    def __init__(self, capacity, alpha=PER_ALPHA):
        self.capacity = capacity
        self.alpha = alpha  # exponent controlling how strongly priority skews sampling
        self.buffer = []
        self.position = 0
        self.priorities = np.zeros((capacity,), dtype=np.float32)

    def push(self, state, action, reward, next_state, done):
        """Insert a transition, giving it the current maximum priority."""
        priority = self.priorities.max() if self.buffer else 1.0
        transition = (state, action, reward, next_state, done)

        if len(self.buffer) == self.capacity:
            self.buffer[self.position] = transition  # overwrite oldest slot
        else:
            self.buffer.append(transition)

        self.priorities[self.position] = priority
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size, beta=0.4):
        """Draw a prioritized batch.

        Returns (states, actions, rewards, next_states, dones, indices,
        weights) as numpy arrays, or a 7-tuple of Nones when empty.
        """
        n = len(self.buffer)
        if n == 0:
            return None, None, None, None, None, None, None

        stored = self.priorities[:n] if n < self.capacity else self.priorities

        # Sampling distribution proportional to priority**alpha
        probs = stored ** self.alpha
        probs = probs / probs.sum()

        indices = np.random.choice(n, batch_size, p=probs)

        # Importance-sampling correction, normalized so the largest weight is 1
        weights = (n * probs[indices]) ** (-beta)
        weights = np.array(weights / weights.max(), dtype=np.float32)

        # Gather and column-split the sampled transitions
        batch = [self.buffer[i] for i in indices]
        states, actions, rewards, next_states, dones = zip(*batch)

        return (
            np.array(states),
            np.array(actions),
            np.array(rewards).reshape(-1, 1),
            np.array(next_states),
            np.array(dones).reshape(-1, 1),
            indices,
            weights,
        )

    def update_priorities(self, batch_indices, batch_priorities):
        """Overwrite priorities for the given slots (epsilon keeps them nonzero)."""
        for idx, priority in zip(batch_indices, batch_priorities):
            self.priorities[idx] = priority + PER_EPSILON

    def __len__(self):
        return len(self.buffer)


class EWC:
    """Elastic Weight Consolidation (EWC) - tuned version.

    Snapshots model parameters at the end of each task together with a
    diagonal Fisher-information estimate, and exposes a quadratic penalty
    that discourages later tasks from moving important parameters.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        self.importance = {}  # accumulated per-parameter Fisher information
        self.old_params = {}  # parameter snapshot from the previous task
        self.fisher_diagonal = {}  # NOTE(review): never written or read in this class — appears unused

        # Decay factor so the importance of older tasks fades over time
        self.decay_factor = 0.8

    def _calculate_fisher_info(self, replay_buffer, beta=0.4):
        """Estimate diagonal Fisher information from prioritized replay samples.

        Returns a dict mapping parameter name -> tensor of accumulated squared
        gradients. NOTE(review): the critic branch backpropagates the raw mean
        Q-value rather than a log-likelihood, so this is a heuristic parameter-
        sensitivity estimate, not a true Fisher matrix — confirm intent.
        """
        # Start from zero for every trainable parameter
        fisher = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                fisher[name] = torch.zeros_like(param).to(device)

        # Sample transitions and accumulate squared gradients
        self.model.train()
        samples_count = min(self.fisher_sample_size, len(replay_buffer))
        if samples_count <= 0:
            return fisher

        # Process in batches
        batch_size = min(BATCH_SIZE, samples_count)
        num_batches = samples_count // batch_size

        for _ in range(num_batches):
            # Draw a prioritized batch
            states, actions, rewards, next_states, dones, _, _ = replay_buffer.sample(batch_size, beta)

            if states is None:
                continue

            states = torch.FloatTensor(states).to(device)
            actions = torch.FloatTensor(actions).to(device)

            # Clear stale gradients before the forward pass
            self.model.zero_grad()

            # The proxy loss depends on whether this wraps the actor or the critic
            if isinstance(self.model, GRUActor):
                # Actor outputs actions: MSE against the replayed actions
                outputs, _ = self.model(states)
                loss = ((outputs - actions) ** 2).mean()
            else:
                # Critic outputs Q-values: mean Q used as the proxy loss
                outputs, _, _, _ = self.model(states, actions)
                loss = outputs.mean()

            # Backprop to obtain gradients
            loss.backward()

            # Accumulate squared gradients, normalized by the total sample count
            for name, param in self.model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    fisher[name] += param.grad.pow(2) / (num_batches * batch_size)

        return fisher

    def store_task_parameters(self, task_id, replay_buffer, beta=0.4):
        """Snapshot current parameters and fold in a fresh Fisher estimate."""
        print(f"Storing parameters for task {task_id} and computing Fisher information matrix")

        # Decay previously accumulated importance (if any)
        if self.importance:
            for name in self.importance:
                self.importance[name] *= self.decay_factor

        # Snapshot current parameters
        self.old_params = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.old_params[name] = param.data.clone()

        # Estimate Fisher information for the task just finished
        new_importance = self._calculate_fisher_info(replay_buffer, beta)

        # Merge new importance with the (decayed) existing one
        if not self.importance:  # first task
            self.importance = new_importance
        else:  # accumulate
            for name in new_importance:
                if name in self.importance:
                    self.importance[name] += new_importance[name]
                else:
                    self.importance[name] = new_importance[name]

        print(f"Stored {len(self.old_params)} parameters and computed Fisher matrices")

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Return lam * sum_i F_i * (theta_i - theta_old_i)^2; 0 before any snapshot."""
        loss = 0

        if not self.old_params or not self.importance:
            return loss

        for name, param in self.model.named_parameters():
            if name in self.old_params and name in self.importance and param.requires_grad:
                # Quadratic distance from the snapshot, weighted by importance
                loss += torch.sum(self.importance[name] * (param - self.old_params[name]).pow(2))

        return lam * loss


import torch.nn.functional as F


class TD3:
    """Twin Delayed DDPG (TD3) combined with Elastic Weight Consolidation (EWC) - tuned version.

    GRU-based actor/critic, prioritized experience replay with importance-
    sampling correction, target policy smoothing, delayed policy updates,
    soft target updates, and an EWC penalty protecting parameters important
    to earlier task phases.

    Bug fix: in train(), the PER importance weights were a flat (batch,)
    tensor multiplied against the (batch, 1) per-sample TD losses; broadcasting
    produced a (batch, batch) outer product whose mean equals
    mean(weights) * mean(loss), silently discarding per-sample weighting.
    The weights are now reshaped to a (batch, 1) column vector.
    """

    def __init__(self, state_dim, action_dim, max_action):
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        self.critic = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = PrioritizedReplayBuffer(BUFFER_SIZE)

        # TD3-specific hyperparameters
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2
        self.total_it = 0

        # Gradient clipping threshold
        self.grad_clip = 1.0

        # One EWC wrapper per network
        self.ewc_actor = EWC(self.actor)
        self.ewc_critic = EWC(self.critic)
        self.current_task = 1  # initial task id

        # Per-task exploration-noise schedules - smoother transitions between phases
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            2: np.linspace(EXPLORATION_NOISE_START * 0.85, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            3: np.linspace(EXPLORATION_NOISE_START * 0.75, EXPLORATION_NOISE_END, EPISODES_PER_TASK)
        }

        # Learning-rate schedulers keyed on episode reward (mode='max')
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=150, verbose=True, min_lr=1e-5
        )
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=150, verbose=True, min_lr=1e-5
        )

        # Current beta for PER importance sampling (annealed during training)
        self.current_beta = PER_BETA_START

    def update_beta(self, progress):
        """Linearly anneal beta from PER_BETA_START to PER_BETA_END; progress in [0, 1]."""
        self.current_beta = PER_BETA_START + progress * (PER_BETA_END - PER_BETA_START)
        return self.current_beta

    def select_action(self, state, noise_scale=0.1):
        """Return a clipped action for one observation window, with Gaussian exploration noise."""
        # Ensure the GRU sequence format [batch, seq_len, feature]
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)  # add a batch dimension

        state = torch.FloatTensor(state).to(device)

        # Inference mode
        self.actor.eval()

        with torch.no_grad():
            # Fresh hidden state for a batch of one
            hidden = self.actor.reset_hidden(1)
            # Forward pass for the action
            action, hidden = self.actor(state, hidden)
            action = action.cpu().data.numpy().flatten()

        self.actor.train()

        # Exploration noise
        if noise_scale > 0:
            noise = np.random.normal(0, noise_scale * self.max_action, size=action.shape)
            action = action + noise

        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Switch to a new task: snapshot EWC state, reset GRU hidden states, save models."""
        print(f"\nSwitching to task {task_id}")

        # Snapshot old task parameters and compute Fisher information
        if self.current_task > 0 and len(self.memory) > 0:
            self.ewc_actor.store_task_parameters(self.current_task, self.memory, self.current_beta)
            self.ewc_critic.store_task_parameters(self.current_task, self.memory, self.current_beta)

        # Advance to the new task
        self.current_task = task_id

        # Reset actor/critic GRU states so no hidden state leaks across tasks
        self.actor.reset_hidden()
        self.critic.reset_hidden()

        # Save model snapshots at the task boundary
        torch.save(self.actor.state_dict(), f"results/actor_task_{self.current_task - 1}_end.pth")
        torch.save(self.critic.state_dict(), f"results/critic_task_{self.current_task - 1}_end.pth")

        print(f"Reset GRU states and saved model snapshot for task transition {task_id - 1} -> {task_id}")

    def train(self, replay_iter=1):
        """Run replay_iter TD3 update iterations; returns the last critic/actor losses."""
        losses = {"critic": 0, "actor": 0}

        # Skip until there is at least one full batch of experience
        if len(self.memory) < BATCH_SIZE:
            return losses

        # Multiple replay passes improve sample efficiency
        for _ in range(replay_iter):
            self.total_it += 1

            # Prioritized sample with importance weights
            state, action, reward, next_state, done, indices, weights = self.memory.sample(BATCH_SIZE,
                                                                                           self.current_beta)

            state = torch.FloatTensor(state).to(device)
            action = torch.FloatTensor(action).to(device)
            reward = torch.FloatTensor(reward).to(device)
            next_state = torch.FloatTensor(next_state).to(device)
            done = torch.FloatTensor(done).to(device)
            # Column vector so each weight multiplies its own (batch, 1) TD loss;
            # a flat (batch,) tensor would broadcast to (batch, batch) and wash
            # out the per-sample weighting entirely.
            weights = torch.FloatTensor(weights).reshape(-1, 1).to(device)

            # Fresh GRU hidden states for this batch
            critic_q1_h, critic_q2_h = self.critic.reset_hidden(BATCH_SIZE)
            actor_h = self.actor.reset_hidden(BATCH_SIZE)

            # Fresh target-network hidden states too
            critic_target_q1_h, critic_target_q2_h = self.critic_target.reset_hidden(BATCH_SIZE)
            actor_target_h = self.actor_target.reset_hidden(BATCH_SIZE)

            with torch.no_grad():
                # Target policy smoothing noise
                noise = torch.randn_like(action) * self.policy_noise
                noise = noise.clamp(-self.noise_clip, self.noise_clip)

                # Next action from the target actor
                next_action, _ = self.actor_target(next_state, actor_target_h)
                next_action = (next_action + noise).clamp(-self.max_action, self.max_action)

                # Clipped double-Q target
                target_q1, target_q2, _, _ = self.critic_target(next_state, next_action, critic_target_q1_h,
                                                                critic_target_q2_h)
                target_q = torch.min(target_q1, target_q2)
                target_q = reward + (1 - done) * GAMMA * target_q

            # Current Q estimates
            current_q1, current_q2, _, _ = self.critic(state, action, critic_q1_h, critic_q2_h)

            # TD errors drive the priority update
            td_error1 = torch.abs(current_q1 - target_q).detach()
            td_error2 = torch.abs(current_q2 - target_q).detach()
            td_error = torch.max(td_error1, td_error2).cpu().numpy()

            # Importance-weighted MSE losses
            critic_loss_1 = (weights * F.mse_loss(current_q1, target_q, reduction='none')).mean()
            critic_loss_2 = (weights * F.mse_loss(current_q2, target_q, reduction='none')).mean()
            critic_loss = critic_loss_1 + critic_loss_2

            # EWC regularization after the first task
            if self.current_task > 1:
                critic_ewc_loss = self.ewc_critic.calculate_ewc_loss()
                critic_loss += critic_ewc_loss

            # Critic optimization step with gradient clipping
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.grad_clip)
            self.critic_optimizer.step()

            # Refresh priorities with the new TD errors
            self.memory.update_priorities(indices, td_error.flatten())

            # Record the critic loss
            losses["critic"] = critic_loss.item()

            # Delayed policy (actor) update
            actor_loss = 0
            if self.total_it % self.policy_freq == 0:
                # Fresh hidden states for the actor pass
                actor_h = self.actor.reset_hidden(BATCH_SIZE)
                critic_q1_h = self.critic.reset_q1_hidden(BATCH_SIZE)

                # Deterministic policy gradient: maximize Q1 of the actor's actions
                actor_actions, _ = self.actor(state, actor_h)
                q1, _ = self.critic.Q1(state, actor_actions, critic_q1_h)
                actor_loss = -q1.mean()

                # EWC regularization after the first task
                if self.current_task > 1:
                    actor_ewc_loss = self.ewc_actor.calculate_ewc_loss()
                    actor_loss += actor_ewc_loss

                # Actor optimization step with gradient clipping
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.grad_clip)
                self.actor_optimizer.step()

                # Soft (Polyak) target-network updates
                for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                    target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

                for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                    target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

                # Record the actor loss
                losses["actor"] = actor_loss.item()

        return losses

    def update_lr_schedulers(self, reward):
        """Feed the latest episode reward to both plateau schedulers."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)

    def save(self, filename):
        """Save networks and optimizer states under the given filename prefix."""
        torch.save(self.actor.state_dict(), filename + "_actor.pth")
        torch.save(self.critic.state_dict(), filename + "_critic.pth")
        torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer.pth")
        torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer.pth")

    def load(self, filename):
        """Load networks (targets synced to them) and, best-effort, optimizer states."""
        self.actor.load_state_dict(torch.load(filename + "_actor.pth"))
        self.critic.load_state_dict(torch.load(filename + "_critic.pth"))
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.critic_target.load_state_dict(self.critic.state_dict())

        # Optimizer state is optional (e.g. missing files from older runs);
        # narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate.
        try:
            self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer.pth"))
            self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer.pth"))
        except Exception:
            print("Could not load optimizer state, using default.")


def train():
    """Train the TD3 agent across three sequential task phases - optimized version.

    For each phase, the environment's task-generating users and the agent's
    task (with its phase-specific exploration-noise schedule) are switched,
    then EPISODES_PER_TASK episodes are run. Per-episode rewards, collected
    task counts, energy use and network losses are recorded; the best models
    (global and per-phase, ranked by collection ratio with reward as the
    tie-breaker) plus periodic checkpoints are saved under ``results/``, and
    diagnostic plots are generated every ``eval_freq`` episodes.

    Returns:
        tuple: ``(agent, env)`` — the trained TD3 agent and the environment.
    """
    # Create the directory where all results are saved
    os.makedirs("results", exist_ok=True)

    # Initialize the environment
    env = Environment()

    # Compute state and action dimensions - use the maximum possible dimension
    max_state_dim = 2 + NUM_USERS * 2 + 2  # UAV position (2) + max user info (2*NUM_USERS) + step count and progress (2)
    action_dim = 2
    max_action = 1

    # Initialize the TD3 agent
    agent = TD3(max_state_dim, action_dim, max_action)

    # Training parameters
    total_episodes = EPISODES_PER_TASK * 3  # total number of episodes
    episodes_per_task = EPISODES_PER_TASK  # episodes per task phase
    eval_freq = 100  # evaluation/plotting frequency (in episodes)
    replay_iter = 2  # replay iterations per environment step

    # Training-history bookkeeping
    rewards_history = []
    smoothed_rewards = []
    collection_history = []
    energy_history = []
    best_reward = -float('inf')
    best_collection = 0
    losses = {"critic": [], "actor": []}

    # Best results for each phase
    best_rewards_by_phase = {1: -float('inf'), 2: -float('inf'), 3: -float('inf')}
    best_collections_by_phase = {1: 0, 2: 0, 3: 0}

    start_time = time.time()

    for phase in range(1, 4):
        # Update which users generate tasks in this phase
        env.update_task_generating_users(phase)

        # Switch the agent to the new task/phase
        agent.switch_task(phase)

        # Phase progress tracking
        # NOTE(review): phase_start_episode appears unused below - confirm
        # before removing it.
        phase_start_episode = (phase - 1) * episodes_per_task + 1

        # Per-phase exploration-noise schedule
        phase_noise = agent.task_noise[phase]

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode
            # Overall training progress, used for the beta update
            progress = global_episode / total_episodes
            current_beta = agent.update_beta(progress)

            # Reset the environment
            state = env.reset()
            episode_reward = 0
            last_collection = 0
            episode_losses = {"critic": [], "actor": []}

            # Exploration noise for this episode
            current_noise = phase_noise[episode - 1]

            for step in range(1, MAX_STEPS + 1):
                # Select an action
                action = agent.select_action(state, noise_scale=current_noise)

                # Execute the action
                next_state, reward, done, info = env.step(action)

                # Store the transition in the replay buffer
                agent.memory.push(state, action, reward, next_state, done)

                # Train the agent - run the configured number of replay iterations
                loss_info = agent.train(replay_iter)
                if loss_info["critic"] != 0:
                    episode_losses["critic"].append(loss_info["critic"])
                if loss_info["actor"] != 0:
                    episode_losses["actor"].append(loss_info["actor"])

                # Update state and accumulate reward
                state = next_state
                episode_reward += reward
                last_collection = info["collected_required"]

                # Visualize the current state every 25 steps on evaluation episodes
                if global_episode % eval_freq == 0 and step % 25 == 0:
                    env.render(global_episode)

                if done:
                    break

            # Record per-episode history
            rewards_history.append(episode_reward)
            collection_history.append(last_collection)
            energy_history.append(info["energy"])

            # Smooth the reward curve (10-episode moving average)
            if len(rewards_history) >= 10:
                smoothed_rewards.append(np.mean(rewards_history[-10:]))
            else:
                smoothed_rewards.append(episode_reward)

            # Record the episode's mean losses
            if episode_losses["critic"]:
                losses["critic"].append(np.mean(episode_losses["critic"]))
            if episode_losses["actor"]:
                losses["actor"].append(np.mean(episode_losses["actor"]))

            # Update the learning-rate schedulers
            agent.update_lr_schedulers(episode_reward)

            # Update the best results - accounts for differing task counts per phase
            current_required = info["total_required"]
            collection_ratio = last_collection / current_required if current_required > 0 else 0

            # Global best (collection ratio first, reward as tie-breaker)
            if collection_ratio > best_collection or (
                    collection_ratio == best_collection and episode_reward > best_reward):
                best_reward = episode_reward
                best_collection = collection_ratio
                agent.save(f"results/best_global")

            # Per-phase best (same ranking rule as the global best)
            if collection_ratio > best_collections_by_phase[phase] or (
                    collection_ratio == best_collections_by_phase[phase] and
                    episode_reward > best_rewards_by_phase[phase]):
                best_rewards_by_phase[phase] = episode_reward
                best_collections_by_phase[phase] = collection_ratio
                agent.save(f"results/best_phase_{phase}")

            # Save a checkpoint every 100 episodes
            if episode % 100 == 0:
                agent.save(f"results/checkpoint_phase_{phase}_episode_{episode}")

            # Print training progress
            elapsed_time = time.time() - start_time
            print(f"Phase: {phase} | Episode: {episode}/{episodes_per_task} | "
                  f"Global Episode: {global_episode}/{total_episodes} | "
                  f"Tasks: {last_collection}/{info['total_required']} | "
                  f"Reward: {episode_reward:.2f} | "
                  f"Energy: {info['energy']:.2f} | "
                  f"Steps: {env.step_count} | "
                  f"Beta: {current_beta:.3f} | "
                  f"Noise: {current_noise:.3f} | "
                  f"Time: {elapsed_time:.2f}s")

            # Generate diagnostic plots periodically
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                plt.figure(figsize=(20, 10))

                # Reward curve
                plt.subplot(2, 3, 1)
                plt.plot(rewards_history, alpha=0.3, color='blue', label='Raw')
                plt.plot(smoothed_rewards, color='red', label='Smoothed')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
                plt.title("Reward")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                # Collected-task-count curve
                plt.subplot(2, 3, 2)
                plt.plot(collection_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Collected Tasks")
                plt.xlabel("Episode")
                plt.ylabel("Number of Tasks")
                plt.grid(True)

                # Energy-consumption curve
                plt.subplot(2, 3, 3)
                plt.plot(energy_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Total Energy")
                plt.xlabel("Episode")
                plt.ylabel("Energy")
                plt.grid(True)

                # Critic-loss curve
                plt.subplot(2, 3, 4)
                if losses["critic"]:
                    plt.plot(losses["critic"])
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Critic Loss")
                plt.xlabel("Episode")
                plt.ylabel("Loss")
                plt.grid(True)

                # Actor-loss curve
                plt.subplot(2, 3, 5)
                if losses["actor"]:
                    plt.plot(losses["actor"])
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Actor Loss")
                plt.xlabel("Episode")
                plt.ylabel("Loss")
                plt.grid(True)

                # Completion-rate curve - task collection rate
                plt.subplot(2, 3, 6)
                collection_rates = []
                for i, collected in enumerate(collection_history):
                    episode_phase = (i // episodes_per_task) + 1
                    # NOTE(review): these totals are hard-coded and must stay
                    # in sync with env.update_task_generating_users for each
                    # phase - verify against the Environment implementation.
                    if episode_phase == 1:
                        total = 10  # phase 1 has 10 tasks
                    elif episode_phase == 2:
                        total = 9  # phase 2 has 9 tasks
                    else:
                        total = 8  # phase 3 has 8 tasks
                    collection_rates.append(collected / total if total > 0 else 0)

                plt.plot(collection_rates)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.axhline(y=1.0, color='red', linestyle='--')
                plt.title("Task Collection Rate")
                plt.xlabel("Episode")
                plt.ylabel("Collection Rate")
                plt.grid(True)

                plt.tight_layout()
                plt.savefig(f"results/training_curves_episode_{global_episode}.png")
                plt.close()

        # Save the model after each task phase finishes
        agent.save(f"results/final_phase_{phase}")

    # Training finished - print the final results
    print("\n" + "=" * 50)
    print("Training completed!")
    print("=" * 50)
    print("Global Best Results:")
    print(f"Collection Rate: {best_collection * 100:.1f}%, Reward: {best_reward:.2f}")
    print("\nPhase-wise Best Results:")
    for phase in range(1, 4):
        print(f"Phase {phase}: Collection Rate: {best_collections_by_phase[phase] * 100:.1f}%, "
              f"Reward: {best_rewards_by_phase[phase]:.2f}")
    print("=" * 50)

    return agent, env


def test_and_visualize(agent, env, model_path="results/best_global", phase=3):
    """Run one greedy (noise-free) evaluation episode and save visualizations - optimized version.

    Loads actor weights from ``model_path``, configures the environment for
    the given phase, rolls out a single episode without exploration noise,
    and writes a trajectory plot plus reward curves under ``results/``. A
    per-user collection report is printed to stdout.

    Args:
        agent: TD3 agent whose actor network is used for action selection.
        env: Environment instance to evaluate in.
        model_path: Path prefix of the saved model (the ``_actor.pth``
            suffix is appended).
        phase: Task phase (1-3) to configure the environment with.

    Returns:
        tuple: (collection_rate in percent, total_reward, energy, delay,
        step_count).
    """

    # Load the trained actor weights
    agent.actor.load_state_dict(torch.load(model_path + "_actor.pth"))
    agent.actor.eval()

    # Configure the environment for the requested phase
    env.update_task_generating_users(phase)

    # Reset the environment and the GRU state (per the original author's note;
    # the GRU reset itself is not visible in this function - confirm in reset())
    state = env.reset()

    total_reward = 0
    step_rewards = []

    # Bookkeeping for the test rollout
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS) - 1  # -1 means "not collected"
    collection_order = []

    for step in range(1, MAX_STEPS + 1):
        # Select an action (no exploration noise)
        action = agent.select_action(state, noise_scale=0)

        # Record the UAV position
        trajectory.append(env.uav_position.copy())

        # Snapshot which tasks were collected before this step
        collected_before = env.collected_tasks.copy()

        # Execute the action
        next_state, reward, done, info = env.step(action)

        # Record the collection time for any task collected on this step
        for i in range(NUM_USERS):
            if env.task_generating_users[i] and env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step
                collection_order.append(i)

        # Accumulate reward
        total_reward += reward
        step_rewards.append(reward)

        # Update the state
        state = next_state

        # Visualize the current state periodically
        if step % 5 == 0 or done:
            env.render(10000 + step)  # high index avoids clashing with training images

        if done:
            break

    # Convert to a numpy array for convenient plotting
    trajectory = np.array(trajectory)

    # Draw the full trajectory figure
    plt.figure(figsize=(12, 10))

    # Plot user positions
    for i, (x, y) in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            if env.collected_tasks[i]:
                color = 'green'  # task-generating user whose task was collected
                plt.scatter(x, y, s=150, c=color, marker='o')
                # Annotate with the user index and the collection step
                plt.annotate(f"用户 {i + 1}\n(步数 {int(collection_times[i])})",
                             (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
            else:
                color = 'red'  # task-generating user whose task was NOT collected
                plt.scatter(x, y, s=150, c=color, marker='o')
                plt.annotate(f"用户 {i + 1}\n(未收集)",
                             (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
        else:
            color = 'gray'  # user that does not generate tasks
            plt.scatter(x, y, s=100, c=color, marker='o')
            plt.annotate(f"用户 {i + 1}\n(不产生任务)",
                         (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)

    # Plot the UAV trajectory
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)

    # Mark the start and end points
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    # Label the UAV position every 10 steps
    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]),
                     fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Draw a connection line for each collected task
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and env.collected_tasks[i]:
            # Find where the UAV was when this task was collected
            step = int(collection_times[i])
            if step >= 0 and step < len(trajectory):  # validate the step index
                uav_pos = trajectory[step]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]],
                         'g--', alpha=0.5)

    plt.title(
        f"UAV任务收集轨迹 (阶段{phase}: 收集 {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)} 任务, 步数: {env.step_count})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.legend()
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig(f"results/final_uav_trajectory_phase_{phase}.png")
    plt.close()

    # Plot the reward curves
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 2, 1)
    plt.plot(step_rewards)
    plt.title("步奖励")
    plt.xlabel("步数")
    plt.ylabel("奖励")
    plt.grid(True)

    plt.subplot(1, 2, 2)
    plt.plot(np.cumsum(step_rewards))
    plt.title("累计奖励")
    plt.xlabel("步数")
    plt.ylabel("累计奖励")
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(f"results/test_rewards_phase_{phase}.png")
    plt.close()

    # Print the test results
    print(f"\n测试结果 (阶段 {phase}):")
    collected_count = sum(env.collected_tasks & env.task_generating_users)
    total_count = sum(env.task_generating_users)
    collection_rate = collected_count / total_count * 100 if total_count > 0 else 0

    print(f"收集任务: {collected_count}/{total_count} ({collection_rate:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

    # Print per-task collection details, ordered by collection step
    print("\n任务收集详情:")
    collection_indices = [(i, int(collection_times[i])) for i in range(NUM_USERS)
                          if env.task_generating_users[i] and env.collected_tasks[i] and collection_times[i] >= 0]
    collection_indices.sort(key=lambda x: x[1])

    for i, step in collection_indices:
        print(f"用户 {i + 1}: 在步数 {step} 收集")

    for i in range(NUM_USERS):
        if env.task_generating_users[i] and not env.collected_tasks[i]:
            print(f"用户 {i + 1}: 未收集")

    return collection_rate, total_reward, info['energy'], info['delay'], env.step_count


if __name__ == "__main__":
    # Make sure the output directory exists before any artifacts are written.
    os.makedirs("results", exist_ok=True)

    # Run the full three-phase training loop.
    agent, env = train()

    def _print_summary(title, rows):
        """Print one fixed-width summary table of result rows.

        Each row is a (phase, collection rate %, reward, energy, delay,
        steps) tuple as returned by test_and_visualize.
        """
        print("\n" + "=" * 60)
        print(title)
        print("=" * 60)
        print(f"{'阶段':^10}{'收集率 (%)':^15}{'总奖励':^15}{'总能耗':^15}{'总延迟':^15}{'总步数':^15}")
        print("-" * 60)
        for phase, rate, reward, energy, delay, steps in rows:
            print(f"{phase:^10}{rate:^15.1f}{reward:^15.2f}{energy:^15.2f}{delay:^15.2f}{steps:^15}")

    # Evaluate each phase with that phase's own best checkpoint.
    results = []
    for phase in range(1, 4):
        print(f"\n测试阶段 {phase} 的模型性能 (使用该阶段最佳模型):")
        model_path = f"results/best_phase_{phase}"
        results.append((phase, *test_and_visualize(agent, env, model_path=model_path, phase=phase)))

    # Evaluate every phase again with the single global-best checkpoint.
    print("\n使用全局最佳模型测试所有阶段:")
    global_results = []
    for phase in range(1, 4):
        print(f"\n测试阶段 {phase} 的模型性能 (使用全局最佳模型):")
        global_results.append((phase, *test_and_visualize(agent, env, model_path="results/best_global", phase=phase)))

    # Emit both summary tables (same output as printing them inline).
    _print_summary("性能汇总 - 每个阶段的最佳模型", results)
    _print_summary("性能汇总 - 全局最佳模型", global_results)

