import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
import matplotlib.pyplot as plt
from collections import deque
import random
import time
import os

# Device configuration: prefer CUDA when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Environment parameters
AREA_SIZE = 1000               # side length of the square service area (presumably metres)
NUM_USERS = 10                 # number of ground users
UAV_HEIGHT = 100               # UAV altitude; not referenced in the visible code
UAV_SPEED = 50                 # per-step displacement scale of the UAV
COMPUTING_CAPACITY = 1e9       # UAV compute capacity (cycles/s, used as delay divisor)
TASK_SIZE_RANGE = (1e6, 5e6)   # per-user task size range (presumably bits)
DEADLINE_RANGE = (10, 30)      # per-user deadline range (environment steps)

# PPO hyper-parameters
ACTOR_LR = 3e-4                # actor Adam learning rate
CRITIC_LR = 3e-4               # critic Adam learning rate
GAMMA = 0.99                   # discount factor
GAE_LAMBDA = 0.95              # GAE(lambda) smoothing factor
CLIP_EPSILON = 0.2             # PPO ratio clipping range
PPO_EPOCHS = 10                # optimisation epochs per rollout
VALUE_LOSS_COEF = 0.5          # not referenced in the visible code
ENTROPY_COEF = 0.01            # entropy bonus weight in the actor loss
MAX_GRAD_NORM = 0.5            # gradient clipping threshold

# Training parameters
EPISODES_PER_TASK = 200        # episodes per continual-learning phase
MAX_STEPS = 200                # max environment steps per episode
HIDDEN_SIZE = 128              # GRU hidden size for actor and critic
BUFFER_SIZE = 2048             # PPO uses a comparatively small rollout buffer
BATCH_SIZE = 64                # minibatch size for PPO updates
TAU = 0.005                    # soft-update coefficient; not referenced in the visible code

# EWC (Elastic Weight Consolidation) parameters
EWC_LAMBDA = 1000              # weight of the EWC quadratic penalty
FISHER_SAMPLE_SIZE = 200       # states sampled when estimating the Fisher diagonal

# Exploration parameters (PPO explores mainly via entropy regularisation);
# neither constant is referenced in the visible code.
EXPLORATION_NOISE_START = 0.1
EXPLORATION_NOISE_END = 0.05


# Environment implementation
class Environment:
    """UAV task-collection environment on a square area.

    A single UAV starts at the centre of the area, moves every step, and
    collects tasks from whichever users the current phase marks as
    task-generating; the reward trades collection progress off against
    energy use and task delay.
    """

    def __init__(self):
        # Static user layout and per-user task parameters (fixed for the
        # lifetime of this environment instance).
        self.user_positions = np.random.uniform(50, AREA_SIZE - 50, (NUM_USERS, 2))
        self.task_sizes = np.random.uniform(*TASK_SIZE_RANGE, NUM_USERS)
        self.deadlines = np.random.uniform(*DEADLINE_RANGE, NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2])
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.trajectory = [self.uav_position.copy()]

    def update_task_generating_users(self, task_id):
        """Activate the user subset for continual-learning phase `task_id` (1-3)."""
        if task_id == 1:
            self.task_generating_users = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0], dtype=bool)
        elif task_id == 2:
            self.task_generating_users = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=bool)
        elif task_id == 3:
            self.task_generating_users = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 1], dtype=bool)

    def reset(self):
        """Reset UAV pose, collection flags and step counter; return the initial state."""
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2])
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.trajectory = [self.uav_position.copy()]
        return self._get_state()

    def _get_state(self):
        """Build the observation: normalised UAV position, 4 features per
        user (relative position, task size, uncollected flag) and the
        normalised step counter. Length is 2 + 4 * NUM_USERS + 1."""
        state = [self.uav_position[0] / AREA_SIZE, self.uav_position[1] / AREA_SIZE]
        for i in range(NUM_USERS):
            if self.task_generating_users[i]:
                rel_pos = (self.user_positions[i] - self.uav_position) / AREA_SIZE
                state.extend([rel_pos[0], rel_pos[1], self.task_sizes[i] / 5e6,
                              int(not self.collected_tasks[i])])
            else:
                # Inactive users are zero-masked so the state size stays fixed.
                state.extend([0, 0, 0, 0])
        state.append(self.step_count / MAX_STEPS)
        return np.array(state, dtype=np.float32)

    def step(self, action):
        """Apply a 2-D movement action in [-1, 1]^2; return (state, reward, done, info)."""
        self.step_count += 1

        # Normalise and apply the action, keeping the UAV inside the area.
        action = np.clip(action, -1, 1)
        move_distance = UAV_SPEED
        self.uav_position += action * move_distance
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())

        # Reward computation also performs task collection as a side effect.
        reward, info = self._calculate_reward()
        done = (self.step_count >= MAX_STEPS or
                sum(self.collected_tasks & self.task_generating_users) == sum(self.task_generating_users))

        return self._get_state(), reward, done, info

    def _calculate_reward(self):
        """Collect in-range tasks and compute the step reward plus an info dict
        with energy/delay/reward breakdowns."""
        info = {'energy': 0, 'delay': 0, 'collected_required': 0, 'total_required': 0}

        # Flight energy: a speed-dependent constant per step.
        flight_energy = 0.5 * UAV_SPEED ** 2
        info['flight_energy'] = flight_energy

        total_required_tasks = sum(self.task_generating_users)

        collection_reward = 0
        proximity_reward = 0
        completion_bonus = 0
        objective_penalty = 0
        comp_energy = 0
        total_delay = 0
        offloading_delays = []

        for i in range(NUM_USERS):
            if not self.task_generating_users[i]:
                continue

            distance = np.linalg.norm(self.uav_position - self.user_positions[i])

            # Dense shaping: reward proximity within 100 units of each active user.
            proximity_reward += max(0, 1 - distance / 100)

            # Collect the task when within 50 units.
            if distance <= 50 and not self.collected_tasks[i]:
                self.collected_tasks[i] = True
                collection_reward += 50

                # Computation energy and delay for the newly collected task.
                comp_energy += self.task_sizes[i] * 1e-9
                offloading_delay = self.task_sizes[i] / (10e6)  # 10 Mbps uplink
                processing_delay = self.task_sizes[i] / COMPUTING_CAPACITY
                task_delay = offloading_delay + processing_delay
                offloading_delays.append(offloading_delay)
                total_delay += task_delay

                # Penalise collection after the task's deadline.
                if self.step_count > self.deadlines[i]:
                    objective_penalty -= 5

        # BUG FIX: count collected tasks AFTER the collection loop. The
        # original counted them before it, so the step that collected the
        # final task used a stale count; since step() terminates the episode
        # that same step, the +100 completion bonus was never granted, and
        # avg_delay / info['collected_required'] lagged by one step.
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)

        # Completion bonus when every active user's task has been collected.
        if collected_required_tasks == total_required_tasks and total_required_tasks > 0:
            completion_bonus = 100

        # Aggregate reward.
        total_energy = flight_energy + comp_energy
        avg_delay = total_delay / max(collected_required_tasks, 1)

        reward = (collection_reward + proximity_reward + completion_bonus +
                  objective_penalty - 0.1 * total_energy - 10 * avg_delay)

        # Populate the info dict.
        info.update({
            'energy': total_energy,
            'delay': avg_delay,
            'collected_required': collected_required_tasks,
            'total_required': total_required_tasks,
            'comp_energy': comp_energy,
            'reward_breakdown': {
                'collection_reward': collection_reward,
                'proximity_reward': proximity_reward,
                'completion_bonus': completion_bonus,
                'objective_penalty': objective_penalty
            },
            'delay_breakdown': {
                'total_delay': total_delay,
                'avg_offloading_delay': np.mean(offloading_delays) if offloading_delays else 0
            }
        })

        return reward, info

    def render(self, episode):
        """Save a plot of user states and the UAV trajectory to results/."""
        plt.figure(figsize=(10, 8))
        for i, (x, y) in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
                plt.scatter(x, y, s=150, c=color, marker='o')
                status = "已收集" if self.collected_tasks[i] else "未收集"
                plt.annotate(f"用户{i + 1}\n({status})", (x, y),
                             textcoords="offset points", xytext=(0, 10), ha='center')
            else:
                plt.scatter(x, y, s=100, c='gray', marker='o')
                plt.annotate(f"用户{i + 1}\n(不产生任务)", (x, y),
                             textcoords="offset points", xytext=(0, 10), ha='center')

        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.7, label='UAV轨迹')
        plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
        plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

        plt.title(f"Episode {episode} - UAV任务收集轨迹")
        plt.xlabel("X坐标 (m)")
        plt.ylabel("Y坐标 (m)")
        plt.legend()
        plt.grid(True)
        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        plt.savefig(f"results/trajectory_episode_{episode}.png")
        plt.close()


# PPO actor network (GRU encoder + Gaussian policy head)
class PPOActor(nn.Module):
    """Recurrent Gaussian policy.

    A GRU encodes the state sequence; an MLP head produces the action
    mean (tanh-squashed to [-max_action, max_action]) and a
    state-dependent log standard deviation clamped to [-20, 2].
    """

    def __init__(self, state_dim, action_dim, max_action, hidden_size=HIDDEN_SIZE):
        super(PPOActor, self).__init__()
        self.max_action = max_action
        self.hidden_size = hidden_size

        # Recurrent encoder over state sequences (batch_first layout).
        self.gru = nn.GRU(state_dim, hidden_size, batch_first=True)

        # Policy MLP.
        self.fc1 = nn.Linear(hidden_size, 256)
        self.ln1 = nn.LayerNorm(256)
        self.fc2 = nn.Linear(256, 128)
        self.ln2 = nn.LayerNorm(128)

        # Distribution heads: mean and log-std.
        self.mean_layer = nn.Linear(128, action_dim)
        self.log_std_layer = nn.Linear(128, action_dim)

        # Persistent GRU hidden state, lazily (re)allocated per batch size.
        self.hidden = None

    def forward(self, state, reset_hidden=False):
        """Return (mean, std) of the action distribution for `state`."""
        n = state.size(0)
        needs_reset = reset_hidden or self.hidden is None or self.hidden.size(1) != n
        if needs_reset:
            self.reset_hidden(n)

        # Promote a (batch, dim) input to a length-1 sequence.
        if state.dim() == 2:
            state = state.unsqueeze(1)

        seq_out, self.hidden = self.gru(state, self.hidden.to(state.device))
        features = seq_out[:, -1]  # keep only the final time step

        features = F.relu(self.ln1(self.fc1(features)))
        features = F.relu(self.ln2(self.fc2(features)))

        mean = self.max_action * torch.tanh(self.mean_layer(features))
        log_std = torch.clamp(self.log_std_layer(features), -20, 2)  # keep std numerically sane
        return mean, log_std.exp()

    def sample(self, state, reset_hidden=False):
        """Draw a stochastic action; return (action, summed log-prob, mean)."""
        mean, std = self.forward(state, reset_hidden)
        policy = Normal(mean, std)
        action = policy.sample()
        log_prob = policy.log_prob(action).sum(dim=-1, keepdim=True)
        return action, log_prob, mean

    def evaluate_actions(self, state, action, reset_hidden=False):
        """Return (log-prob, entropy) of `action` under the current policy."""
        policy = Normal(*self.forward(state, reset_hidden))
        log_prob = policy.log_prob(action).sum(dim=-1, keepdim=True)
        entropy = policy.entropy().sum(dim=-1, keepdim=True)
        return log_prob, entropy

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for `batch_size` sequences."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


# PPO critic network (GRU encoder + value head)
class PPOCritic(nn.Module):
    """Recurrent state-value estimator: a GRU encoder followed by an MLP
    producing one scalar value per input state."""

    def __init__(self, state_dim, hidden_size=HIDDEN_SIZE):
        super(PPOCritic, self).__init__()
        self.hidden_size = hidden_size

        # Recurrent encoder over state sequences (batch_first layout).
        self.gru = nn.GRU(state_dim, hidden_size, batch_first=True)

        # Value MLP.
        self.fc1 = nn.Linear(hidden_size, 256)
        self.ln1 = nn.LayerNorm(256)
        self.fc2 = nn.Linear(256, 128)
        self.ln2 = nn.LayerNorm(128)
        self.value_layer = nn.Linear(128, 1)

        # Persistent GRU hidden state, lazily (re)allocated per batch size.
        self.hidden = None

    def forward(self, state, reset_hidden=False):
        """Return the estimated state value, shape (batch, 1)."""
        n = state.size(0)
        needs_reset = reset_hidden or self.hidden is None or self.hidden.size(1) != n
        if needs_reset:
            self.reset_hidden(n)

        # Promote a (batch, dim) input to a length-1 sequence.
        if state.dim() == 2:
            state = state.unsqueeze(1)

        seq_out, self.hidden = self.gru(state, self.hidden.to(state.device))
        features = seq_out[:, -1]  # keep only the final time step

        features = F.relu(self.ln1(self.fc1(features)))
        features = F.relu(self.ln2(self.fc2(features)))
        return self.value_layer(features)

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for `batch_size` sequences."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


# PPO rollout buffer
class PPOBuffer:
    """On-policy rollout storage with GAE(lambda) advantage computation.

    Transitions are appended during rollout, advantages/returns are
    computed once per update via `compute_gae`, minibatches are drawn
    with `get_batch`, and the buffer is emptied with `clear`.
    """

    def __init__(self, max_size=BUFFER_SIZE):
        # max_size is kept for interface compatibility; the buffer is
        # cleared after every update so it never grows past one rollout.
        self.max_size = max_size
        self.states = []
        self.actions = []
        self.rewards = []
        self.values = []
        self.log_probs = []
        self.dones = []
        self.advantages = []
        self.returns = []

    def add(self, state, action, reward, value, log_prob, done):
        """Append one transition to the rollout."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.values.append(value)
        self.log_probs.append(log_prob)
        self.dones.append(done)

    def compute_gae(self, next_value):
        """Compute GAE(lambda) advantages and discounted returns.

        `next_value` is the critic's bootstrap value for the state after
        the final stored transition (masked out when that transition is
        terminal).
        """
        rewards = np.array(self.rewards)
        # Append the bootstrap value so values[t + 1] exists for every t.
        values = np.array(self.values + [next_value])
        dones = np.array(self.dones)

        advantages = np.zeros_like(rewards)
        gae = 0.0

        # Backward recursion. The original had an if/else here whose two
        # branches were byte-identical; collapsed to the single case.
        for t in reversed(range(len(rewards))):
            next_non_terminal = 1.0 - dones[t]
            delta = rewards[t] + GAMMA * values[t + 1] * next_non_terminal - values[t]
            gae = delta + GAMMA * GAE_LAMBDA * next_non_terminal * gae
            advantages[t] = gae

        returns = advantages + values[:-1]

        self.advantages = advantages.tolist()
        self.returns = returns.tolist()

    def get_batch(self, batch_size):
        """Sample a random minibatch (without replacement) as tensors on `device`.

        Advantages are normalised within the minibatch. Requires
        `compute_gae` to have been called first, and batch_size <= len(self).
        """
        indices = np.random.choice(len(self.states), batch_size, replace=False)

        # Stack into ndarrays first: tensor construction from a list of
        # ndarrays is much slower (and warns) than from one contiguous array.
        states = torch.FloatTensor(np.asarray([self.states[i] for i in indices])).to(device)
        actions = torch.FloatTensor(np.asarray([self.actions[i] for i in indices])).to(device)
        old_log_probs = torch.FloatTensor(np.asarray([self.log_probs[i] for i in indices])).to(device)
        advantages = torch.FloatTensor(np.asarray([self.advantages[i] for i in indices])).to(device)
        returns = torch.FloatTensor(np.asarray([self.returns[i] for i in indices])).to(device)
        values = torch.FloatTensor(np.asarray([self.values[i] for i in indices])).to(device)

        # Normalise advantages within the minibatch.
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        return states, actions, old_log_probs, advantages, returns, values

    def clear(self):
        """Drop all stored transitions and derived quantities."""
        self.states.clear()
        self.actions.clear()
        self.rewards.clear()
        self.values.clear()
        self.log_probs.clear()
        self.dones.clear()
        self.advantages.clear()
        self.returns.clear()

    def __len__(self):
        return len(self.states)


# EWC (Elastic Weight Consolidation) for continual learning
class EWC:
    """Tracks per-parameter importance (diagonal Fisher estimate) and
    anchor weights from a finished task, and produces a quadratic penalty
    that discourages later training from drifting away from them."""

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        self.importance = {}
        self.old_params = {}

    def _calculate_fisher_info(self, buffer):
        """Estimate the diagonal Fisher information from states sampled
        out of `buffer` (squared gradients of a per-model proxy loss)."""
        fisher = {
            name: torch.zeros_like(param).to(device)
            for name, param in self.model.named_parameters()
            if param.requires_grad
        }

        self.model.train()
        n_samples = min(self.fisher_sample_size, len(buffer))
        if n_samples <= 0:
            return fisher

        for _ in range(n_samples):
            # Draw one random stored state.
            idx = np.random.randint(0, len(buffer.states))
            state = torch.FloatTensor(buffer.states[idx]).unsqueeze(0).to(device)

            self.model.zero_grad()
            self.model.reset_hidden(1)
            if isinstance(self.model, PPOActor):
                # Actor proxy: negative policy entropy.
                mean, std = self.model(state)
                loss = -Normal(mean, std).entropy().sum()
            else:  # PPOCritic
                # Critic proxy: mean predicted value.
                loss = self.model(state).mean()

            loss.backward()

            # Accumulate squared gradients, averaged over the sample count.
            for name, param in self.model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    fisher[name] += param.grad.pow(2) / n_samples

        return fisher

    def store_task_parameters(self, task_id, buffer):
        """Snapshot current weights and recompute Fisher importance for `task_id`."""
        print(f"为任务 {task_id} 存储参数并计算Fisher信息矩阵")
        self.old_params = {
            name: param.data.clone()
            for name, param in self.model.named_parameters()
            if param.requires_grad
        }

        self.importance = self._calculate_fisher_info(buffer)
        print(f"存储了 {len(self.old_params)} 个参数并计算了Fisher矩阵")

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Return lam * sum_i F_i (theta_i - theta*_i)^2, or 0 before any
        task has been consolidated."""
        if not self.old_params or not self.importance:
            return 0

        loss = 0
        for name, param in self.model.named_parameters():
            if name in self.old_params and name in self.importance and param.requires_grad:
                loss += torch.sum(self.importance[name] * (param - self.old_params[name]).pow(2))

        return lam * loss


# PPO algorithm implementation
class PPO:
    """Recurrent PPO agent with EWC regularisation across task phases."""

    def __init__(self, state_dim, action_dim, max_action):
        self.actor = PPOActor(state_dim, action_dim, max_action).to(device)
        self.critic = PPOCritic(state_dim).to(device)

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.buffer = PPOBuffer()
        self.total_it = 0

        # EWC continual-learning regularisers (one per network).
        self.ewc_actor = EWC(self.actor)
        self.ewc_critic = EWC(self.critic)
        self.current_task = 1

        # Reward-driven plateau schedulers (mode='max': reward should rise).
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True)

    def select_action(self, state, evaluate=False):
        """Return (action, log_prob, value) for `state`.

        With evaluate=True the action is the deterministic policy mean and
        log_prob/value are None; otherwise the action is sampled and
        log_prob/value are Python floats for buffer storage.
        """
        if len(state.shape) == 1:
            state = np.expand_dims(state, 0)
        state = torch.FloatTensor(state).to(device)

        with torch.no_grad():
            if evaluate:
                # Deterministic action (policy mean) for evaluation.
                mean, _ = self.actor(state)
                return mean.cpu().data.numpy().flatten(), None, None
            else:
                # Stochastic action for training rollouts.
                action, log_prob, _ = self.actor.sample(state)
                value = self.critic(state)
                return action.cpu().data.numpy().flatten(), log_prob.cpu().data.numpy().item(), value.cpu().data.numpy().item()

    def switch_task(self, task_id):
        """Consolidate EWC statistics for the finished task, then start `task_id`."""
        print(f"\n切换到任务 {task_id}")
        if self.current_task > 0 and len(self.buffer) > 0:
            self.ewc_actor.store_task_parameters(self.current_task, self.buffer)
            self.ewc_critic.store_task_parameters(self.current_task, self.buffer)

        print("为新任务清空经验缓冲区")
        self.buffer.clear()
        self.current_task = task_id

        # Reset recurrent state for the new task.
        self.actor.reset_hidden()
        self.critic.reset_hidden()
        print(f"为新任务 {task_id} 重置GRU状态")

    def train(self):
        """Run one PPO update over the current rollout buffer, then clear it.

        Returns a dict of mean actor/critic losses and entropy; zeros when
        the buffer holds fewer than BATCH_SIZE transitions.
        """
        if len(self.buffer) < BATCH_SIZE:
            return {"actor_loss": 0.0, "critic_loss": 0.0, "entropy": 0.0}

        # Bootstrap value for GAE. NOTE(review): this evaluates the LAST
        # STORED state rather than its successor; harmless when training
        # runs after episode termination (the done mask zeroes the
        # bootstrap) but worth confirming for truncated rollouts.
        with torch.no_grad():
            last_state = torch.FloatTensor(self.buffer.states[-1]).unsqueeze(0).to(device)
            self.critic.reset_hidden(1)
            next_value = self.critic(last_state).item()

        # Compute GAE advantages and returns.
        self.buffer.compute_gae(next_value)

        # PPO epochs over random minibatches.
        actor_losses = []
        critic_losses = []
        entropies = []

        for epoch in range(PPO_EPOCHS):
            # Sample a minibatch.
            states, actions, old_log_probs, advantages, returns, old_values = self.buffer.get_batch(
                min(BATCH_SIZE, len(self.buffer)))

            # Critic update: regress predicted values onto GAE returns.
            self.critic.reset_hidden(states.size(0))
            values = self.critic(states).squeeze()
            critic_loss = F.mse_loss(values, returns)

            # EWC regularisation (active from the second task onward).
            if self.current_task > 1:
                critic_ewc_loss = self.ewc_critic.calculate_ewc_loss()
                critic_loss += critic_ewc_loss

            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.critic.parameters(), MAX_GRAD_NORM)
            self.critic_optimizer.step()

            # Actor update.
            self.actor.reset_hidden(states.size(0))
            log_probs, entropy = self.actor.evaluate_actions(states, actions)

            # Importance-sampling ratio. BUG FIX: evaluate_actions returns
            # log-probs of shape (B, 1) while the stored old_log_probs are
            # shape (B,); subtracting them directly broadcast to a (B, B)
            # matrix, corrupting the surrogate loss. Squeeze to (B,) first.
            ratio = torch.exp(log_probs.squeeze(-1) - old_log_probs)

            # Clipped PPO surrogate objective with an entropy bonus.
            surr1 = ratio * advantages
            surr2 = torch.clamp(ratio, 1.0 - CLIP_EPSILON, 1.0 + CLIP_EPSILON) * advantages
            actor_loss = -torch.min(surr1, surr2).mean() - ENTROPY_COEF * entropy.mean()

            # EWC regularisation (active from the second task onward).
            if self.current_task > 1:
                actor_ewc_loss = self.ewc_actor.calculate_ewc_loss()
                actor_loss += actor_ewc_loss

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), MAX_GRAD_NORM)
            self.actor_optimizer.step()

            actor_losses.append(actor_loss.item())
            critic_losses.append(critic_loss.item())
            entropies.append(entropy.mean().item())

        # Discard the on-policy rollout after the update.
        self.buffer.clear()

        return {
            "actor_loss": np.mean(actor_losses),
            "critic_loss": np.mean(critic_losses),
            "entropy": np.mean(entropies)
        }

    def update_lr_schedulers(self, reward):
        """Step both plateau schedulers on the latest episode reward."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


def train():
    """Run the 3-phase continual-learning training loop.

    Each phase activates a different subset of task-generating users,
    consolidates EWC statistics on the phase switch, runs one PPO update
    per episode, and logs rewards / collection counts / energy / delay /
    losses with periodic plots and checkpoints under results/.
    Returns the trained (agent, env) pair.
    """
    os.makedirs("results", exist_ok=True)
    env = Environment()

    # State layout: UAV (x, y) + 4 features per user + normalised step count.
    state_dim = 2 + NUM_USERS * 4 + 1
    action_dim = 2
    max_action = 1

    agent = PPO(state_dim, action_dim, max_action)
    total_episodes = 600
    episodes_per_task = 200
    eval_freq = 50

    # Training history trackers.
    rewards_history = []
    smoothed_rewards = []
    collection_history = []
    energy_history = []
    delay_history = []
    best_reward = -float('inf')
    best_collection = 0
    losses = {"actor": [], "critic": [], "entropy": []}

    start_time = time.time()

    for phase in range(1, 4):
        env.update_task_generating_users(phase)
        agent.switch_task(phase)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode
            state = env.reset()
            agent.actor.reset_hidden()
            agent.critic.reset_hidden()

            episode_reward = 0
            last_collection = 0
            episode_losses = {"actor": [], "critic": [], "entropy": []}

            for step in range(1, MAX_STEPS + 1):
                action, log_prob, value = agent.select_action(state, evaluate=False)
                next_state, reward, done, info = env.step(action)

                # Store the transition in the rollout buffer.
                agent.buffer.add(state, action, reward, value, log_prob, done)

                state = next_state
                episode_reward += reward
                last_collection = info["collected_required"]

                if done:
                    if global_episode % eval_freq == 0:
                        print(f"--- Episode {global_episode} 结束，生成最终轨迹图 ---")
                        env.render(global_episode)
                    break
            # One PPO update per episode.
            loss_info = agent.train()
            if loss_info:
                episode_losses["actor"].append(loss_info["actor_loss"])
                episode_losses["critic"].append(loss_info["critic_loss"])
                episode_losses["entropy"].append(loss_info["entropy"])

            # Record per-episode history (info is from the final env step).
            rewards_history.append(episode_reward)
            collection_history.append(last_collection)
            energy_history.append(info["energy"])
            delay_history.append(info["delay"])

            # 10-episode moving average of the reward.
            if len(rewards_history) >= 10:
                smoothed_rewards.append(np.mean(rewards_history[-10:]))
            else:
                smoothed_rewards.append(episode_reward)

            # Record mean losses for this episode.
            if episode_losses["actor"]:
                losses["actor"].append(np.mean(episode_losses["actor"]))
            if episode_losses["critic"]:
                losses["critic"].append(np.mean(episode_losses["critic"]))
            if episode_losses["entropy"]:
                losses["entropy"].append(np.mean(episode_losses["entropy"]))

            # Step the LR schedulers on the episode reward.
            agent.update_lr_schedulers(episode_reward)

            # Save the best actor (ranked by collection ratio, then reward).
            current_required = info["total_required"]
            collection_ratio = last_collection / current_required if current_required > 0 else 0
            if collection_ratio > best_collection or (
                    collection_ratio == best_collection and episode_reward > best_reward):
                best_reward = episode_reward
                best_collection = collection_ratio
                torch.save(agent.actor.state_dict(), f"results/best_actor_phase_{phase}.pth")

            # Elapsed wall-clock time.
            elapsed_time = time.time() - start_time

            # Values for the progress line.
            collected_required = info.get("collected_required", 0)
            total_required = info.get("total_required", 1)

            # Mean losses for the progress line.
            avg_actor_loss = np.mean(episode_losses["actor"]) if episode_losses["actor"] else 0.0
            avg_critic_loss = np.mean(episode_losses["critic"]) if episode_losses["critic"] else 0.0
            avg_entropy = np.mean(episode_losses["entropy"]) if episode_losses["entropy"] else 0.0

            # Reward-breakdown string.
            reward_str = ""
            if 'reward_breakdown' in info:
                rb = info['reward_breakdown']
                reward_str = (f"Rwd(C:{rb['collection_reward']:.1f} P:{rb['proximity_reward']:.1f} "
                              f"B:{rb['completion_bonus']:.1f} ObjP:{rb['objective_penalty']:.1f})")

            # Energy-breakdown string.
            energy_str = ""
            if 'flight_energy' in info and 'comp_energy' in info:
                energy_str = f"E(F:{info['flight_energy']:.1f} C:{info['comp_energy']:.1f})"

            # Delay-breakdown string.
            delay_str = ""
            if 'delay_breakdown' in info:
                db = info['delay_breakdown']
                delay_str = f"D(Tot:{db['total_delay']:.2f}s AvgOff:{db['avg_offloading_delay']:.3f}s)"

            # Progress line (format kept identical to the original script).
            print(
                f"Phase {phase} Ep {episode:3d}/{episodes_per_task} "
                f"Tasks {collected_required:2d}/{total_required:2d} "
                f"Steps {env.step_count:3d} "
                f"Loss(A/C/E) {avg_actor_loss:.3f}/{avg_critic_loss:.3f}/{avg_entropy:.3f} | "
                f"Total Rwd: {episode_reward:.2f} "
                f"[{reward_str}] | "
                f"Total E: {info.get('energy', 0):.1f} "
                f"[{energy_str}] | "
                f"Avg D: {info.get('delay', 0):.3f}s "
                f"[{delay_str}] | "
                f"Time: {elapsed_time:.1f}s"
            )

            # Periodic training-curve plots and checkpointing.
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                plt.figure(figsize=(24, 5))

                # Reward curve
                plt.subplot(1, 5, 1)
                plt.plot(rewards_history, alpha=0.3, color='blue', label='Raw')
                plt.plot(smoothed_rewards, color='red', label='Smoothed')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
                plt.title("Reward")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                # Collected-tasks curve
                plt.subplot(1, 5, 2)
                plt.plot(collection_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Collected Tasks")
                plt.xlabel("Episode")
                plt.ylabel("Number of Tasks")
                plt.grid(True)

                # Energy curve
                plt.subplot(1, 5, 3)
                plt.plot(energy_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Total Energy")
                plt.xlabel("Episode")
                plt.ylabel("Energy")
                plt.grid(True)

                # Delay curve
                plt.subplot(1, 5, 4)
                plt.plot(delay_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Avg Delay")
                plt.xlabel("Episode")
                plt.ylabel("Delay (s)")
                plt.grid(True)

                # Loss / entropy curves
                plt.subplot(1, 5, 5)
                if losses["actor"]:
                    plt.plot(losses["actor"], label='Actor Loss')
                if losses["critic"]:
                    plt.plot(losses["critic"], label='Critic Loss')
                if losses["entropy"]:
                    plt.plot(losses["entropy"], label='Entropy')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Training Loss & Entropy")
                plt.xlabel("Episode")
                plt.ylabel("Loss/Entropy")
                plt.legend()
                plt.grid(True)

                plt.tight_layout()
                plt.savefig(f"results/training_curves_episode_{global_episode}.png")
                plt.close()

                # Save a full checkpoint.
                torch.save({
                    'actor_state_dict': agent.actor.state_dict(),
                    'critic_state_dict': agent.critic.state_dict(),
                    'actor_optimizer': agent.actor_optimizer.state_dict(),
                    'critic_optimizer': agent.critic_optimizer.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history': rewards_history,
                    'collection_history': collection_history,
                    'energy_history': energy_history,
                    'delay_history': delay_history,
                    'best_reward': best_reward,
                    'best_collection': best_collection
                }, f"results/checkpoint_episode_{global_episode}.pt")

        # Save per-phase model weights.
        torch.save(agent.actor.state_dict(), f"results/actor_phase_{phase}.pth")
        torch.save(agent.critic.state_dict(), f"results/critic_phase_{phase}.pth")

    print(f"训练完成！最佳结果：{best_collection * 100:.1f}% 任务收集，奖励：{best_reward:.2f}")
    return agent, env


def test_and_visualize(agent, env, model_path="results/actor_phase_3.pth", phase=3):
    """Run one greedy evaluation episode with a trained actor and save plots.

    Loads the actor weights from ``model_path``, rolls out a single episode
    on ``env`` for the given continual-learning ``phase``, then writes a
    trajectory figure and reward curves under ``results/`` and prints a
    task-collection summary.

    Args:
        agent: PPO agent; its ``actor`` network is loaded and set to eval mode.
        env: Environment instance (reset here before the rollout; assumed to
            expose ``step``/``render`` — confirm against the Environment class).
        model_path: Path to the saved actor ``state_dict``.
        phase: Phase id (1-3) selecting which users generate tasks.
    """
    # map_location lets a checkpoint saved on GPU load on a CPU-only host.
    agent.actor.load_state_dict(torch.load(model_path, map_location=device))
    agent.actor.eval()

    env.update_task_generating_users(phase)
    state = env.reset()
    agent.actor.reset_hidden()

    total_reward = 0
    step_rewards = []
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS)
    collection_order = []

    for step in range(1, MAX_STEPS + 1):
        # Deterministic action; no_grad avoids building autograd graphs at eval.
        with torch.no_grad():
            action, _, _ = agent.select_action(state, evaluate=True)
        trajectory.append(env.uav_position.copy())

        # Snapshot collection flags so newly collected tasks can be detected.
        collected_before = env.collected_tasks.copy()
        next_state, reward, done, info = env.step(action)

        # Record the step at which each active user's task was first collected.
        for i in range(NUM_USERS):
            if (env.task_generating_users[i] and
                    env.collected_tasks[i] and
                    not collected_before[i]):
                collection_times[i] = step
                collection_order.append(i)

        total_reward += reward
        step_rewards.append(reward)
        state = next_state

        # Render periodically (and always on the final step).
        if step % 5 == 0 or done:
            env.render(step)

        if done:
            break

    # Final trajectory figure.
    trajectory = np.array(trajectory)
    plt.figure(figsize=(12, 10))

    # Plot each user's position, color-coded by task status.
    for i, (x, y) in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            if env.collected_tasks[i]:
                color = 'green'
                plt.scatter(x, y, s=150, c=color, marker='o')
                plt.annotate(f"用户 {i + 1}\n(步数 {int(collection_times[i])})",
                             (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
            else:
                color = 'red'
                plt.scatter(x, y, s=150, c=color, marker='o')
                plt.annotate(f"用户 {i + 1}\n(未收集)",
                             (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
        else:
            color = 'gray'
            plt.scatter(x, y, s=100, c=color, marker='o')
            plt.annotate(f"用户 {i + 1}\n(不产生任务)",
                         (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)

    # UAV flight path with start/end markers.
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    # Step-count labels every 10 points along the path.
    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]),
                     fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Dashed lines from the UAV position at collection time to each user.
    # (t, not step: avoid shadowing the rollout loop variable above.)
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and env.collected_tasks[i]:
            t = int(collection_times[i])
            if t < len(trajectory):
                uav_pos = trajectory[t]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]],
                         'g--', alpha=0.5)

    plt.title(
        f"UAV任务收集轨迹 (阶段{phase}: 收集 {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)} 任务, 步数: {env.step_count})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.legend()
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig(f"results/final_uav_trajectory_phase_{phase}.png")
    plt.close()

    # Reward analysis: per-step and cumulative curves.
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 2, 1)
    plt.plot(step_rewards)
    plt.title("步奖励")
    plt.xlabel("步数")
    plt.ylabel("奖励")
    plt.grid(True)

    plt.subplot(1, 2, 2)
    plt.plot(np.cumsum(step_rewards))
    plt.title("累计奖励")
    plt.xlabel("步数")
    plt.ylabel("累计奖励")
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(f"results/test_rewards_phase_{phase}.png")
    plt.close()

    # Print the evaluation summary.
    print(f"\n测试结果 (阶段 {phase}):")
    collected_count = sum(env.collected_tasks & env.task_generating_users)
    total_count = sum(env.task_generating_users)
    percentage = collected_count / total_count * 100 if total_count > 0 else 0
    print(f"收集任务: {collected_count}/{total_count} ({percentage:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

    print("\n任务收集详情:")
    # Sort collected users by the step at which they were collected.
    collection_indices = [(i, int(collection_times[i])) for i in range(NUM_USERS)
                          if env.task_generating_users[i] and env.collected_tasks[i]]
    collection_indices.sort(key=lambda x: x[1])

    for i, t in collection_indices:
        print(f"用户 {i + 1}: 在步数 {t} 收集")

    # List tasks that were never collected.
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and not env.collected_tasks[i]:
            print(f"用户 {i + 1}: 未收集")


if __name__ == "__main__":
    banner = "=" * 60

    # Intro banner describing the training setup.
    for line in (
        banner,
        "开始使用PPO算法训练UAV-MEC任务收集系统",
        banner,
        "主要特点：",
        "1. 使用PPO算法进行策略优化",
        "2. 集成GRU网络处理序列信息",
        "3. 采用GAE计算优势函数",
        "4. 支持EWC持续学习",
        "5. 保持所有原有的训练和可视化功能",
        banner,
    ):
        print(line)

    # Run the full multi-phase training loop.
    agent, env = train()

    print("\n" + banner)
    print("训练完成！开始测试各阶段模型性能...")
    print(banner)

    # Evaluate the actor checkpoint saved at the end of each phase.
    for stage in range(1, 4):
        print(f"\n{'=' * 20} 测试阶段 {stage} {'=' * 20}")
        test_and_visualize(agent, env,
                           model_path=f"results/actor_phase_{stage}.pth",
                           phase=stage)

    # Closing summary: PPO advantages and where the result files live.
    for line in (
        "\n" + banner,
        "PPO算法的主要优势：",
        "1. 训练稳定性：截断重要性采样避免策略崩溃",
        "2. 样本利用：通过多轮更新充分利用收集的经验",
        "3. 策略保守：渐进式策略更新，适合复杂环境",
        "4. 长期规划：on-policy特性适合需要一致性的任务",
        "5. 实现简单：参数少，易于调试和优化",
        banner,
        "结果文件保存在 results/ 目录中：",
        "- 训练曲线: training_curves_episode_*.png",
        "- 轨迹图: final_uav_trajectory_phase_*.png",
        "- 模型检查点: checkpoint_episode_*.pt",
        "- 各阶段模型: actor_phase_*.pth, critic_phase_*.pth",
        banner,
    ):
        print(line)
