import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time
import matplotlib

'''使用没加时延能耗等的目标函数，对比没有重新学习的强化学习'''
# (The string above, kept verbatim, says: "uses an objective function without
#  delay/energy terms; comparison against RL without re-learning".)
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

# Check GPU availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font so plot titles render
plt.rcParams['axes.unicode_minus'] = False

EPISODES_PER_TASK = 200

# Environment parameters
AREA_SIZE = 100  # area size: 100m x 100m
NUM_USERS = 15  # number of ground users
MAX_STEPS = 200  # maximum steps per episode
MAX_DISTANCE_COLLECT = 15  # maximum distance at which the UAV can collect a task

# UAV parameters
UAV_SPEED = 20.0  # UAV speed (m/s)
UAV_ENERGY_PER_METER = 0.1  # energy consumed per meter flown
UAV_HOVER_ENERGY = 0.5  # hovering energy (charged once per task collection)

# Task parameters
TASK_SIZE = [10, 50]  # task size range (MB)

# PPO hyperparameters
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99
GAE_LAMBDA = 0.95
CLIP_RATIO = 0.2
VALUE_CLIP_RATIO = 0.2
ENTROPY_COEFF = 0.01
VALUE_COEFF = 0.5
MAX_GRAD_NORM = 0.5
PPO_EPOCHS = 4
MINI_BATCH_SIZE = 64
BUFFER_SIZE = 2048
REWARD_SCALE = 0.1

# EWC parameters
EWC_LAMBDA = 2  # EWC regularization strength
FISHER_SAMPLE_SIZE = 2000  # number of samples used to estimate the Fisher information matrix

# GRU parameters
SEQUENCE_LENGTH = 10  # GRU input sequence length
HIDDEN_SIZE = 128  # GRU hidden layer size


class Environment:
    def __init__(self):
        # 初始化用户位置 (固定)
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))

        # 初始化任务大小 (固定)
        self.task_sizes = np.random.uniform(TASK_SIZE[0], TASK_SIZE[1], size=NUM_USERS)

        # 任务生成状态 - 初始所有用户都生成任务
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)

        # UAV初始位置
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # 任务收集状态
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # 步数计数器
        self.step_count = 0

        # 总延迟和能耗
        self.total_delay = 0
        self.total_energy = 0

        # 历史轨迹
        self.trajectory = [self.uav_position.copy()]

        # 上一次的距离，用于计算奖励
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # 存储观测历史用于GRU
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        # 当前环境阶段
        self.current_phase = 1

    def update_task_generating_users(self, phase):
        """根据训练阶段更新生成任务的用户"""
        self.current_phase = phase

        if phase == 1:
            # 第一阶段：所有15个用户都产生任务
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
            print(f"Phase {phase}: All {NUM_USERS} users are generating tasks")

        elif phase == 2:
            # 第二阶段：随机选择11个用户产生任务
            indices = np.random.choice(NUM_USERS, 11, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True

            # 保存第二阶段的用户选择，供第三阶段使用
            self.phase2_generating_users = set(indices)
            self.phase2_non_generating_users = set(range(NUM_USERS)) - self.phase2_generating_users

            print(f"Phase {phase}: {len(self.phase2_generating_users)} users are generating tasks")
            print(f"Task generating users: {sorted(list(self.phase2_generating_users))}")
            print(f"Non-generating users: {sorted(list(self.phase2_non_generating_users))}")

        else:  # phase == 3
            # 第三阶段：7个用户产生任务
            # 规则：第二阶段中未产生任务的4个用户在第三阶段中都会产生任务
            # 再从第二阶段产生任务的11个用户中随机选择3个

            # 确保第二阶段的非任务生成用户都参与第三阶段
            phase3_users = set(self.phase2_non_generating_users)  # 4个用户

            # 从第二阶段的任务生成用户中随机选择3个
            additional_users = np.random.choice(list(self.phase2_generating_users), 3, replace=False)
            phase3_users.update(additional_users)

            # 设置第三阶段的任务生成用户
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            for user_id in phase3_users:
                self.task_generating_users[user_id] = True

            print(f"Phase {phase}: {len(phase3_users)} users are generating tasks")
            print(f"Task generating users: {sorted(list(phase3_users))}")
            print(f"From phase 2 non-generating: {sorted(list(self.phase2_non_generating_users))}")
            print(f"From phase 2 generating: {sorted(list(additional_users))}")

        print(f"Final task generating users for phase {phase}: {np.where(self.task_generating_users)[0]}")
        print("-" * 50)

    def reset(self):
        # 重置UAV位置到中心点
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # 重置任务收集状态
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # 重置步数
        self.step_count = 0

        # 重置总延迟和能耗
        self.total_delay = 0
        self.total_energy = 0

        # 重置轨迹
        self.trajectory = [self.uav_position.copy()]

        # 重置上一次的距离
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # 重置观测历史
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        # 初始状态
        state = self._get_state()

        # 填充观测历史
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(state)

        return self._get_gru_state()

    def step(self, action):
        # 更新UAV位置 (action是相对移动，范围[-1,1])
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)

        # 记录轨迹
        self.trajectory.append(self.uav_position.copy())

        # 计算移动距离和能耗
        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        energy_consumed = distance_moved * UAV_ENERGY_PER_METER

        # 计算与所有用户的新距离
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # 收集任务
        newly_collected = 0
        collected_indices = []

        for i in range(NUM_USERS):
            # 只收集生成任务的用户的任务
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    collected_indices.append(i)

                    # 计算任务延迟
                    delay = new_distances[i] * self.task_sizes[i] / 10
                    self.total_delay += delay

                    # 悬停能耗
                    energy_consumed += UAV_HOVER_ENERGY

        # 累计总能耗
        self.total_energy += energy_consumed

        # 更新步数
        self.step_count += 1

        # 计算奖励
        reward = self._calculate_reward(newly_collected, energy_consumed, collected_indices, new_distances,
                                        self.last_distances)

        # 更新上一次的距离
        self.last_distances = new_distances

        # 判断是否结束 - 收集完所有任务生成用户的任务或达到最大步数
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        # 获取当前状态
        state = self._get_state()

        # 更新观测历史
        self.observation_history.append(state)

        return self._get_gru_state(), reward, done, {
            "collected": sum(self.collected_tasks),
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": self.total_energy,
            "delay": self.total_delay,
            "newly_collected": newly_collected,
            "total_users": NUM_USERS
        }

    def _get_state(self):
        # 状态表示: UAV位置, 与每个用户的距离, 任务收集状态, 任务生成状态, 归一化步数
        state = np.zeros(2 + NUM_USERS * 3 + 1)

        # UAV位置 (归一化)
        state[0:2] = self.uav_position / AREA_SIZE

        # 与每个用户的距离, 任务收集状态和任务生成状态
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 3
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # 归一化距离
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])

        # 归一化步数
        state[-1] = self.step_count / MAX_STEPS

        return state

    def _get_gru_state(self):
        """返回用于GRU的序列状态"""
        # 确保观测历史已满
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())

        return np.array(list(self.observation_history))

    def _calculate_reward(self, newly_collected, energy_consumed, collected_indices, new_distances, old_distances):
        # --- 核心修改 ---
        # 1. 增加一个固定的时间/步数惩罚，鼓励效率
        time_penalty = 0.1  # 每走一步就有一个小的惩罚

        # 2. 将 progress_reward 的概念融入 collection_reward
        # 当收集到新任务时，给予更大的奖励
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users)

        # 基础收集奖励保持不变
        collection_reward = newly_collected * 20

        # 当收集到任务时，给予额外的进度奖励
        # 任务越往后，完成的价值越大
        if newly_collected > 0 and total_required > 0:
            progress_bonus = (collected_required / total_required) * 15  # 例如，收集第9个任务(共10个)时，奖励 0.9*15
            collection_reward += progress_bonus

        # 能耗惩罚 - 可以稍微加大一点点，让长途飞行代价更高
        energy_penalty = energy_consumed * 0.8  # 从 0.5 增加到 0.8

        # 接近​未收集任务的奖励 (基本保持，但权重可以调整)
        proximity_reward = 0
        uncollected_tasks_count = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                uncollected_tasks_count += 1
                dist_diff = old_distances[i] - new_distances[i]
                # 简化 proximity_reward, 使其更专注于"接近"这件事本身
                # 移除复杂的 progress_factor，因为进度奖励已经改为一次性事件
                proximity_factor = max(0, 1 - (new_distances[i] / (AREA_SIZE)) ** 2)  # 用区域大小归一化
                proximity_reward += dist_diff * 0.5 * proximity_factor  # 降低权重，避免其主导奖励

        # 如果没有未收集任务，不提供接近奖励
        if uncollected_tasks_count == 0:
            proximity_reward = 0

        # 4. 最终完成奖励，这个值必须足够大，是智能体的"终极目标"
        completion_reward = 0
        if total_required > 0 and collected_required == total_required:
            # 完成奖励 = 一个巨大的正数 - (步数 * 步数惩罚因子)，鼓励用更少的步数完成
            completion_reward = 200 - self.step_count * 0.2

        # --- 移除有问题的奖励项 ---
        # 移除原有的 progress_reward 和 efficiency_penalty，因为它们已被新的机制替代且容易导致问题
        # efficiency_penalty 逻辑复杂，可能引入不必要的干扰，先简化掉

        # 综合奖励
        # reward = collection_reward + proximity_reward + completion_reward - energy_penalty - time_penalty
        # print(f"Step {self.step_count}: C_Rew={collection_reward:.2f}, P_Rew={proximity_reward:.2f}, Comp_Rew={completion_reward:.2f}, E_Pen={energy_penalty:.2f}, T_Pen={time_penalty:.2f}")

        reward = collection_reward + proximity_reward + completion_reward - energy_penalty - time_penalty

        # 奖励缩放
        return reward * REWARD_SCALE

    def render(self, episode=0, clear_output=True):
        """可视化当前环境状态"""
        plt.figure(figsize=(10, 10))

        # 绘制用户位置和任务状态
        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                # 生成任务的用户
                if self.collected_tasks[i]:
                    color = 'green'  # 已收集
                else:
                    color = 'red'  # 未收集
            else:
                # 不生成任务的用户
                color = 'gray'  # 灰色表示不生成任务

            plt.scatter(pos[0], pos[1], s=100, c=color)
            plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=12)

        # 绘制UAV当前位置和轨迹
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')

        # 绘制收集范围
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]),
                            MAX_DISTANCE_COLLECT, color='blue', fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)

        # 添加任务状态信息
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        plt.title(title)
        plt.grid(True)

        plt.savefig(f" results/step_{episode}_{self.step_count}.png")
        plt.close()


class GRUActor(nn.Module):
    """Recurrent Gaussian policy: a GRU encoder followed by an MLP action head.

    forward() consumes a sequence of observations and uses only the final GRU
    output. The action standard deviation is a state-independent learnable
    parameter (self.log_std). Sampled actions are NOT squashed here; the
    environment clips them to [-1, 1].
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()

        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.action_dim = action_dim
        self.max_action = max_action

        # GRU sequence encoder
        self.gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        # Fully connected head - outputs the action mean
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)

        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)

        # Action log standard deviation (learnable, state-independent)
        self.log_std = nn.Parameter(torch.zeros(action_dim))

        # Persisted GRU hidden state (lazily created / explicitly reset)
        self.hidden = None

        # Initialize network weights
        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights, zero bias on all linear layers
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        """Return (action_mean, action_std). state: [batch, seq_len, state_dim]."""
        # Reset the hidden state when requested or on first use
        if reset_hidden or self.hidden is None:
            self.reset_hidden(state.size(0))

        # Run the GRU over the sequence
        gru_out, self.hidden = self.gru(state, self.hidden)

        # Use only the last output of the sequence
        x = gru_out[:, -1]

        # MLP head
        x = self.ln1(torch.relu(self.layer1(x)))
        x = self.ln2(torch.relu(self.layer2(x)))
        action_mean = torch.tanh(self.layer3(x)) * self.max_action

        # State-independent standard deviation
        action_std = torch.exp(self.log_std)

        return action_mean, action_std

    def get_action_and_log_prob(self, state, reset_hidden=False):
        """Sample an action and return it with its summed log-probability."""
        action_mean, action_std = self.forward(state, reset_hidden)
        dist = Normal(action_mean, action_std)
        action = dist.sample()
        log_prob = dist.log_prob(action).sum(dim=-1)
        return action, log_prob

    def evaluate_actions(self, state, action, reset_hidden=False):
        """Return (log_prob, entropy) of given actions under the current policy."""
        action_mean, action_std = self.forward(state, reset_hidden)
        dist = Normal(action_mean, action_std)
        log_prob = dist.log_prob(action).sum(dim=-1)
        entropy = dist.entropy().sum(dim=-1)
        return log_prob, entropy

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for the given batch size."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class GRUCritic(nn.Module):
    """Recurrent state-value network: a GRU encoder followed by an MLP head.

    Consumes a sequence of observations, encodes it with a GRU, and maps the
    final GRU output to a scalar value estimate.
    """

    def __init__(self, state_dim):
        super(GRUCritic, self).__init__()

        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # Sequence encoder.
        self.gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True,
        )

        # Value head (creation order kept stable for seeded initialization).
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, 1)

        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)

        # Persisted GRU hidden state; created lazily or via reset_hidden().
        self.hidden = None

        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights, small positive bias on every linear layer.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.constant_(module.bias, 0.01)

    def forward(self, state, reset_hidden=False):
        """Return value estimates of shape [batch]; state is [batch, seq_len, state_dim]."""
        # (Re)initialize the hidden state on demand.
        if reset_hidden or self.hidden is None:
            self.reset_hidden(state.size(0))

        seq_out, self.hidden = self.gru(state, self.hidden)

        # Keep only the final time step of the sequence.
        h = seq_out[:, -1]

        h = self.ln1(F.relu(self.layer1(h)))
        h = self.ln2(F.relu(self.layer2(h)))
        return self.layer3(h).squeeze(-1)

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for the given batch size."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class PPOBuffer:
    """Fixed-size rollout buffer for PPO with GRU sequence states.

    Stores (state-sequence, action, reward, value, log_prob, done) tuples,
    computes GAE advantages/returns, and hands back torch tensors.

    Fix: advantage normalization in get_batch() previously ran over the
    entire backing array (including unused zero slots), which skewed the
    mean/std whenever the buffer was not completely full. It now normalizes
    only the `size` valid entries.

    Generalization (backward compatible): seq_len / gamma / lam default to
    the module-level SEQUENCE_LENGTH / GAMMA / GAE_LAMBDA when not given.
    """

    def __init__(self, buffer_size, state_dim, action_dim, seq_len=None):
        # Resolve the sequence length lazily so callers may override it.
        if seq_len is None:
            seq_len = SEQUENCE_LENGTH
        self.buffer_size = buffer_size
        self.ptr = 0   # next write index (wraps around)
        self.size = 0  # number of valid entries

        self.states = np.zeros((buffer_size, seq_len, state_dim), dtype=np.float32)
        self.actions = np.zeros((buffer_size, action_dim), dtype=np.float32)
        self.rewards = np.zeros(buffer_size, dtype=np.float32)
        self.values = np.zeros(buffer_size, dtype=np.float32)
        self.log_probs = np.zeros(buffer_size, dtype=np.float32)
        self.dones = np.zeros(buffer_size, dtype=np.float32)

        self.advantages = np.zeros(buffer_size, dtype=np.float32)
        self.returns = np.zeros(buffer_size, dtype=np.float32)

    def add(self, state, action, reward, value, log_prob, done):
        """Store one transition; oldest entries are overwritten when full."""
        self.states[self.ptr] = state
        self.actions[self.ptr] = action
        self.rewards[self.ptr] = reward
        self.values[self.ptr] = value
        self.log_probs[self.ptr] = log_prob
        self.dones[self.ptr] = done

        self.ptr = (self.ptr + 1) % self.buffer_size
        self.size = min(self.size + 1, self.buffer_size)

    def compute_gae(self, last_value, gamma=None, lam=None):
        """Compute GAE advantages and returns in place.

        last_value bootstraps the value of the state following the final
        stored transition. gamma/lam default to GAMMA/GAE_LAMBDA.
        """
        if gamma is None:
            gamma = GAMMA
        if lam is None:
            lam = GAE_LAMBDA

        self.advantages.fill(0)
        self.returns.fill(0)

        gae = 0
        for t in reversed(range(self.size)):
            if t == self.size - 1:
                next_value = last_value
                next_done = 0
            else:
                next_value = self.values[t + 1]
                next_done = self.dones[t + 1]

            delta = self.rewards[t] + gamma * next_value * (1 - next_done) - self.values[t]
            gae = delta + gamma * lam * (1 - next_done) * gae
            self.advantages[t] = gae
            self.returns[t] = gae + self.values[t]

    def get_batch(self):
        """Return (states, actions, advantages, returns, log_probs) as tensors.

        Advantages are standardized over the valid entries only.
        """
        valid = self.advantages[:self.size]
        self.advantages[:self.size] = (valid - valid.mean()) / (valid.std() + 1e-8)

        return (torch.FloatTensor(self.states[:self.size]),
                torch.FloatTensor(self.actions[:self.size]),
                torch.FloatTensor(self.advantages[:self.size]),
                torch.FloatTensor(self.returns[:self.size]),
                torch.FloatTensor(self.log_probs[:self.size]))

    def clear(self):
        """Logically empty the buffer (storage is reused)."""
        self.ptr = 0
        self.size = 0

    def __len__(self):
        return self.size


class EWC:
    """Elastic Weight Consolidation for a single network (actor or critic).

    After each task it snapshots the model parameters and a diagonal Fisher
    information estimate; calculate_ewc_loss() then penalizes drift from all
    previous tasks' parameters, weighted by their Fisher importance.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        # NOTE(review): importance / old_params appear unused - per-task data
        # is kept in task_importances instead.
        self.importance = {}
        self.old_params = {}
        self.task_importances = {}  # task_id -> (old_params, fisher importance)

    def _calculate_fisher_info(self, buffer):
        """Estimate the diagonal Fisher information from buffer samples."""
        fisher = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                fisher[name] = torch.zeros_like(param).to(device)

        if len(buffer) == 0:
            return fisher

        # Cap the sample count at the buffer's valid size
        samples_count = min(self.fisher_sample_size, len(buffer))

        self.model.train()

        # Pull the stored rollout data from the buffer
        states, actions, _, returns, _ = buffer.get_batch()
        states = states.to(device)
        actions = actions.to(device)
        returns = returns.to(device)

        # Random subsample without replacement
        indices = torch.randperm(states.size(0))[:samples_count]
        sample_states = states[indices]
        sample_actions = actions[indices]

        for i in range(samples_count):
            state = sample_states[i:i + 1]
            action = sample_actions[i:i + 1]

            self.model.zero_grad()

            if isinstance(self.model, GRUActor):
                # Actor: Fisher from the gradient of the action log-likelihood
                self.model.reset_hidden(1)
                log_probs, _ = self.model.evaluate_actions(state, action)
                log_likelihood = log_probs.sum()
            else:  # Critic
                # Critic: negative MSE against the stored return plays the
                # role of a log-likelihood
                self.model.reset_hidden(1)
                values = self.model(state)
                log_likelihood = -F.mse_loss(values, returns[indices[i:i + 1]].to(device))

            # Backprop to get per-parameter gradients
            log_likelihood.backward()

            # Accumulate squared gradients (diagonal Fisher estimate)
            for name, param in self.model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    fisher[name] += param.grad.pow(2) / samples_count

        return fisher

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Return the summed EWC penalty over all stored tasks, scaled by lam."""
        loss = 0

        # Penalize drift from every previously stored task
        for task_id, (old_params, importance) in self.task_importances.items():
            task_loss = 0
            for name, param in self.model.named_parameters():
                if name in old_params and name in importance and param.requires_grad:
                    # Small epsilon keeps zero-importance terms well-behaved
                    importance_weight = importance[name] + 1e-8  # avoid exact zeros
                    param_diff = (param - old_params[name]).pow(2)
                    task_loss += torch.sum(importance_weight * param_diff)
            loss += task_loss

        return lam * loss

    def store_task_parameters(self, task_id, buffer):
        """Snapshot current parameters and their Fisher importance for task_id."""
        print(f"Storing parameters for task {task_id}")

        # Snapshot the current parameter values
        old_params = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                old_params[name] = param.data.clone()

        # Estimate the Fisher information matrix from the buffer
        importance = self._calculate_fisher_info(buffer)

        # Record both under this task id
        self.task_importances[task_id] = (old_params, importance)

        print(f"Stored parameters for {len(old_params)} layers")


class PPO:
    """PPO agent with GRU actor/critic, optionally regularized with EWC.

    Fix: the clipped value loss previously computed
    ``values + torch.clamp(values - values, ...)`` which is identically
    ``values`` (clamping zero), making value clipping a no-op. It now clips
    the new value estimates around the stored behavior-policy values, as in
    standard PPO2-style value clipping.
    """

    def __init__(self, state_dim, action_dim, max_action, use_ewc=True):
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.critic = GRUCritic(state_dim).to(device)

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.buffer = PPOBuffer(BUFFER_SIZE, state_dim, action_dim)
        self.use_ewc = use_ewc
        self.adaptive_ewc_lambda = EWC_LAMBDA

        # EWC components (one per network)
        if self.use_ewc:
            self.ewc_actor = EWC(self.actor)
            self.ewc_critic = EWC(self.critic)
        self.current_task = 1  # initial task id

        # Reward-driven LR schedulers (mode='max': reduce on reward plateau)
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )

    def select_action(self, state, deterministic=False):
        """Return (action, log_prob, value) as flat numpy arrays for one state.

        state: GRU sequence observation of shape [seq_len, state_dim]
        (a batch dimension is added if missing).
        """
        # Ensure the GRU input layout [batch, seq_len, feature]
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)  # add batch dimension

        state = torch.FloatTensor(state).to(device)

        # Explicitly reset hidden states for a batch of one
        self.actor.reset_hidden(1)
        self.critic.reset_hidden(1)

        with torch.no_grad():
            if deterministic:
                # Greedy action: use the mean; log_prob is a placeholder
                action_mean, _ = self.actor(state)
                action = action_mean
                log_prob = torch.zeros(1)
                value = self.critic(state)
            else:
                action, log_prob = self.actor.get_action_and_log_prob(state)
                value = self.critic(state)

        return action.cpu().numpy().flatten(), log_prob.cpu().numpy().flatten(), value.cpu().numpy().flatten()

    def switch_task(self, task_id):
        """Consolidate the finished task (if EWC) and prepare for task_id."""
        print(f"\nSwitching to task {task_id} (EWC: {self.use_ewc})")

        # Snapshot the old task's parameters and Fisher information
        if self.use_ewc and self.current_task > 0 and len(self.buffer) > 0:
            self.ewc_actor.store_task_parameters(self.current_task, self.buffer)
            self.ewc_critic.store_task_parameters(self.current_task, self.buffer)

        # Clear the buffer so the new task's data is not mixed with the old
        print(f"Clearing buffer for new task.")
        self.buffer.clear()

        # Record the active task
        self.current_task = task_id

        # Reset both recurrent states
        self.actor.reset_hidden()
        self.critic.reset_hidden()

        print(f"Reset GRU states for new task {task_id}")

    def update(self):
        """Run PPO mini-batch updates over the full buffer.

        Returns a dict with the mean actor loss, critic loss and entropy;
        zeros when the buffer is not yet full.
        """
        if len(self.buffer) < BUFFER_SIZE:
            return {"actor_loss": 0, "critic_loss": 0, "entropy": 0}

        # Bootstrap value of the final stored state for GAE
        last_state = torch.FloatTensor(self.buffer.states[self.buffer.size - 1:self.buffer.size]).to(device)
        self.critic.reset_hidden(1)
        with torch.no_grad():
            last_value = self.critic(last_state).item()

        # Compute GAE advantages and returns
        self.buffer.compute_gae(last_value)

        # Behavior-policy value estimates, needed for the clipped value loss.
        # Captured here because the buffer is cleared at the end of update().
        old_values = torch.FloatTensor(self.buffer.values[:self.buffer.size]).to(device)

        # Pull the full batch
        states, actions, advantages, returns, old_log_probs = self.buffer.get_batch()
        states = states.to(device)
        actions = actions.to(device)
        advantages = advantages.to(device)
        returns = returns.to(device)
        old_log_probs = old_log_probs.to(device)

        total_actor_loss = 0
        total_critic_loss = 0
        total_entropy = 0

        for epoch in range(PPO_EPOCHS):
            # Shuffle the data each epoch
            indices = torch.randperm(states.size(0))

            for start in range(0, states.size(0), MINI_BATCH_SIZE):
                end = start + MINI_BATCH_SIZE
                batch_indices = indices[start:end]

                batch_states = states[batch_indices]
                batch_actions = actions[batch_indices]
                batch_advantages = advantages[batch_indices]
                batch_returns = returns[batch_indices]
                batch_old_log_probs = old_log_probs[batch_indices]
                batch_old_values = old_values[batch_indices]

                # Fresh hidden states for this mini-batch
                self.actor.reset_hidden(batch_states.size(0))
                self.critic.reset_hidden(batch_states.size(0))

                # Current policy log-probs and value estimates
                new_log_probs, entropy = self.actor.evaluate_actions(batch_states, batch_actions)
                values = self.critic(batch_states)

                # Clipped surrogate policy loss
                ratio = torch.exp(new_log_probs - batch_old_log_probs)
                surr1 = ratio * batch_advantages
                surr2 = torch.clamp(ratio, 1 - CLIP_RATIO, 1 + CLIP_RATIO) * batch_advantages
                actor_loss = -torch.min(surr1, surr2).mean()

                # Clipped value loss: clip the new values around the old ones
                # (the previous code clamped values - values, i.e. zero).
                value_clipped = batch_old_values + torch.clamp(
                    values - batch_old_values, -VALUE_CLIP_RATIO, VALUE_CLIP_RATIO
                )
                value_loss1 = F.mse_loss(values, batch_returns)
                value_loss2 = F.mse_loss(value_clipped, batch_returns)
                critic_loss = torch.max(value_loss1, value_loss2)

                # Entropy bonus (subtracted from the actor objective below)
                entropy_loss = entropy.mean()

                # EWC regularization, scaled up as tasks accumulate
                if self.use_ewc and self.current_task > 1:
                    task_factor = min(self.current_task * 0.5, 2.0)
                    adaptive_lambda = self.adaptive_ewc_lambda * task_factor

                    actor_ewc_loss = self.ewc_actor.calculate_ewc_loss(adaptive_lambda)
                    critic_ewc_loss = self.ewc_critic.calculate_ewc_loss(adaptive_lambda)

                    actor_loss += actor_ewc_loss
                    critic_loss += critic_ewc_loss

                # Actor step
                total_actor_loss_batch = actor_loss - ENTROPY_COEFF * entropy_loss
                self.actor_optimizer.zero_grad()
                total_actor_loss_batch.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), MAX_GRAD_NORM)
                self.actor_optimizer.step()

                # Critic step
                self.critic_optimizer.zero_grad()
                critic_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), MAX_GRAD_NORM)
                self.critic_optimizer.step()

                total_actor_loss += actor_loss.item()
                total_critic_loss += critic_loss.item()
                total_entropy += entropy_loss.item()

        # Empty the buffer for the next rollout
        self.buffer.clear()

        num_updates = PPO_EPOCHS * (states.size(0) // MINI_BATCH_SIZE)
        return {
            "actor_loss": total_actor_loss / num_updates,
            "critic_loss": total_critic_loss / num_updates,
            "entropy": total_entropy / num_updates
        }

    def update_lr_schedulers(self, reward):
        """Step both reward-driven LR schedulers."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


def _run_training_episode(env, agent):
    """Run one training episode for ``agent`` on ``env``.

    Transitions are stored in the agent's rollout buffer and a PPO update
    is triggered once the buffer reaches BUFFER_SIZE.

    Returns:
        The episode's cumulative reward (float).
    """
    state = env.reset()
    episode_reward = 0
    for _ in range(MAX_STEPS):
        action, log_prob, value = agent.select_action(state)
        next_state, reward, done, _ = env.step(action)
        # value / log_prob come back as 1-element arrays; store the scalars.
        agent.buffer.add(state, action, reward, value[0], log_prob[0], done)
        state = next_state
        episode_reward += reward
        if done:
            break
    if len(agent.buffer) >= BUFFER_SIZE:
        agent.update()
    return episode_reward


def _deterministic_trajectory(env, agent):
    """Roll out ``agent`` greedily for one episode and return the UAV path.

    Mutates ``env`` (it is reset and stepped). The returned (N, 2) array
    contains the start position followed by the position after every step.
    """
    state = env.reset()
    trajectory = [env.uav_position.copy()]
    for _ in range(MAX_STEPS):
        action, _, _ = agent.select_action(state, deterministic=True)
        state, _, done, _ = env.step(action)
        trajectory.append(env.uav_position.copy())
        if done:
            break
    return np.array(trajectory)


def _draw_users(env):
    """Scatter-plot all users on the current axes.

    Color code: green = task collected, red = task pending,
    gray = user not generating a task in the current phase.
    """
    for i, pos in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            color = 'green' if env.collected_tasks[i] else 'red'
        else:
            color = 'gray'
        plt.scatter(pos[0], pos[1], s=100, c=color)
        plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=10)


def train():
    """Train two PPO agents (with and without EWC) over three task phases.

    Each phase changes which users generate tasks; both agents are trained
    on the same environment for ``episodes_per_task`` episodes per phase.
    Comparison plots and checkpoints are written periodically.

    Returns:
        (agent_with_ewc, agent_without_ewc, env)
    """
    # NOTE(review): the directory name starts with a space (" results") —
    # it looks like a typo, but every save/load path in this file uses the
    # same string, so it is kept for consistency; fix globally if desired.
    os.makedirs(" results", exist_ok=True)

    env = Environment()

    # State: UAV position (2) + per-user info (3 * NUM_USERS) + step count (1).
    state_dim = 2 + NUM_USERS * 3 + 1
    action_dim = 2
    max_action = 1

    # Two agents trained side by side: one with EWC regularization, one without.
    agent_with_ewc = PPO(state_dim, action_dim, max_action, use_ewc=True)
    agent_without_ewc = PPO(state_dim, action_dim, max_action, use_ewc=False)

    total_episodes = 600       # 3 phases x 200 episodes
    episodes_per_task = 200
    eval_freq = 50             # plot + checkpoint every 50 episodes

    # Per-agent reward histories and best scores.
    rewards_history_ewc = []
    rewards_history_no_ewc = []
    smoothed_rewards_ewc = []
    smoothed_rewards_no_ewc = []
    best_reward_ewc = -float('inf')
    best_reward_no_ewc = -float('inf')

    start_time = time.time()

    for phase in range(1, 4):
        # Switch which users generate tasks, then notify both agents
        # (triggers Fisher-information consolidation for the EWC agent).
        env.update_task_generating_users(phase)
        agent_with_ewc.switch_task(phase)
        agent_without_ewc.switch_task(phase)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode

            # Both agents run a full episode on the (re-reset) environment.
            episode_reward_ewc = _run_training_episode(env, agent_with_ewc)
            episode_reward_no_ewc = _run_training_episode(env, agent_without_ewc)

            rewards_history_ewc.append(episode_reward_ewc)
            rewards_history_no_ewc.append(episode_reward_no_ewc)

            # 10-episode moving average for smoother curves.
            if len(rewards_history_ewc) >= 10:
                smoothed_rewards_ewc.append(np.mean(rewards_history_ewc[-10:]))
                smoothed_rewards_no_ewc.append(np.mean(rewards_history_no_ewc[-10:]))
            else:
                smoothed_rewards_ewc.append(episode_reward_ewc)
                smoothed_rewards_no_ewc.append(episode_reward_no_ewc)

            # Keep the best-scoring actor weights for each phase.
            if episode_reward_ewc > best_reward_ewc:
                best_reward_ewc = episode_reward_ewc
                torch.save(agent_with_ewc.actor.state_dict(), f" results/best_actor_ewc_phase_{phase}.pth")

            if episode_reward_no_ewc > best_reward_no_ewc:
                best_reward_no_ewc = episode_reward_no_ewc
                torch.save(agent_without_ewc.actor.state_dict(), f" results/best_actor_no_ewc_phase_{phase}.pth")

            # Step the reward-driven LR schedulers.
            agent_with_ewc.update_lr_schedulers(episode_reward_ewc)
            agent_without_ewc.update_lr_schedulers(episode_reward_no_ewc)

            elapsed_time = time.time() - start_time
            print(f"Phase: {phase} | Episode: {episode}/{episodes_per_task} | "
                  f"Global Episode: {global_episode}/{total_episodes} | "
                  f"Reward EWC: {episode_reward_ewc:.2f} | "
                  f"Reward No-EWC: {episode_reward_no_ewc:.2f} | "
                  f"Time: {elapsed_time:.2f}s")

            # Periodic evaluation: reward curves + a sample UAV trajectory.
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                plt.figure(figsize=(15, 5))

                # Left panel: raw + smoothed reward curves with phase markers.
                plt.subplot(1, 2, 1)
                plt.plot(rewards_history_ewc, alpha=0.3, color='blue', label='EWC Raw')
                plt.plot(smoothed_rewards_ewc, color='blue', linewidth=2, label='EWC Smoothed')
                plt.plot(rewards_history_no_ewc, alpha=0.3, color='red', label='No-EWC Raw')
                plt.plot(smoothed_rewards_no_ewc, color='red', linewidth=2, label='No-EWC Smoothed')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
                plt.title("奖励对比 (EWC vs No-EWC)")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                # Right panel: deterministic rollout of the EWC agent.
                plt.subplot(1, 2, 2)
                trajectory = _deterministic_trajectory(env, agent_with_ewc)
                _draw_users(env)

                plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.7, label='UAV轨迹')
                plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
                plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

                plt.title(f"UAV轨迹 (Episode {global_episode})")
                plt.xlabel("X坐标 (m)")
                plt.ylabel("Y坐标 (m)")
                plt.xlim(0, AREA_SIZE)
                plt.ylim(0, AREA_SIZE)
                plt.grid(True)
                plt.legend()

                plt.tight_layout()
                plt.savefig(f" results/comparison_episode_{global_episode}.png")
                plt.close()

                # Full training checkpoint (weights + histories + progress).
                torch.save({
                    'actor_ewc_state_dict': agent_with_ewc.actor.state_dict(),
                    'actor_no_ewc_state_dict': agent_without_ewc.actor.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history_ewc': rewards_history_ewc,
                    'rewards_history_no_ewc': rewards_history_no_ewc,
                    'best_reward_ewc': best_reward_ewc,
                    'best_reward_no_ewc': best_reward_no_ewc
                }, f" results/checkpoint_episode_{global_episode}.pt")

        # End-of-phase snapshot of both actors.
        torch.save(agent_with_ewc.actor.state_dict(), f" results/actor_ewc_phase_{phase}.pth")
        torch.save(agent_without_ewc.actor.state_dict(), f" results/actor_no_ewc_phase_{phase}.pth")

    print(f"Training completed!")
    print(f"Best result with EWC: {best_reward_ewc:.2f}")
    print(f"Best result without EWC: {best_reward_no_ewc:.2f}")

    return agent_with_ewc, agent_without_ewc, env


def test_and_visualize(agent_ewc, agent_no_ewc, env, phase=3):
    """Evaluate the best saved actors for ``phase`` and save a comparison plot.

    Loads the best per-phase weights for both agents, runs one deterministic
    episode each, and writes a side-by-side trajectory figure.

    Fixes vs. the original implementation:
      * the trajectory is now recorded *after* each environment step — the
        old code appended the position before stepping, which duplicated the
        start point and never recorded the final UAV position;
      * each agent is evaluated exactly once (the old code ran both agents
        twice and discarded the first set of results).

    Args:
        agent_ewc: PPO agent trained with EWC regularization.
        agent_no_ewc: PPO agent trained without EWC.
        env: shared Environment instance (mutated by the rollouts).
        phase: task phase whose best weights are loaded (default 3).
    """
    # Load the best weights saved during training for this phase.
    agent_ewc.actor.load_state_dict(torch.load(f" results/best_actor_ewc_phase_{phase}.pth"))
    agent_no_ewc.actor.load_state_dict(torch.load(f" results/best_actor_no_ewc_phase_{phase}.pth"))
    agent_ewc.actor.eval()
    agent_no_ewc.actor.eval()

    # Make the environment generate the task set of the requested phase.
    env.update_task_generating_users(phase)

    def run_episode(agent):
        # One greedy episode; returns (total reward, (N, 2) trajectory).
        state = env.reset()
        total_reward = 0
        trajectory = [env.uav_position.copy()]

        for _ in range(MAX_STEPS):
            action, _, _ = agent.select_action(state, deterministic=True)
            state, reward, done, _ = env.step(action)
            # Record the post-move position so the path includes every step.
            trajectory.append(env.uav_position.copy())
            total_reward += reward
            if done:
                break

        return total_reward, np.array(trajectory)

    def draw_users():
        # green = collected, red = pending, gray = inactive this phase.
        for i, pos in enumerate(env.user_positions):
            if env.task_generating_users[i]:
                color = 'green' if env.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)
            plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=10)

    plt.figure(figsize=(15, 6))

    # Left panel: EWC agent. Run the episode first so the user colors
    # reflect this rollout's collection state.
    plt.subplot(1, 2, 1)
    reward_ewc, trajectory_ewc = run_episode(agent_ewc)
    draw_users()

    plt.plot(trajectory_ewc[:, 0], trajectory_ewc[:, 1], 'b-', alpha=0.7)
    plt.scatter(trajectory_ewc[0, 0], trajectory_ewc[0, 1], s=200, c='blue', marker='^')
    plt.scatter(trajectory_ewc[-1, 0], trajectory_ewc[-1, 1], s=200, c='purple', marker='*')
    plt.title(
        f"有EWC智能体轨迹\n收集: {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)}, 奖励: {reward_ewc:.2f}")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.grid(True)

    # Right panel: No-EWC agent.
    plt.subplot(1, 2, 2)
    reward_no_ewc, trajectory_no_ewc = run_episode(agent_no_ewc)
    draw_users()

    plt.plot(trajectory_no_ewc[:, 0], trajectory_no_ewc[:, 1], 'r-', alpha=0.7)
    plt.scatter(trajectory_no_ewc[0, 0], trajectory_no_ewc[0, 1], s=200, c='blue', marker='^')
    plt.scatter(trajectory_no_ewc[-1, 0], trajectory_no_ewc[-1, 1], s=200, c='purple', marker='*')
    plt.title(
        f"无EWC智能体轨迹\n收集: {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)}, 奖励: {reward_no_ewc:.2f}")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(f" results/final_comparison_phase_{phase}.png")
    plt.close()

    print(f"\n测试结果对比 (阶段 {phase}):")
    print(f"有EWC智能体 - 奖励: {reward_ewc:.2f}")
    print(f"无EWC智能体 - 奖励: {reward_no_ewc:.2f}")
    print(f"EWC优势: {reward_ewc - reward_no_ewc:.2f}")


if __name__ == "__main__":
    # Train both agents end to end, then compare each phase's best models.
    trained_ewc, trained_no_ewc, environment = train()

    for eval_phase in (1, 2, 3):
        print(f"\n测试阶段 {eval_phase} 的模型性能对比:")
        test_and_visualize(trained_ewc, trained_no_ewc, environment, phase=eval_phase)
