import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
import matplotlib.pyplot as plt
from collections import deque
import random
import warnings
import os

# Global warning / plotting configuration for the whole script.
warnings.filterwarnings('ignore')
plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font so Chinese plot labels render
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering correctly with a CJK font


class UAVTaskEnvironment:
    """2D UAV data-collection environment.

    A UAV starts near the lower-left corner, collects a user's task by flying
    within ``service_range`` of that user, and completes collected tasks by
    flying within range of a central MEC server.

    ``step`` always returns a reward of 0; task-specific rewards are computed
    externally (see ``TaskRewardCalculator``) from the returned ``info`` dict,
    which now also exposes ``tasks_collected_list``, ``user_positions`` and
    ``mec_position`` so reward calculators and plotting can use them.
    """

    def __init__(self, area_size=100, num_users=10, max_battery=1000.0):
        self.area_size = area_size
        self.num_users = num_users
        self.max_battery = max_battery
        self.max_velocity = 12.0  # maximum flight speed (kept low on purpose)
        self.service_range = 12.0  # enlarged service radius to ease task collection
        self.mec_position = np.array([50.0, 50.0])  # MEC server location
        self.reset()

    def reset(self):
        """Start a new episode and return the initial 50-dim state vector."""
        # UAV departs from the lower-left corner
        self.uav_position = np.array([10.0, 10.0], dtype=np.float32)
        self.uav_velocity = np.zeros(2, dtype=np.float32)
        self.battery_level = self.max_battery

        # Generate user positions with a reasonable spatial spread
        self.user_positions = []
        self.user_tasks = []
        self.tasks_collected = []  # task picked up by the UAV
        self.tasks_completed = []  # task delivered to the MEC server

        # Place users on a coarse grid so they are spread over the area
        grid_size = 3
        cell_size = (self.area_size - 20) / grid_size
        used_cells = set()

        for i in range(self.num_users):
            while True:
                # Pick a random grid cell
                grid_x = np.random.randint(0, grid_size)
                grid_y = np.random.randint(0, grid_size)

                # Prefer unused cells; once every cell is used, any cell is fine
                if (grid_x, grid_y) not in used_cells or len(used_cells) >= grid_size * grid_size:
                    # Sample a position inside the chosen cell
                    x = 15 + grid_x * cell_size + np.random.uniform(5, cell_size - 5)
                    y = 15 + grid_y * cell_size + np.random.uniform(5, cell_size - 5)
                    user_pos = np.array([x, y], dtype=np.float32)

                    # Keep every user some distance away from the start point
                    if np.linalg.norm(user_pos - self.uav_position) > 15:
                        self.user_positions.append(user_pos)
                        used_cells.add((grid_x, grid_y))
                        break

        self.user_positions = np.array(self.user_positions, dtype=np.float32)

        # Simplified task attributes
        for i in range(self.num_users):
            task = {
                'data_size': np.random.uniform(1, 5),  # 1-5 MB
                'priority': np.random.uniform(0.5, 1.0),  # task priority
                'reward_value': np.random.uniform(10, 30)  # task value
            }
            self.user_tasks.append(task)
            self.tasks_collected.append(False)
            self.tasks_completed.append(False)

        self.step_count = 0
        self.total_flight_distance = 0.0
        self.total_energy_consumed = 0.0
        self.trajectory = [self.uav_position.copy()]
        # Distances to every user, kept from the previous step for proximity shaping
        self.last_distances = np.linalg.norm(self.user_positions - self.uav_position, axis=1)

        return self._get_state()

    def _get_state(self):
        """Build the observation: 5 UAV + 1 MEC-distance + 8*5 user + 4 global dims."""
        state = []

        # UAV state (5 dims), normalized to roughly [-1, 1]
        state.extend([
            self.uav_position[0] / self.area_size,
            self.uav_position[1] / self.area_size,
            self.uav_velocity[0] / self.max_velocity,
            self.uav_velocity[1] / self.max_velocity,
            self.battery_level / self.max_battery
        ])

        # Distance to the MEC server (1 dim)
        distance_to_mec = np.linalg.norm(self.uav_position - self.mec_position)
        state.append(min(distance_to_mec / (self.area_size * np.sqrt(2)), 1.0))

        # Uncollected users sorted by distance; keep the nearest 8 (8*5 = 40 dims)
        uncollected_users = []
        for i, collected in enumerate(self.tasks_collected):
            if not collected:
                distance = np.linalg.norm(self.uav_position - self.user_positions[i])
                uncollected_users.append((i, distance))

        uncollected_users.sort(key=lambda x: x[1])
        closest_users = uncollected_users[:8]

        # Per-user features; zero-padded when fewer than 8 remain
        for i in range(8):
            if i < len(closest_users):
                user_idx, distance = closest_users[i]
                user_pos = self.user_positions[user_idx]
                task = self.user_tasks[user_idx]

                relative_pos = (user_pos - self.uav_position) / self.area_size
                normalized_distance = min(distance / (self.area_size * np.sqrt(2)), 1.0)

                state.extend([
                    relative_pos[0],
                    relative_pos[1],
                    normalized_distance,
                    task['priority'],
                    task['reward_value'] / 30.0
                ])
            else:
                state.extend([0.0] * 5)

        # Global progress information (4 dims)
        collected_ratio = sum(self.tasks_collected) / self.num_users
        completed_ratio = sum(self.tasks_completed) / self.num_users
        state.extend([
            collected_ratio,
            completed_ratio,
            self.step_count / 500.0,
            len(uncollected_users) / self.num_users
        ])

        state = np.array(state, dtype=np.float32)
        state = np.clip(state, -2.0, 2.0)

        return state

    def step(self, action, dt=1.0):
        """Advance the simulation by one step.

        Args:
            action: 2-dim array in [-1, 1] interpreted as target vx, vy.
            dt: integration time step.

        Returns:
            (state, reward, done, info); reward is always 0 here — the caller
            computes the task-specific reward from ``info``.
        """
        action = np.clip(action, -1.0, 1.0)
        vx_action, vy_action = action[:2]

        # Map the action to a target velocity
        target_vx = vx_action * self.max_velocity
        target_vy = vy_action * self.max_velocity

        # Low-pass filter the velocity for smooth flight
        alpha = 0.3  # velocity blending rate
        self.uav_velocity[0] = (1 - alpha) * self.uav_velocity[0] + alpha * target_vx
        self.uav_velocity[1] = (1 - alpha) * self.uav_velocity[1] + alpha * target_vy

        # Cap the speed at max_velocity
        velocity_magnitude = np.linalg.norm(self.uav_velocity)
        if velocity_magnitude > self.max_velocity:
            self.uav_velocity = self.uav_velocity / velocity_magnitude * self.max_velocity

        # Integrate position and keep the UAV inside the area (3 m margin)
        new_position = self.uav_position + self.uav_velocity * dt
        new_position = np.clip(new_position, 3, self.area_size - 3)

        # Track flight distance and energy use
        flight_distance = np.linalg.norm(new_position - self.uav_position)
        self.total_flight_distance += flight_distance

        # Simplified energy model: hover cost + velocity + distance terms
        energy_consumed = 0.8 + 0.1 * velocity_magnitude + 0.05 * flight_distance
        self.total_energy_consumed += energy_consumed
        self.battery_level = max(0, self.battery_level - energy_consumed)

        self.uav_position = new_position
        self.trajectory.append(self.uav_position.copy())

        # Task collection: entering the service range collects the task
        newly_collected = 0
        newly_completed = 0

        for i in range(self.num_users):
            distance_to_user = np.linalg.norm(self.uav_position - self.user_positions[i])

            # Collect: inside the service range and not yet collected
            if distance_to_user <= self.service_range and not self.tasks_collected[i]:
                self.tasks_collected[i] = True
                newly_collected += 1

            # Complete: collected task delivered while near the MEC server
            if self.tasks_collected[i] and not self.tasks_completed[i]:
                distance_to_mec = np.linalg.norm(self.uav_position - self.mec_position)
                if distance_to_mec <= 15.0:  # MEC service range
                    self.tasks_completed[i] = True
                    newly_completed += 1

        self.step_count += 1

        # Current distances to users (used by proximity shaping rewards)
        current_distances = np.linalg.norm(self.user_positions - self.uav_position, axis=1)

        # Termination conditions
        all_completed = sum(self.tasks_completed) == self.num_users
        battery_depleted = self.battery_level <= 50
        max_steps_reached = self.step_count >= 500
        all_collected = sum(self.tasks_collected) == self.num_users

        done = all_completed or battery_depleted or max_steps_reached

        info = {
            'tasks_collected': sum(self.tasks_collected),
            'tasks_completed': sum(self.tasks_completed),
            'total_tasks': self.num_users,
            'collection_ratio': sum(self.tasks_collected) / self.num_users,
            'completion_ratio': sum(self.tasks_completed) / self.num_users,
            'flight_distance': flight_distance,
            'total_flight_distance': self.total_flight_distance,
            'energy_consumed': energy_consumed,
            'total_energy_consumed': self.total_energy_consumed,
            'battery_level': self.battery_level,
            'step_count': self.step_count,
            'velocity_magnitude': velocity_magnitude,
            'newly_collected': newly_collected,
            'newly_completed': newly_completed,
            'all_completed': all_completed,
            'all_collected': all_collected,
            'uav_position': self.uav_position.copy(),
            'trajectory': self.trajectory.copy(),
            'current_distances': current_distances,
            'last_distances': self.last_distances.copy(),
            # Per-user collection flags so reward shaping can skip collected users
            # (TaskRewardCalculator reads this via info.get('tasks_collected_list')).
            'tasks_collected_list': list(self.tasks_collected),
            # Map data consumed by plot_trajectory.
            'user_positions': self.user_positions.copy(),
            'mec_position': self.mec_position.copy()
        }

        # Remember current distances for next step's proximity shaping
        self.last_distances = current_distances.copy()

        return self._get_state(), 0, done, info


class PPONetwork(nn.Module):
    """Actor-critic network with a shared trunk.

    The actor outputs a state-dependent mean and a state-independent
    (learned) log standard deviation; the critic outputs a scalar value.
    Inputs and intermediate activations are sanitized against NaN/Inf.
    """

    def __init__(self, state_dim, action_dim, hidden_dim=256):
        super(PPONetwork, self).__init__()

        half = hidden_dim // 2
        quarter = hidden_dim // 4

        # Shared trunk: two full-width layers, then a bottleneck.
        # LayerNorm + Tanh keep activations bounded and training stable.
        self.shared_layers = nn.Sequential(
            nn.Linear(state_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LayerNorm(hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, half),
            nn.LayerNorm(half),
            nn.Tanh()
        )

        # Policy head: mean from features, log-std as a free parameter.
        self.actor_mean = nn.Linear(half, action_dim)
        self.actor_log_std = nn.Parameter(torch.full((action_dim,), -1.0))

        # Value head.
        self.critic = nn.Sequential(
            nn.Linear(half, quarter),
            nn.Tanh(),
            nn.Linear(quarter, 1)
        )

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Orthogonal init with a modest gain keeps early outputs small.
        if isinstance(m, nn.Linear):
            torch.nn.init.orthogonal_(m.weight, gain=0.5)
            if m.bias is not None:
                torch.nn.init.constant_(m.bias, 0)

    def forward(self, state):
        """Return (action_mean, action_std, value) for a batch of states."""
        # Sanitize inputs so a single bad observation cannot poison the pass.
        if torch.isnan(state).any() or torch.isinf(state).any():
            state = torch.nan_to_num(state, nan=0.0, posinf=1.0, neginf=-1.0)

        features = self.shared_layers(state)
        if torch.isnan(features).any():
            features = torch.nan_to_num(features)

        # Clamp outputs to keep the downstream Normal distribution well-behaved.
        mean = torch.clamp(self.actor_mean(features), -3.0, 3.0)
        std = torch.exp(torch.clamp(self.actor_log_std, -2.0, 1.0))
        value = torch.clamp(self.critic(features), -200.0, 200.0)

        return mean, std, value

    def get_action(self, state, deterministic=False):
        """Sample an action (or take the mean) and squash it into [-1, 1]."""
        mean, std, value = self.forward(state)

        if deterministic:
            raw_action = mean
        else:
            if torch.isnan(mean).any() or torch.isnan(std).any():
                mean = torch.nan_to_num(mean)
                std = torch.nan_to_num(std, nan=0.1)
            raw_action = Normal(mean, std + 1e-8).sample()

        # tanh squashing matches the environment's action bounds.
        return torch.tanh(raw_action), value


class ContinualLearningPPO:
    """PPO agent with Elastic Weight Consolidation (EWC) for continual learning.

    Trains with the standard PPO clipped objective; when Fisher information and
    anchor parameters from a previous task are available, a quadratic EWC
    penalty discourages moving parameters that were important for earlier tasks.
    """

    def __init__(self, state_dim, action_dim, lr=1e-4, gamma=0.99, lam=0.95,
                 clip_eps=0.2, entropy_coef=0.01, value_coef=0.5, ewc_lambda=50):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = gamma  # discount factor
        self.lam = lam  # GAE lambda
        self.clip_eps = clip_eps  # PPO clipping range
        self.entropy_coef = entropy_coef
        self.value_coef = value_coef
        self.ewc_lambda = ewc_lambda  # current EWC strength (adapted at task switches)
        self.initial_ewc_lambda = ewc_lambda  # baseline used to cap the adaptation

        self.policy_net = PPONetwork(state_dim, action_dim)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=lr, eps=1e-8)

        # EWC state: per-parameter Fisher estimates and anchor parameter values
        self.fisher_info = {}
        self.optimal_params = {}
        self.task_id = 0
        self.task_performance = []  # mean recent reward, recorded at each task switch
        self.buffer = []  # on-policy rollout buffer, cleared after each update
        self.replay_buffer = deque(maxlen=2000)  # long-term buffer with random overwrite

    def get_action(self, state, deterministic=False):
        """Return (action, value) for a single state; action lies in [-1, 1]."""
        state = np.nan_to_num(state, nan=0.0, posinf=1.0, neginf=-1.0)
        state_tensor = torch.FloatTensor(state).unsqueeze(0)

        with torch.no_grad():
            action, value = self.policy_net.get_action(state_tensor, deterministic)
        return action.squeeze(0).numpy(), value.item()

    def store_transition(self, state, action, reward, next_state, done, log_prob):
        """Store a sanitized transition in both the rollout and replay buffers."""
        if np.isnan(reward) or np.isinf(reward):
            reward = 0.0
        reward = np.clip(reward, -100.0, 200.0)

        transition = (state, action, reward, next_state, done, log_prob)
        self.buffer.append(transition)

        # Once the replay buffer is full, overwrite a random slot instead of FIFO
        if len(self.replay_buffer) < self.replay_buffer.maxlen:
            self.replay_buffer.append(transition)
        else:
            idx = random.randint(0, len(self.replay_buffer) - 1)
            self.replay_buffer[idx] = transition

    def compute_gae(self, rewards, values, next_values, dones):
        """Generalized Advantage Estimation over a rollout (backward recurrence)."""
        advantages = []
        gae = 0

        for i in reversed(range(len(rewards))):
            # Bootstrap from next_values at the final step, else from values[i+1]
            if i == len(rewards) - 1:
                next_value = next_values[i]
            else:
                next_value = values[i + 1]

            delta = rewards[i] + self.gamma * next_value * (1 - dones[i]) - values[i]
            gae = delta + self.gamma * self.lam * (1 - dones[i]) * gae
            advantages.insert(0, gae)

        return advantages

    def update_policy(self, epochs=3):
        """Run PPO updates (plus the EWC penalty when available) on the buffer.

        Returns a dict of mean losses, or an empty dict when the buffer holds
        fewer than 32 transitions. The rollout buffer is cleared afterwards.
        """
        if len(self.buffer) < 32:
            return {}

        states, actions, rewards, next_states, dones, old_log_probs = zip(*self.buffer)

        # Sanitize and tensorize the rollout
        states = np.array(states)
        states = np.nan_to_num(states, nan=0.0)
        states = torch.FloatTensor(states)

        actions = np.array(actions)
        actions = np.nan_to_num(actions, nan=0.0)
        actions = torch.FloatTensor(actions)

        rewards = np.array(rewards)
        rewards = np.nan_to_num(rewards, nan=0.0)
        rewards = np.clip(rewards, -100, 200)
        rewards = torch.FloatTensor(rewards)

        dones = torch.BoolTensor(dones)
        old_log_probs = np.array(old_log_probs)
        old_log_probs = np.nan_to_num(old_log_probs, nan=0.0)
        old_log_probs = torch.FloatTensor(old_log_probs)

        # Value estimates for GAE (no gradients needed here)
        with torch.no_grad():
            _, _, values = self.policy_net(states)
            values = values.squeeze()

            if len(next_states) > 0:
                next_states_array = np.array(next_states)
                next_states_array = np.nan_to_num(next_states_array, nan=0.0)
                next_states_tensor = torch.FloatTensor(next_states_array)
                _, _, next_values = self.policy_net(next_states_tensor)
                next_values = next_values.squeeze()
            else:
                next_values = torch.zeros_like(values)

        advantages = self.compute_gae(rewards.tolist(), values.tolist(),
                                      next_values.tolist(), dones.tolist())
        advantages = torch.FloatTensor(advantages)
        returns = advantages + values

        # Normalize advantages for gradient stability
        if advantages.std() > 1e-8:
            advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

        losses = {'total_loss': []}

        for epoch in range(epochs):
            try:
                action_mean, action_std, current_values = self.policy_net(states)
                current_values = current_values.squeeze()

                # Abort the epoch loop if the network produced NaNs
                if torch.isnan(action_mean).any() or torch.isnan(action_std).any():
                    break

                dist = Normal(action_mean, action_std + 1e-8)
                new_log_probs = dist.log_prob(actions).sum(dim=1)
                entropy = dist.entropy().sum(dim=1)

                if torch.isnan(new_log_probs).any():
                    break

                # PPO clipped surrogate objective
                ratio = torch.exp(torch.clamp(new_log_probs - old_log_probs, -10, 10))
                surr1 = ratio * advantages
                surr2 = torch.clamp(ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advantages
                policy_loss = -torch.min(surr1, surr2).mean()

                value_loss = F.mse_loss(current_values, returns)
                entropy_loss = -entropy.mean()

                total_loss = (policy_loss +
                              self.value_coef * value_loss +
                              self.entropy_coef * entropy_loss)

                if torch.isnan(total_loss) or torch.isinf(total_loss):
                    break

                # EWC penalty: anchor parameters important to previous tasks
                if self.fisher_info and self.optimal_params:
                    ewc_loss = self._compute_ewc_loss()
                    total_loss += self.ewc_lambda * ewc_loss

                self.optimizer.zero_grad()
                total_loss.backward()

                # Skip this optimizer step entirely on exploding/NaN gradients
                grad_norm = torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
                if torch.isnan(grad_norm) or grad_norm > 10.0:
                    self.optimizer.zero_grad()
                    continue

                self.optimizer.step()
                losses['total_loss'].append(total_loss.item())

            except Exception as e:
                # Best-effort training: bail out of remaining epochs on any failure
                break

        self.buffer.clear()
        return {k: np.mean(v) if v else 0.0 for k, v in losses.items()}

    def _compute_ewc_loss(self):
        """Quadratic EWC penalty: sum_i F_i * (theta_i - theta*_i)^2."""
        ewc_loss = 0
        for name, param in self.policy_net.named_parameters():
            if name in self.fisher_info:
                fisher = self.fisher_info[name]
                optimal = self.optimal_params[name]
                ewc_loss += (fisher * (param - optimal) ** 2).sum()
        return ewc_loss

    def compute_fisher_information(self, env, num_samples=200):
        """Estimate a diagonal Fisher information matrix for the current policy.

        Accumulates squared gradients of the action log-probability at freshly
        reset states of ``env`` and stores the per-parameter averages in
        ``self.fisher_info``.
        """
        self.policy_net.eval()
        fisher_info = {}
        for name, param in self.policy_net.named_parameters():
            fisher_info[name] = torch.zeros_like(param)

        for _ in range(num_samples):
            try:
                state = env.reset()
                state_tensor = torch.FloatTensor(state).unsqueeze(0)
                action_mean, action_std, value = self.policy_net(state_tensor)

                if torch.isnan(action_mean).any() or torch.isnan(action_std).any():
                    continue

                dist = Normal(action_mean, action_std + 1e-8)
                action = dist.sample()
                log_prob = dist.log_prob(action).sum()

                self.optimizer.zero_grad()
                log_prob.backward()

                # Accumulate squared gradients (diagonal Fisher estimate)
                for name, param in self.policy_net.named_parameters():
                    if param.grad is not None and not torch.isnan(param.grad).any():
                        fisher_info[name] += param.grad ** 2
            except:
                # Best-effort sampling: skip any sample that fails
                continue

        for name in fisher_info:
            fisher_info[name] /= num_samples

        self.fisher_info = fisher_info
        self.policy_net.train()

    def save_optimal_params(self):
        """Snapshot the current parameters as the EWC anchor point."""
        self.optimal_params = {}
        for name, param in self.policy_net.named_parameters():
            self.optimal_params[name] = param.clone().detach()

    def switch_task(self, new_task_id):
        """Record recent performance and adapt the EWC strength for a new task."""
        if len(self.buffer) > 0:
            recent_rewards = [t[2] for t in self.buffer[-50:]]
            self.task_performance.append(np.mean(recent_rewards))

        # Loosen the penalty after poor performance, tighten (bounded) after good
        if len(self.task_performance) > 0:
            if self.task_performance[-1] < 0:
                self.ewc_lambda *= 0.8
            else:
                self.ewc_lambda = min(self.ewc_lambda * 1.2, self.initial_ewc_lambda * 2)

        self.task_id = new_task_id
        print(f"切换到任务 {new_task_id}, EWC强度: {self.ewc_lambda:.1f}")


class StandardPPO:
    def __init__(self, state_dim, action_dim, lr=1e-4, gamma=0.99, lam=0.95,
                 clip_eps=0.2, entropy_coef=0.01, value_coef=0.5):
        self.gamma = gamma
        self.lam = lam
        self.clip_eps = clip_eps
        self.entropy_coef = entropy_coef
        self.value_coef = value_coef

        self.policy_net = PPONetwork(state_dim, action_dim)
        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=lr, eps=1e-8)
        self.buffer = []

    def get_action(self, state, deterministic=False):
        state = np.nan_to_num(state, nan=0.0, posinf=1.0, neginf=-1.0)
        state_tensor = torch.FloatTensor(state).unsqueeze(0)
        with torch.no_grad():
            action, value = self.policy_net.get_action(state_tensor, deterministic)
        return action.squeeze(0).numpy(), value.item()

    def store_transition(self, state, action, reward, next_state, done, log_prob):
        if np.isnan(reward) or np.isinf(reward):
            reward = 0.0
        reward = np.clip(reward, -100.0, 200.0)
        self.buffer.append((state, action, reward, next_state, done, log_prob))

    def compute_gae(self, rewards, values, next_values, dones):
        advantages = []
        gae = 0
        for i in reversed(range(len(rewards))):
            if i == len(rewards) - 1:
                next_value = next_values[i]
            else:
                next_value = values[i + 1]
            delta = rewards[i] + self.gamma * next_value * (1 - dones[i]) - values[i]
            gae = delta + self.gamma * self.lam * (1 - dones[i]) * gae
        self.lam * (1 - dones[i]) * gae
        advantages.insert(0, gae)

        return advantages


def update_policy(self, epochs=3):
    if len(self.buffer) < 32:
        return {}

    states, actions, rewards, next_states, dones, old_log_probs = zip(*self.buffer)

    states = np.array(states)
    states = np.nan_to_num(states, nan=0.0)
    states = torch.FloatTensor(states)

    actions = np.array(actions)
    actions = np.nan_to_num(actions, nan=0.0)
    actions = torch.FloatTensor(actions)

    rewards = np.array(rewards)
    rewards = np.nan_to_num(rewards, nan=0.0)
    rewards = np.clip(rewards, -100, 200)
    rewards = torch.FloatTensor(rewards)

    dones = torch.BoolTensor(dones)
    old_log_probs = np.array(old_log_probs)
    old_log_probs = np.nan_to_num(old_log_probs, nan=0.0)
    old_log_probs = torch.FloatTensor(old_log_probs)

    with torch.no_grad():
        _, _, values = self.policy_net(states)
        values = values.squeeze()
        if len(next_states) > 0:
            next_states_array = np.array(next_states)
            next_states_array = np.nan_to_num(next_states_array, nan=0.0)
            next_states_tensor = torch.FloatTensor(next_states_array)
            _, _, next_values = self.policy_net(next_states_tensor)
            next_values = next_values.squeeze()
        else:
            next_values = torch.zeros_like(values)

    advantages = self.compute_gae(rewards.tolist(), values.tolist(),
                                  next_values.tolist(), dones.tolist())
    advantages = torch.FloatTensor(advantages)
    returns = advantages + values

    if advantages.std() > 1e-8:
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

    for epoch in range(epochs):
        try:
            action_mean, action_std, current_values = self.policy_net(states)
            current_values = current_values.squeeze()

            if torch.isnan(action_mean).any() or torch.isnan(action_std).any():
                break

            dist = Normal(action_mean, action_std + 1e-8)
            new_log_probs = dist.log_prob(actions).sum(dim=1)
            entropy = dist.entropy().sum(dim=1)

            if torch.isnan(new_log_probs).any():
                break

            ratio = torch.exp(torch.clamp(new_log_probs - old_log_probs, -10, 10))
            surr1 = ratio * advantages
            surr2 = torch.clamp(ratio, 1 - self.clip_eps, 1 + self.clip_eps) * advantages
            policy_loss = -torch.min(surr1, surr2).mean()

            value_loss = F.mse_loss(current_values, returns)
            entropy_loss = -entropy.mean()

            total_loss = (policy_loss +
                          self.value_coef * value_loss +
                          self.entropy_coef * entropy_loss)

            if torch.isnan(total_loss) or torch.isinf(total_loss):
                break

            self.optimizer.zero_grad()
            total_loss.backward()

            grad_norm = torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
            if torch.isnan(grad_norm) or grad_norm > 10.0:
                self.optimizer.zero_grad()
                continue

            self.optimizer.step()

        except Exception as e:
            break

    self.buffer.clear()
    return {}


class TaskRewardCalculator:
    """Reward functions for the three UAV task profiles.

    Each static reward takes the environment ``info`` dict plus the state
    (accepted for interface compatibility; unused). Common shaping logic is
    factored into private helpers; the numeric behavior is unchanged.
    """

    @staticmethod
    def _proximity_reward(info):
        """Tiny shaping term for closing distance to not-yet-collected users.

        Uses info['tasks_collected_list'] when the environment provides it;
        otherwise treats every user as uncollected. Capped at 0.5 per step.
        """
        current_distances = info['current_distances']
        last_distances = info['last_distances']
        collected = info.get('tasks_collected_list',
                             [False] * len(current_distances))
        reward = 0.0
        for i in range(len(current_distances)):
            if not collected[i]:
                improvement = last_distances[i] - current_distances[i]
                if improvement > 0:  # moved closer this step
                    reward += improvement * 0.01  # deliberately small guidance signal
        return np.clip(reward, 0, 0.5)

    @staticmethod
    def _episode_ending(info):
        """True when terminal completion rewards should be paid out."""
        return (info.get('step_count', 0) >= 499
                or info.get('all_completed', False)
                or info.get('all_collected', False))

    @staticmethod
    def time_first_reward(info, state):
        """Time-priority task: minimize completion time."""
        # Small per-step cost to encourage finishing quickly
        step_penalty = -0.02

        proximity_reward = TaskRewardCalculator._proximity_reward(info)

        # Collection reward: far larger than the shaping terms
        collect_reward = info['newly_collected'] * 15.0

        # Terminal completion reward dominates everything else
        completion_reward = 0.0
        if TaskRewardCalculator._episode_ending(info):
            completion_reward = info['completion_ratio'] * 120.0
            if info.get('all_completed', False):
                completion_reward += 80.0
                # Finishing earlier earns a larger time bonus
                completion_reward += max(0, (500 - info['step_count']) * 0.3)

        # Time-priority tasks reward fast flight (capped at 8 m/s worth)
        velocity_bonus = min(info['velocity_magnitude'], 8.0) * 0.1

        # Penalize running the battery low
        battery_penalty = -0.5 if info['battery_level'] < 100 else 0.0

        total_reward = (step_penalty + proximity_reward + collect_reward +
                        completion_reward + velocity_bonus + battery_penalty)
        return np.clip(total_reward, -10, 200)

    @staticmethod
    def energy_first_reward(info, state):
        """Energy-priority task: minimize energy consumption."""
        step_penalty = -0.02

        proximity_reward = TaskRewardCalculator._proximity_reward(info)

        # Slightly lower collection reward than the time-priority task
        collect_reward = info['newly_collected'] * 12.0

        completion_reward = 0.0
        if TaskRewardCalculator._episode_ending(info):
            completion_reward = info['completion_ratio'] * 100.0
            if info.get('all_completed', False):
                completion_reward += 60.0
                # Remaining battery translates into an energy-saving bonus
                completion_reward += (info['battery_level'] / 1000.0) * 50.0

        # Core of the energy-priority objective: per-step energy cost
        energy_penalty = -info['energy_consumed'] * 0.8

        # Penalize flying faster than the efficient 6 m/s cruise
        velocity_penalty = -max(0, info['velocity_magnitude'] - 6.0) * 0.3

        # Small ongoing bonus for keeping the battery charged
        battery_bonus = (info['battery_level'] / 1000.0) * 0.2

        total_reward = (step_penalty + proximity_reward + collect_reward +
                        completion_reward + energy_penalty + velocity_penalty + battery_bonus)
        return np.clip(total_reward, -10, 200)

    @staticmethod
    def balanced_reward(info, state):
        """Balanced task: trade off completion time against energy use."""
        step_penalty = -0.02

        proximity_reward = TaskRewardCalculator._proximity_reward(info)

        # Collection reward between the time- and energy-priority values
        collect_reward = info['newly_collected'] * 13.5

        completion_reward = 0.0
        if TaskRewardCalculator._episode_ending(info):
            completion_reward = info['completion_ratio'] * 110.0
            if info.get('all_completed', False):
                completion_reward += 70.0
                # Reward doing well on both time and energy simultaneously
                time_efficiency = max(0, (400 - info['step_count']) / 400.0) * 30.0
                energy_efficiency = max(0, (info['battery_level'] - 200) / 800.0) * 30.0
                completion_reward += time_efficiency + energy_efficiency

        # Moderate per-step energy cost
        energy_penalty = -info['energy_consumed'] * 0.4

        # Bonus for cruising near the moderate optimum of 7 m/s
        optimal_velocity = 7.0
        velocity_bonus = max(0, 3.0 - abs(info['velocity_magnitude'] - optimal_velocity)) * 0.05

        # Small ongoing battery-health bonus
        battery_bonus = (info['battery_level'] / 1000.0) * 0.1

        total_reward = (step_penalty + proximity_reward + collect_reward +
                        completion_reward + energy_penalty + velocity_bonus + battery_bonus)
        return np.clip(total_reward, -10, 200)


def plot_trajectory(env_info, episode, task_name, agent_type="CL", save_dir="trajectories"):
    """Plot and save the UAV flight trajectory for one episode.

    Args:
        env_info: Dict with 'trajectory' (sequence of (x, y) positions) and
            optionally 'user_positions', 'mec_position' plus the summary
            statistics keys rendered in the text box.
        episode: Episode identifier used in the title and the file name.
        task_name: Task label used in the title and the file name.
        agent_type: Agent tag ("CL" or "STD") for the title/file name.
        save_dir: Directory the PNG is written to (created if missing).
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    trajectory = env_info['trajectory']
    user_positions = env_info.get('user_positions', [])
    mec_position = env_info.get('mec_position', [50, 50])

    plt.figure(figsize=(12, 10))

    # Map bounds and background grid.
    plt.xlim(0, 100)
    plt.ylim(0, 100)
    plt.grid(True, alpha=0.3)

    # MEC server marker.
    plt.scatter(mec_position[0], mec_position[1], c='red', s=300, marker='s',
                label='MEC服务器', edgecolors='black', linewidth=2)

    # MEC service range.
    mec_circle = plt.Circle(mec_position, 15.0, fill=False, color='red',
                            linestyle='--', alpha=0.6, linewidth=2)
    plt.gca().add_patch(mec_circle)

    # User task positions.
    if len(user_positions) > 0:
        user_x = [pos[0] for pos in user_positions]
        user_y = [pos[1] for pos in user_positions]
        plt.scatter(user_x, user_y, c='blue', s=150, marker='o',
                    label='用户任务', edgecolors='black', linewidth=1)

        # Service-range circles (only the first few users, to reduce clutter).
        for i, pos in enumerate(user_positions[:5]):
            circle = plt.Circle(pos, 12.0, fill=False, color='lightblue',
                                linestyle=':', alpha=0.4)
            plt.gca().add_patch(circle)

    # UAV trajectory.
    if len(trajectory) > 1:
        traj_x = [pos[0] for pos in trajectory]
        traj_y = [pos[1] for pos in trajectory]

        # Draw segment by segment so alpha increases over time (later
        # segments are more opaque).
        for i in range(len(trajectory) - 1):
            alpha = 0.3 + 0.7 * (i / len(trajectory))
            plt.plot([traj_x[i], traj_x[i + 1]], [traj_y[i], traj_y[i + 1]],
                     'green', linewidth=2, alpha=alpha)

        # Start point.
        plt.scatter(traj_x[0], traj_y[0], c='orange', s=200, marker='^',
                    label='起始点', edgecolors='black', linewidth=2, zorder=5)

        # End point.
        plt.scatter(traj_x[-1], traj_y[-1], c='purple', s=200, marker='v',
                    label='结束点', edgecolors='black', linewidth=2, zorder=5)

        # Key waypoints sampled along the trajectory (at most ~15).
        key_points = range(0, len(trajectory), max(1, len(trajectory) // 15))
        for i in key_points:
            plt.scatter(traj_x[i], traj_y[i], c='green', s=40, alpha=0.6, zorder=3)

    plt.title(f'{agent_type} - {task_name} - Episode {episode}\n'
              f'轨迹长度: {len(trajectory)} 步', fontsize=14, fontweight='bold')
    plt.xlabel('X坐标 (m)', fontsize=12)
    plt.ylabel('Y坐标 (m)', fontsize=12)
    plt.legend(loc='upper right', fontsize=10)

    # Detailed statistics box (only when the caller supplied the fields).
    if 'total_flight_distance' in env_info:
        stats_text = f"飞行距离: {env_info['total_flight_distance']:.1f}m\n"
        stats_text += f"总能耗: {env_info['total_energy_consumed']:.1f}\n"
        stats_text += f"收集任务: {env_info['tasks_collected']}/{env_info['total_tasks']}\n"
        stats_text += f"完成任务: {env_info['tasks_completed']}/{env_info['total_tasks']}\n"
        stats_text += f"剩余电量: {env_info['battery_level']:.1f}"

        plt.text(0.02, 0.98, stats_text, transform=plt.gca().transAxes,
                 fontsize=10, verticalalignment='top',
                 bbox=dict(boxstyle="round,pad=0.3", facecolor='white', alpha=0.9))

    plt.tight_layout()

    # Save the figure.
    filename = f"{save_dir}/{agent_type}_{task_name}_episode_{episode}.png"
    plt.savefig(filename, dpi=300, bbox_inches='tight')
    plt.close()

    # BUG FIX: the message printed a literal placeholder instead of the
    # actual saved path; interpolate `filename`.
    print(f"轨迹图已保存: {filename}")


def train_agent(env, agent, num_episodes, task_name, reward_func, update_interval=10, plot_interval=200):
    """Train `agent` on `env` for one task and collect per-episode metrics.

    Args:
        env: UAV task environment providing reset()/step().
        agent: PPO agent with get_action / store_transition / update_policy.
        num_episodes: Number of training episodes to run.
        task_name: Human-readable task label for logging.
        reward_func: Callable (info, state) -> scalar reward.
        update_interval: Run a policy update every this many episodes.
        plot_interval: Save a trajectory plot every this many episodes.

    Returns:
        Four parallel lists: episode rewards, collection ratios,
        completion ratios and episode energies.
    """
    reward_hist = []
    collection_hist = []
    completion_hist = []
    energy_hist = []

    print(f"开始训练任务: {task_name}")

    for ep in range(num_episodes):
        obs = env.reset()
        total_reward = 0
        total_energy = 0
        n_steps = 0
        done = False

        while not done:
            action, value = agent.get_action(obs)
            next_obs, _, done, info = env.step(action)

            # Expose per-task collection flags so reward_func can read them.
            info['tasks_collected_list'] = env.tasks_collected

            r = reward_func(info, obs)
            total_reward += r
            total_energy += info['energy_consumed']
            n_steps += 1

            # Recompute the log-probability of the taken action for PPO.
            obs_t = torch.FloatTensor(obs).unsqueeze(0)
            act_t = torch.FloatTensor(action).unsqueeze(0)
            with torch.no_grad():
                mean, std, _ = agent.policy_net(obs_t)
                log_prob = Normal(mean, std + 1e-8).log_prob(act_t).sum(dim=1).item()

            agent.store_transition(obs, action, r, next_obs, done, log_prob)
            obs = next_obs

        reward_hist.append(total_reward)
        collection_hist.append(info['collection_ratio'])
        completion_hist.append(info['completion_ratio'])
        energy_hist.append(total_energy)

        # Verbose logging: every episode early on, then every 25th.
        if ep % 25 == 0 or ep < 10:
            print(f"Episode {ep:4d} | "
                  f"Reward: {total_reward:8.2f} | "
                  f"Steps: {n_steps:3d} | "
                  f"Collected: {info['tasks_collected']:2d}/{info['total_tasks']:2d} | "
                  f"Completed: {info['tasks_completed']:2d}/{info['total_tasks']:2d} | "
                  f"Energy: {total_energy:6.2f} | "
                  f"Battery: {info['battery_level']:6.1f}")

        # Periodically save a trajectory plot.
        if ep % plot_interval == 0 and ep > 0:
            snapshot = {key: info[key] for key in (
                'trajectory', 'total_flight_distance', 'total_energy_consumed',
                'tasks_collected', 'tasks_completed', 'total_tasks', 'battery_level')}
            snapshot['user_positions'] = env.user_positions
            snapshot['mec_position'] = env.mec_position

            plot_trajectory(snapshot, ep, task_name,
                            "CL" if hasattr(agent, 'ewc_lambda') else "STD")

        # Periodic policy update.
        if ep % update_interval == 0 and ep > 0:
            losses = agent.update_policy()
            if losses and ep % (update_interval * 8) == 0:
                print(f"    训练损失: {losses.get('total_loss', 0.0):.4f}")

    return reward_hist, collection_hist, completion_hist, energy_hist


def train_continual_learning_agent(num_episodes_per_task=500, update_interval=10):
    """Train one continual-learning PPO agent sequentially on all tasks.

    Before each task switch the agent consolidates knowledge (Fisher
    information + optimal-parameter snapshot for EWC).

    Returns:
        Dicts (keyed by task name) of rewards, collection ratios,
        completion ratios and energies, plus the trained agent.
    """
    env = UAVTaskEnvironment()
    agent = ContinualLearningPPO(len(env.reset()), 2, lr=2e-4)

    task_specs = [
        ("时间优先", TaskRewardCalculator.time_first_reward),
        ("能耗优先", TaskRewardCalculator.energy_first_reward),
        ("平衡优化", TaskRewardCalculator.balanced_reward),
    ]

    rewards_by_task = {}
    collections_by_task = {}
    completions_by_task = {}
    energies_by_task = {}

    for task_id, (task_name, reward_func) in enumerate(task_specs):
        print(f"\n=== 训练任务 {task_id}: {task_name} ===")

        # Consolidate the previous task's knowledge before switching.
        if task_id > 0:
            agent.compute_fisher_information(env, num_samples=100)
            agent.save_optimal_params()
            agent.switch_task(task_id)

        rewards, collections, completions, energies = train_agent(
            env, agent, num_episodes_per_task, task_name, reward_func, update_interval
        )

        rewards_by_task[task_name] = rewards
        collections_by_task[task_name] = collections
        completions_by_task[task_name] = completions
        energies_by_task[task_name] = energies

        # Summarize the final 50 episodes of this task.
        print(f"\n{task_name} 训练完成:")
        print(f"  最终平均奖励: {np.mean(rewards[-50:]):.2f}")
        print(f"  最终平均收集率: {np.mean(collections[-50:]):.2%}")
        print(f"  最终平均完成率: {np.mean(completions[-50:]):.2%}")
        print(f"  最终平均能耗: {np.mean(energies[-50:]):.2f}")

    return rewards_by_task, collections_by_task, completions_by_task, energies_by_task, agent


def train_standard_agents(num_episodes_per_task=500, update_interval=10):
    """Train one fresh standard PPO agent per task (no knowledge transfer).

    Returns:
        Dicts (keyed by task name) of rewards, collection ratios,
        completion ratios and energies, plus a dict of trained agents.
    """
    env = UAVTaskEnvironment()
    state_dim = len(env.reset())
    action_dim = 2

    task_specs = [
        ("时间优先", TaskRewardCalculator.time_first_reward),
        ("能耗优先", TaskRewardCalculator.energy_first_reward),
        ("平衡优化", TaskRewardCalculator.balanced_reward),
    ]

    rewards_by_task = {}
    collections_by_task = {}
    completions_by_task = {}
    energies_by_task = {}
    trained_agents = {}

    for task_name, reward_func in task_specs:
        print(f"\n=== 训练标准PPO: {task_name} ===")

        # Brand-new agent for every task — the baseline has no memory.
        agent = StandardPPO(state_dim, action_dim, lr=2e-4)

        rewards, collections, completions, energies = train_agent(
            env, agent, num_episodes_per_task, task_name, reward_func, update_interval
        )

        rewards_by_task[task_name] = rewards
        collections_by_task[task_name] = collections
        completions_by_task[task_name] = completions
        energies_by_task[task_name] = energies
        trained_agents[task_name] = agent

        # Summarize the final 50 episodes of this task.
        print(f"\n{task_name} 标准PPO训练完成:")
        print(f"  最终平均奖励: {np.mean(rewards[-50:]):.2f}")
        print(f"  最终平均收集率: {np.mean(collections[-50:]):.2%}")
        print(f"  最终平均完成率: {np.mean(completions[-50:]):.2%}")
        print(f"  最终平均能耗: {np.mean(energies[-50:]):.2f}")

    return rewards_by_task, collections_by_task, completions_by_task, energies_by_task, trained_agents


def plot_training_comparison(cl_rewards, std_rewards, cl_collections, std_collections,
                             cl_completions, std_completions, num_episodes_per_task=500):
    """Draw a 2x3 grid comparing CL-PPO vs. standard PPO training.

    Top row: smoothed reward / collection-rate / completion-rate curves over
    the concatenated task sequence, with dashed task boundaries. Bottom row:
    bar charts of final performance (mean of the last 50 episodes per task).
    Saves the figure to 'training_comparison.png' and shows it.

    Args:
        cl_rewards, std_rewards: Dicts mapping task name -> per-episode
            reward list for the continual-learning / standard agent.
        cl_collections, std_collections: Same structure, collection ratios.
        cl_completions, std_completions: Same structure, completion ratios.
        num_episodes_per_task: Episodes per task; used to place boundaries.
    """
    fig, axes = plt.subplots(2, 3, figsize=(18, 12))

    task_names = list(cl_rewards.keys())
    colors = ['#1f77b4', '#ff7f0e', '#2ca02c']

    # 1. Reward curves (full task sequence, concatenated)
    ax = axes[0, 0]
    cl_all_rewards = []
    std_all_rewards = []
    task_boundaries = []

    for i, task_name in enumerate(task_names):
        cl_all_rewards.extend(cl_rewards[task_name])
        std_all_rewards.extend(std_rewards[task_name])
        if i > 0:
            task_boundaries.append(i * num_episodes_per_task)

    # Moving-average smoothing; mode='valid' shortens each series by
    # window_size - 1 points.
    window_size = 30
    cl_smooth = np.convolve(cl_all_rewards, np.ones(window_size) / window_size, mode='valid')
    std_smooth = np.convolve(std_all_rewards, np.ones(window_size) / window_size, mode='valid')

    ax.plot(cl_smooth, label='持续学习PPO', color='blue', linewidth=2)
    ax.plot(std_smooth, label='标准PPO', color='red', linewidth=2)

    for boundary in task_boundaries:
        ax.axvline(x=boundary, color='gray', linestyle='--', alpha=0.8)

    # Shade each task's episode span and label it.
    for i, task_name in enumerate(task_names):
        start_x = i * num_episodes_per_task
        end_x = (i + 1) * num_episodes_per_task
        mid_x = (start_x + end_x) / 2
        ax.axvspan(start_x, end_x, alpha=0.1, color=colors[i])
        ax.text(mid_x, ax.get_ylim()[1] * 0.9, task_name, ha='center', fontweight='bold')

    ax.set_title('训练奖励对比', fontsize=14, fontweight='bold')
    ax.set_xlabel('Episode')
    ax.set_ylabel('平均奖励')
    ax.legend()
    ax.grid(True, alpha=0.3)

    # 2. Collection-rate curves
    ax = axes[0, 1]
    cl_all_collections = []
    std_all_collections = []

    for task_name in task_names:
        cl_all_collections.extend(cl_collections[task_name])
        std_all_collections.extend(std_collections[task_name])

    cl_coll_smooth = np.convolve(cl_all_collections, np.ones(window_size) / window_size, mode='valid')
    std_coll_smooth = np.convolve(std_all_collections, np.ones(window_size) / window_size, mode='valid')

    ax.plot(cl_coll_smooth, label='持续学习PPO', color='blue', linewidth=2)
    ax.plot(std_coll_smooth, label='标准PPO', color='red', linewidth=2)

    for boundary in task_boundaries:
        ax.axvline(x=boundary, color='gray', linestyle='--', alpha=0.8)

    ax.set_title('任务收集率对比', fontsize=14, fontweight='bold')
    ax.set_xlabel('Episode')
    ax.set_ylabel('收集率')
    ax.legend()
    ax.grid(True, alpha=0.3)

    # 3. Completion-rate curves
    ax = axes[0, 2]
    cl_all_completions = []
    std_all_completions = []

    for task_name in task_names:
        cl_all_completions.extend(cl_completions[task_name])
        std_all_completions.extend(std_completions[task_name])

    cl_comp_smooth = np.convolve(cl_all_completions, np.ones(window_size) / window_size, mode='valid')
    std_comp_smooth = np.convolve(std_all_completions, np.ones(window_size) / window_size, mode='valid')

    ax.plot(cl_comp_smooth, label='持续学习PPO', color='blue', linewidth=2)
    ax.plot(std_comp_smooth, label='标准PPO', color='red', linewidth=2)

    for boundary in task_boundaries:
        ax.axvline(x=boundary, color='gray', linestyle='--', alpha=0.8)

    ax.set_title('任务完成率对比', fontsize=14, fontweight='bold')
    ax.set_xlabel('Episode')
    ax.set_ylabel('完成率')
    ax.legend()
    ax.grid(True, alpha=0.3)

    # 4. Final per-task performance - reward (mean of last 50 episodes)
    ax = axes[1, 0]
    x = np.arange(len(task_names))
    width = 0.35

    cl_final_rewards = [np.mean(cl_rewards[task][-50:]) for task in task_names]
    std_final_rewards = [np.mean(std_rewards[task][-50:]) for task in task_names]

    ax.bar(x - width / 2, cl_final_rewards, width, label='持续学习PPO', color='blue', alpha=0.7)
    ax.bar(x + width / 2, std_final_rewards, width, label='标准PPO', color='red', alpha=0.7)

    ax.set_title('最终奖励对比', fontsize=14, fontweight='bold')
    ax.set_xlabel('任务')
    ax.set_ylabel('平均奖励')
    ax.set_xticks(x)
    ax.set_xticklabels(task_names)
    ax.legend()
    ax.grid(True, alpha=0.3)

    # Annotate bars with their values.
    for i, (cl_val, std_val) in enumerate(zip(cl_final_rewards, std_final_rewards)):
        ax.text(i - width / 2, cl_val + 1, f'{cl_val:.1f}', ha='center', va='bottom')
        ax.text(i + width / 2, std_val + 1, f'{std_val:.1f}', ha='center', va='bottom')

    # 5. Final per-task performance - collection rate
    ax = axes[1, 1]

    cl_final_collections = [np.mean(cl_collections[task][-50:]) for task in task_names]
    std_final_collections = [np.mean(std_collections[task][-50:]) for task in task_names]

    ax.bar(x - width / 2, cl_final_collections, width, label='持续学习PPO', color='blue', alpha=0.7)
    ax.bar(x + width / 2, std_final_collections, width, label='标准PPO', color='red', alpha=0.7)

    ax.set_title('最终收集率对比', fontsize=14, fontweight='bold')
    ax.set_xlabel('任务')
    ax.set_ylabel('收集率')
    ax.set_xticks(x)
    ax.set_xticklabels(task_names)
    ax.legend()
    ax.grid(True, alpha=0.3)

    # Annotate bars with their values.
    for i, (cl_val, std_val) in enumerate(zip(cl_final_collections, std_final_collections)):
        ax.text(i - width / 2, cl_val + 0.02, f'{cl_val:.2f}', ha='center', va='bottom')
        ax.text(i + width / 2, std_val + 0.02, f'{std_val:.2f}', ha='center', va='bottom')

    # 6. Final per-task performance - completion rate
    ax = axes[1, 2]

    cl_final_completions = [np.mean(cl_completions[task][-50:]) for task in task_names]
    std_final_completions = [np.mean(std_completions[task][-50:]) for task in task_names]

    ax.bar(x - width / 2, cl_final_completions, width, label='持续学习PPO', color='blue', alpha=0.7)
    ax.bar(x + width / 2, std_final_completions, width, label='标准PPO', color='red', alpha=0.7)

    ax.set_title('最终完成率对比', fontsize=14, fontweight='bold')
    ax.set_xlabel('任务')
    ax.set_ylabel('完成率')
    ax.set_xticks(x)
    ax.set_xticklabels(task_names)
    ax.legend()
    ax.grid(True, alpha=0.3)

    # Annotate bars with their values.
    for i, (cl_val, std_val) in enumerate(zip(cl_final_completions, std_final_completions)):
        ax.text(i - width / 2, cl_val + 0.02, f'{cl_val:.2f}', ha='center', va='bottom')
        ax.text(i + width / 2, std_val + 0.02, f'{std_val:.2f}', ha='center', va='bottom')

    plt.tight_layout()
    plt.savefig('training_comparison.png', dpi=300, bbox_inches='tight')
    plt.show()


def _run_deterministic_eval(env, agent, reward_func, num_episodes):
    """Roll out `agent` deterministically for `num_episodes` episodes.

    Returns:
        Four parallel lists: episode rewards, collection ratios,
        completion ratios and total energies.
    """
    rewards, collections, completions, energies = [], [], [], []

    for _ in range(num_episodes):
        state = env.reset()
        episode_reward = 0

        while True:
            action, _ = agent.get_action(state, deterministic=True)
            next_state, _, done, info = env.step(action)

            # Expose collection flags so the reward function can read them.
            info['tasks_collected_list'] = env.tasks_collected

            episode_reward += reward_func(info, state)

            state = next_state
            if done:
                break

        rewards.append(episode_reward)
        collections.append(info['collection_ratio'])
        completions.append(info['completion_ratio'])
        energies.append(info['total_energy_consumed'])

    return rewards, collections, completions, energies


def _pct_improvement(new_value, base_value):
    """Percentage improvement of new_value over base_value (0 when base ~ 0)."""
    return ((new_value - base_value) / abs(base_value) * 100) if abs(base_value) > 1e-6 else 0


def evaluate_task_switching(cl_agent, standard_agents, num_eval_episodes=30):
    """Evaluate task-switching performance.

    Runs a task sequence (with repeats) and compares the single
    continual-learning agent against the per-task standard PPO agents.
    The duplicated CL/STD rollout loops of the original are factored into
    `_run_deterministic_eval`.

    Args:
        cl_agent: Continual-learning agent, evaluated on every task.
        standard_agents: Dict mapping task name -> trained standard agent.
        num_eval_episodes: Episodes per task per agent.

    Returns:
        (cl_results, std_results): each a dict of concatenated per-episode
        'rewards', 'collections', 'completions' and 'energies' lists.
    """
    env = UAVTaskEnvironment()

    task_sequence = [
        ("时间优先", TaskRewardCalculator.time_first_reward),
        ("能耗优先", TaskRewardCalculator.energy_first_reward),
        ("平衡优化", TaskRewardCalculator.balanced_reward),
        ("时间优先", TaskRewardCalculator.time_first_reward),  # repeated task to test retention
        ("能耗优先", TaskRewardCalculator.energy_first_reward),
    ]

    cl_results = {'rewards': [], 'collections': [], 'completions': [], 'energies': []}
    std_results = {'rewards': [], 'collections': [], 'completions': [], 'energies': []}

    print("\n开始任务切换评估...")

    for task_idx, (task_name, reward_func) in enumerate(task_sequence):
        print(f"\n=== 评估任务 {task_idx + 1}: {task_name} ===")

        # Evaluate the continual-learning agent, then the task's standard agent.
        cl_metrics = _run_deterministic_eval(env, cl_agent, reward_func, num_eval_episodes)
        std_metrics = _run_deterministic_eval(env, standard_agents[task_name], reward_func, num_eval_episodes)

        for key, cl_vals, std_vals in zip(('rewards', 'collections', 'completions', 'energies'),
                                          cl_metrics, std_metrics):
            cl_results[key].extend(cl_vals)
            std_results[key].extend(std_vals)

        # Per-task averages and relative improvements.
        cl_avg_reward = np.mean(cl_metrics[0])
        std_avg_reward = np.mean(std_metrics[0])
        cl_avg_collection = np.mean(cl_metrics[1])
        std_avg_collection = np.mean(std_metrics[1])
        cl_avg_completion = np.mean(cl_metrics[2])
        std_avg_completion = np.mean(std_metrics[2])

        reward_improvement = _pct_improvement(cl_avg_reward, std_avg_reward)
        collection_improvement = _pct_improvement(cl_avg_collection, std_avg_collection)
        completion_improvement = _pct_improvement(cl_avg_completion, std_avg_completion)

        print(f"  {task_name} 评估结果:")
        print(
            f"    奖励    - 持续学习: {cl_avg_reward:6.2f}, 标准PPO: {std_avg_reward:6.2f}, 提升: {reward_improvement:+5.1f}%")
        print(
            f"    收集率  - 持续学习: {cl_avg_collection:6.2%}, 标准PPO: {std_avg_collection:6.2%}, 提升: {collection_improvement:+5.1f}%")
        print(
            f"    完成率  - 持续学习: {cl_avg_completion:6.2%}, 标准PPO: {std_avg_completion:6.2%}, 提升: {completion_improvement:+5.1f}%")

    return cl_results, std_results


def _final_rollout(env, agent, reward_func):
    """Run one deterministic episode; return (total reward, copy of final info)."""
    state = env.reset()
    total_reward = 0

    while True:
        action, _ = agent.get_action(state, deterministic=True)
        next_state, _, done, info = env.step(action)

        info['tasks_collected_list'] = env.tasks_collected
        total_reward += reward_func(info, state)

        state = next_state
        if done:
            return total_reward, info.copy()


def _snapshot_env_info(env, info):
    """Bundle the fields plot_trajectory() expects into one dict.

    Must be called before the next env.reset(): user positions are read
    from the live environment and reset() regenerates them.
    """
    return {
        'trajectory': info['trajectory'],
        'user_positions': env.user_positions,
        'mec_position': env.mec_position,
        'total_flight_distance': info['total_flight_distance'],
        'total_energy_consumed': info['total_energy_consumed'],
        'tasks_collected': info['tasks_collected'],
        'tasks_completed': info['tasks_completed'],
        'total_tasks': info['total_tasks'],
        'battery_level': info['battery_level'],
    }


def run_final_demonstration(cl_agent, std_agents):
    """Run the final demo: both agents on all three tasks, with plots.

    For each task, rolls out the continual-learning agent and the matching
    standard PPO agent deterministically, saves both trajectory plots and
    prints a side-by-side comparison.
    """
    env = UAVTaskEnvironment()

    tasks = [
        ("时间优先", TaskRewardCalculator.time_first_reward),
        ("能耗优先", TaskRewardCalculator.energy_first_reward),
        ("平衡优化", TaskRewardCalculator.balanced_reward)
    ]

    print("\n" + "=" * 60)
    print("                   最终演示")
    print("=" * 60)

    for task_name, reward_func in tasks:
        print(f"\n=== {task_name}任务演示 ===")

        # CL rollout; snapshot its plot data BEFORE the next reset.
        # BUG FIX: the original built both snapshots after the second
        # env.reset(), so the CL plot showed the user layout generated for
        # the standard-PPO episode.
        cl_total_reward, cl_info = _final_rollout(env, cl_agent, reward_func)
        env_info_cl = _snapshot_env_info(env, cl_info)

        # Standard PPO rollout on a freshly reset environment.
        std_total_reward, std_info = _final_rollout(env, std_agents[task_name], reward_func)
        env_info_std = _snapshot_env_info(env, std_info)

        plot_trajectory(env_info_cl, "Final_Demo", task_name, "CL")
        plot_trajectory(env_info_std, "Final_Demo", task_name, "STD")

        # Side-by-side comparison.
        print(f"持续学习PPO:")
        print(f"  总奖励: {cl_total_reward:8.2f}")
        print(f"  收集率: {cl_info['collection_ratio']:8.2%}")
        print(f"  完成率: {cl_info['completion_ratio']:8.2%}")
        print(f"  飞行距离: {cl_info['total_flight_distance']:6.1f}m")
        print(f"  总能耗: {cl_info['total_energy_consumed']:8.1f}")
        print(f"  剩余电量: {cl_info['battery_level']:6.1f}")

        print(f"标准PPO:")
        print(f"  总奖励: {std_total_reward:8.2f}")
        print(f"  收集率: {std_info['collection_ratio']:8.2%}")
        print(f"  完成率: {std_info['completion_ratio']:8.2%}")
        print(f"  飞行距离: {std_info['total_flight_distance']:6.1f}m")
        print(f"  总能耗: {std_info['total_energy_consumed']:8.1f}")
        print(f"  剩余电量: {std_info['battery_level']:6.1f}")


def main():
    """Entry point: train both agent types, compare, evaluate, and report."""
    print("无人机MEC任务执行持续强化学习系统")
    print("=" * 70)
    print("环境设置: 100x100m地图，10个随机分布用户任务")
    print("目标: 优化无人机飞行轨迹，收集并完成用户任务")
    print("奖励层次: 完成奖励 >> 收集奖励 >> 接近奖励")
    print("=" * 70)

    # Seed all RNGs for reproducibility.
    torch.manual_seed(42)
    np.random.seed(42)
    random.seed(42)

    # Output directory for trajectory plots.
    if not os.path.exists("trajectories"):
        os.makedirs("trajectories")

    # Training hyper-parameters.
    num_episodes_per_task = 500  # episodes per task
    update_interval = 8

    try:
        print("\n第1阶段: 训练持续学习智能体...")
        cl_rewards, cl_collections, cl_completions, cl_energies, cl_agent = train_continual_learning_agent(
            num_episodes_per_task=num_episodes_per_task,
            update_interval=update_interval
        )

        print("\n第2阶段: 训练标准PPO智能体...")
        std_rewards, std_collections, std_completions, std_energies, std_agents = train_standard_agents(
            num_episodes_per_task=num_episodes_per_task,
            update_interval=update_interval
        )

        print("\n第3阶段: 绘制训练对比图...")
        plot_training_comparison(cl_rewards, std_rewards, cl_collections, std_collections,
                                 cl_completions, std_completions, num_episodes_per_task)

        print("\n第4阶段: 评估任务切换性能...")
        cl_switch_results, std_switch_results = evaluate_task_switching(
            cl_agent, std_agents, num_eval_episodes=25
        )

        print("\n第5阶段: 运行最终演示...")
        run_final_demonstration(cl_agent, std_agents)

        # Performance summary report.
        print("\n" + "=" * 70)
        print("                        性能总结报告")
        print("=" * 70)

        task_names = list(cl_rewards.keys())
        print(f"\n训练配置:")
        print(f"  任务序列: {' -> '.join(task_names)}")
        print(f"  每任务训练: {num_episodes_per_task} episodes")
        # BUG FIX: the original referenced an undefined name `env` here
        # (the environments live inside the training functions), raising
        # NameError and aborting the whole report into the except block.
        # Probe a fresh environment for the state dimension instead.
        state_dim = len(UAVTaskEnvironment().reset())
        print(f"  状态维度: {state_dim} 维")
        print(f"  动作维度: 2 维 (速度控制)")

        print(f"\n训练阶段性能对比:")
        print(f"{'任务':<12} {'指标':<8} {'持续学习':<12} {'标准PPO':<12} {'改进率':<10}")
        print("-" * 65)

        for task in task_names:
            # Reward comparison (mean of the last 50 episodes).
            cl_reward = np.mean(cl_rewards[task][-50:])
            std_reward = np.mean(std_rewards[task][-50:])
            reward_improvement = ((cl_reward - std_reward) / abs(std_reward) * 100) if abs(std_reward) > 1e-6 else 0

            # Collection-rate comparison.
            cl_collection = np.mean(cl_collections[task][-50:])
            std_collection = np.mean(std_collections[task][-50:])
            collection_improvement = ((cl_collection - std_collection) / abs(std_collection) * 100) if abs(
                std_collection) > 1e-6 else 0

            # Completion-rate comparison.
            cl_completion = np.mean(cl_completions[task][-50:])
            std_completion = np.mean(std_completions[task][-50:])
            completion_improvement = ((cl_completion - std_completion) / abs(std_completion) * 100) if abs(
                std_completion) > 1e-6 else 0

            print(f"{task:<12} {'奖励':<8} {cl_reward:<12.1f} {std_reward:<12.1f} {reward_improvement:<9.1f}%")
            print(
                f"{'':<12} {'收集率':<8} {cl_collection:<12.2%} {std_collection:<12.2%} {collection_improvement:<9.1f}%")
            print(
                f"{'':<12} {'完成率':<8} {cl_completion:<12.2%} {std_completion:<12.2%} {completion_improvement:<9.1f}%")
            print("-" * 65)

        # Task-switching performance.
        cl_switch_avg_reward = np.mean(cl_switch_results['rewards'])
        std_switch_avg_reward = np.mean(std_switch_results['rewards'])
        cl_switch_avg_collection = np.mean(cl_switch_results['collections'])
        std_switch_avg_collection = np.mean(std_switch_results['collections'])
        cl_switch_avg_completion = np.mean(cl_switch_results['completions'])
        std_switch_avg_completion = np.mean(std_switch_results['completions'])

        switch_reward_improvement = (
                    (cl_switch_avg_reward - std_switch_avg_reward) / abs(std_switch_avg_reward) * 100) if abs(
            std_switch_avg_reward) > 1e-6 else 0
        switch_collection_improvement = ((cl_switch_avg_collection - std_switch_avg_collection) / abs(
            std_switch_avg_collection) * 100) if abs(std_switch_avg_collection) > 1e-6 else 0
        switch_completion_improvement = ((cl_switch_avg_completion - std_switch_avg_completion) / abs(
            std_switch_avg_completion) * 100) if abs(std_switch_avg_completion) > 1e-6 else 0

        print(f"\n任务切换阶段性能:")
        print(
            f"  奖励    - 持续学习: {cl_switch_avg_reward:6.1f}, 标准PPO: {std_switch_avg_reward:6.1f}, 提升: {switch_reward_improvement:+5.1f}%")
        print(
            f"  收集率  - 持续学习: {cl_switch_avg_collection:6.1%}, 标准PPO: {std_switch_avg_collection:6.1%}, 提升: {switch_collection_improvement:+5.1f}%")
        print(
            f"  完成率  - 持续学习: {cl_switch_avg_completion:6.1%}, 标准PPO: {std_switch_avg_completion:6.1%}, 提升: {switch_completion_improvement:+5.1f}%")

        print(f"\n关键发现:")
        # Task where CL beats STD by the largest final-reward margin.
        best_task = max(task_names, key=lambda t: np.mean(cl_rewards[t][-50:]) - np.mean(std_rewards[t][-50:]))
        print(f"  • 持续学习在 '{best_task}' 任务上表现最优")

        avg_collection_rate = np.mean([np.mean(cl_collections[task][-50:]) for task in task_names])
        avg_completion_rate = np.mean([np.mean(cl_completions[task][-50:]) for task in task_names])
        print(f"  • 平均任务收集率: {avg_collection_rate:.1%}")
        print(f"  • 平均任务完成率: {avg_completion_rate:.1%}")

        if avg_collection_rate > 0.7:
            print(f"  • ✓ 任务收集机制运行良好")
        else:
            print(f"  • ⚠ 任务收集率偏低，需要调整奖励函数")

        print(f"\n文件输出:")
        print(f"  • 训练对比图: training_comparison.png")
        print(f"  • 轨迹图文件夹: trajectories/")
        print(f"  • 每200个episode自动生成轨迹图")

        print(f"\n✓ 系统运行完成！")
        print(f"持续学习智能体在多任务场景中展现出更好的适应性和性能。")

    except Exception as e:
        print(f"\n✗ 训练过程中出现错误: {str(e)}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    # Script entry point: run the full pipeline, reporting user
    # interruptions and unexpected failures without a raw traceback crash.
    try:
        main()
    except KeyboardInterrupt:
        print("\n程序被用户中断")
    except Exception as exc:
        print(f"\n程序运行出现错误: {str(exc)}")
        import traceback
        traceback.print_exc()