import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time
import copy
'''SD3算法 - 基于分位数回归的分布式价值函数 - 加入基站卸载决策'''
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Matplotlib setup: SimHei font for CJK labels, keep the minus sign rendering correct.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False


# Environment parameters
AREA_SIZE = 200
NUM_USERS = 12
MAX_STEPS = 300
MAX_DISTANCE_COLLECT = 15

# UAV parameters
UAV_HEIGHT = 30.0
UAV_COMPUTE_CAPACITY = 1e10
MIN_UAV_SPEED = 2.0   # minimum speed
MAX_UAV_SPEED = 20.0  # maximum speed
DEFAULT_UAV_SPEED = 10.0  # default speed (used for initialization etc.)


# [New] Base-station parameters
BASE_STATION_POSITION = np.array([0.0, 0.0])
BASE_STATION_COMPUTE_CAPACITY = 2e11  # compute capacity far beyond the UAV's
BASE_STATION_HEIGHT = 10.0  # base-station height (assumed non-zero)

# SD3 parameters (adds quantile-related parameters on top of TD3)
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4  # smaller learning rate for the more complex network
GAMMA = 0.99
TAU = 0.005
BUFFER_SIZE = 200000
# BATCH_SIZE = 64
BATCH_SIZE = 128
EXPLORATION_NOISE_START = 0.35
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.1

# [New] SD3 distributional value-function parameters
NUM_QUANTILES = 51  # number of quantiles; odd so the median is included
QUANTILE_TAU = torch.FloatTensor([(2 * i - 1) / (2.0 * NUM_QUANTILES) for i in range(1, NUM_QUANTILES + 1)]).to(device)
KAPPA = 1.0  # Huber threshold of the quantile loss

# EWC parameters
EWC_LAMBDA = 0.0
FISHER_SAMPLE_SIZE = 0

# CLEAR-style continual-learning parameters (behavior cloning + old-sample replay)
SI_LAMBDA = 1e-3
SI_EPSILON = 1e-3
BC_ALPHA = 1e-2 # behavior-cloning weight
OLD_BATCH_RATIO = 0.5 # fraction of old samples in each actor batch
OLD_REPLAY_TAIL = 20000 # tail samples copied into the old buffer at task switch
KEEP_NEW_TAIL = 5000 # tail samples kept in the new buffer after a switch

# GRU parameters
SEQUENCE_LENGTH = 20
HIDDEN_SIZE = 128

# Communication parameters
BANDWIDTH = 1e6
USER_TRANSMIT_POWER = 0.1
CHANNEL_GAIN_REF_DB = 30.0
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH
RICE_FACTOR = 5

# Task parameters
TASK_SIZE_BITS = [1e6, 2e6]
TASK_CPU_CYCLES = [5e8, 15e8]

# UAV flight energy-model parameters
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81
AIR_DENSITY = 1.225
ROTOR_RADIUS = 0.4
NUM_ROTORS = 4
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-27

# Reward weighting parameters
DELAY_WEIGHT = 0.7
ENERGY_WEIGHT = 0.3
DELAY_SCALE = 100.0
ENERGY_SCALE = 0.001


class Environment:
    """UAV mobile-edge-computing environment.

    A single UAV flies over a square area, collects computation tasks from
    ground users within MAX_DISTANCE_COLLECT, and for every collected task
    applies the agent's chosen split between on-board computation and
    offloading to a ground base station. Observations are stacked into
    length-SEQUENCE_LENGTH histories for the recurrent (GRU) agent.
    """

    def __init__(self):
        # Static layout: user positions and per-user task demands.
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        # Per-user delay/energy bookkeeping, filled in at collection time.
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.total_flight_energy = 0

        # Offloading ratio chosen when each user's task was collected.
        self.user_offloading_ratios = np.zeros(NUM_USERS)

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1
        # Active-user mask of phase 1, replayed verbatim in phase 4.
        self.phase_1_users = None

        # Cumulative per-episode reward decomposition (reset each episode).
        self.episode_reward_breakdown = {
            'collection_reward': 0.0,
            'proximity_reward': 0.0,
            'completion_reward': 0.0,
            'cost': 0.0,
            'step_penalty': 0.0
        }

    def _calculate_rice_channel_gain(self, distance_2d, height=UAV_HEIGHT):
        """Rician-fading channel gain: reference path loss times a random
        fading factor with K-factor RICE_FACTOR."""
        distance_3d = np.sqrt(distance_2d ** 2 + height ** 2)
        # Clamp to 1 m so the path-loss model never diverges near the user.
        if distance_3d < 1.0: distance_3d = 1.0
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        K = RICE_FACTOR
        h_los = 1.0
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """Uplink delay (seconds) for the user to transmit its task to the UAV."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return self.task_sizes[user_index] / data_rate

    def _calculate_uav_to_bs_delay(self, task_size):
        """Transmission delay (seconds) for the UAV to relay task_size bits
        to the base station."""
        bs_distance_2d = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        # UAV->BS link uses the height difference instead of the UAV altitude.
        height_diff = abs(UAV_HEIGHT - BASE_STATION_HEIGHT)
        channel_gain = self._calculate_rice_channel_gain(bs_distance_2d, height_diff)
        # NOTE(review): reuses USER_TRANSMIT_POWER for the UAV's transmitter —
        # confirm whether the UAV should have its own transmit-power constant.
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return task_size / data_rate

    def _calculate_completion_delay(self, user_index, offloading_ratio):
        """Total completion delay with UAV/BS split processing in parallel.

        offloading_ratio: 0 = all processed on the UAV, 1 = all relayed to
        the base station; intermediate values split the task and the two
        parts run concurrently (the slower branch dominates).
        """
        total_task_size = self.task_sizes[user_index]
        total_cpu_cycles = self.task_cpu_cycles[user_index]

        # Every task must first be uploaded from the user to the UAV.
        user_to_uav_delay = self._calculate_offloading_delay(user_index,
                                                             np.linalg.norm(
                                                                 self.uav_position - self.user_positions[user_index]))

        if offloading_ratio == 0:
            # Entirely processed on the UAV.
            local_computation_delay = total_cpu_cycles / UAV_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + local_computation_delay

        elif offloading_ratio == 1:
            # Entirely relayed to the base station.
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(total_task_size)
            bs_computation_delay = total_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

        else:
            # Split processing: UAV-local part and BS part run in parallel.
            local_task_size = total_task_size * (1 - offloading_ratio)
            bs_task_size = total_task_size * offloading_ratio

            local_cpu_cycles = total_cpu_cycles * (1 - offloading_ratio)
            bs_cpu_cycles = total_cpu_cycles * offloading_ratio

            # UAV-local branch.
            local_computation_delay = local_cpu_cycles / UAV_COMPUTE_CAPACITY
            local_total_delay = user_to_uav_delay + local_computation_delay

            # Base-station branch (relay + remote compute).
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(bs_task_size)
            bs_computation_delay = bs_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            bs_total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

            # Parallel branches: the slower one determines completion.
            total_delay = max(local_total_delay, bs_total_delay)

        return total_delay

    def _calculate_flight_energy(self, distance_moved, actual_speed=None, time_delta=1.0):
        """Rotary-wing flight energy over time_delta seconds at the given
        speed (induced + profile + parasite power terms)."""
        if actual_speed is None:
            # Fall back to average speed inferred from the displacement.
            speed = distance_moved / time_delta
        else:
            speed = actual_speed

        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index, offloading_ratio):
        """Energy the UAV spends computing the locally-processed share of
        the task (CMOS dynamic-power model)."""
        local_cpu_cycles = self.task_cpu_cycles[user_index] * (1 - offloading_ratio)
        working_frequency = UAV_COMPUTE_CAPACITY  # cycles/s = Hz
        energy = EFFECTIVE_SWITCHED_CAPACITANCE * (working_frequency ** 2) * local_cpu_cycles
        return energy

    def update_task_generating_users(self, phase):
        """Set the active-user mask for one of the four curriculum phases.

        Phase 1: random 10 of NUM_USERS; phase 2: all users; phase 3:
        random 8; phase 4: replays the exact phase-1 selection.
        """
        self.current_phase = phase
        # Deactivate everyone, then re-enable per phase.
        self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)

        if phase == 1:
            indices = np.random.choice(NUM_USERS, 10, replace=False)
            self.task_generating_users[indices] = True
            # Remember the selection so phase 4 can reuse it.
            self.phase_1_users = self.task_generating_users.copy()
        elif phase == 2:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 3:
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users[indices] = True
        elif phase == 4:
            if self.phase_1_users is None:
                # Fallback only; a normal training run never hits this.
                print("错误: 未找到第一阶段的用户数据! 将重新为第四阶段生成10个用户。")
                indices = np.random.choice(NUM_USERS, 10, replace=False)
                self.task_generating_users[indices] = True
            else:
                self.task_generating_users = self.phase_1_users.copy()

        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Reset the episode state (user layout and tasks persist) and
        return the initial GRU observation stack."""
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.prev_total_energy = 0.0
        self.prev_total_delay = 0.0
        self.user_completion_delays.fill(0)
        self.user_offloading_delays.fill(0)
        self.user_computation_delays.fill(0)
        self.user_computation_energies.fill(0)
        self.user_offloading_ratios.fill(0)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED  # reset speed

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()

        # Reset the cumulative reward decomposition.
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] = 0.0

        # Pad the history with the initial observation so the GRU input is full.
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one step with a 4-D action:
        [direction_x, direction_y, speed, offloading_ratio], each in [-1, 1].

        Returns (gru_state, reward, done, info).
        """
        # First two dims: movement direction.
        direction_raw = action[:2]

        # Normalize to a unit vector (zero vector means hover).
        direction_norm = np.linalg.norm(direction_raw)
        if direction_norm > 1e-6:  # avoid division by zero
            movement_direction = direction_raw / direction_norm
        else:
            movement_direction = np.array([0.0, 0.0])

        # Third dim: speed, mapped to [MIN_UAV_SPEED, MAX_UAV_SPEED].
        speed = (action[2] + 1) / 2 * (MAX_UAV_SPEED - MIN_UAV_SPEED) + MIN_UAV_SPEED

        # Fourth dim: offloading ratio, mapped to [0, 1].
        offloading_ratio = (action[3] + 1) / 2

        # Actual displacement: direction x speed (1-second steps).
        movement = movement_direction * speed

        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())

        # Realized displacement may be shorter when clipped at the border.
        actual_movement = self.uav_position - prev_position
        distance_moved = np.linalg.norm(actual_movement)

        # Flight energy: commanded speed drives power; hovering otherwise.
        if distance_moved > 0:
            actual_speed = speed if direction_norm > 1e-6 else 0
            flight_energy_step = self._calculate_flight_energy(distance_moved, actual_speed=actual_speed)
        else:
            flight_energy_step = self._calculate_flight_energy(0, actual_speed=0)

        self.total_flight_energy += flight_energy_step

        # Record current speed (0 when hovering).
        self.current_speed = speed if direction_norm > 1e-6 else 0

        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect every active, uncollected task that is now within range.
        newly_collected = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1

                    # Freeze delay/energy outcomes at the moment of collection.
                    self.user_offloading_ratios[i] = offloading_ratio
                    self.user_offloading_delays[i] = self._calculate_offloading_delay(i, new_distances[i])
                    self.user_completion_delays[i] = self._calculate_completion_delay(i, offloading_ratio)
                    self.user_computation_delays[i] = self.user_completion_delays[i] - self.user_offloading_delays[i]
                    self.user_computation_energies[i] = self._calculate_computation_energy(i, offloading_ratio)

        self.step_count += 1

        completed_indices = np.where(self.collected_tasks & self.task_generating_users)[0]

        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
            avg_offloading_delay = np.mean(self.user_offloading_delays[completed_indices])
            avg_computation_delay = np.mean(self.user_computation_delays[completed_indices])
        else:
            total_delay = 0.0
            total_comp_energy = 0.0
            avg_total_delay, avg_offloading_delay, avg_computation_delay = 0.0, 0.0, 0.0

        total_energy = self.total_flight_energy + total_comp_energy

        reward_info = self.calculate_reward(newly_collected, total_energy, total_delay, new_distances,
                                                      self.last_distances)
        # Remember totals for next step's shaping deltas.
        self.prev_total_energy = total_energy
        self.prev_total_delay = total_delay
        reward = reward_info['total_reward']
        # Accumulate the reward decomposition for episode-level reporting.
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] += reward_info[key]

        self.last_distances = new_distances

        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        self.observation_history.append(self._get_state())

        return self._get_gru_state(), reward, done, {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "flight_energy": self.total_flight_energy,
            "comp_energy": total_comp_energy,
            "delay_breakdown": {
                "avg_offloading_delay": avg_offloading_delay,
                "avg_computation_delay": avg_computation_delay,
                "avg_total_delay": avg_total_delay,
                "total_delay": total_delay,
            },
            "episode_reward_breakdown": self.episode_reward_breakdown
        }

    def _get_state(self):
        """Flat observation vector, all features normalized to ~[0, 1].

        Layout: UAV xy (2) | per-user [distance, collected, active,
        cpu-demand] (NUM_USERS * 4) | step progress (1) | BS distance (1).
        """
        state = np.zeros(2 + NUM_USERS * 4 + 1 + 1)
        state[0:2] = self.uav_position / AREA_SIZE

        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 4
            # Normalize by the area diagonal so distance stays in [0, 1].
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
            state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]

        state[-2] = self.step_count / MAX_STEPS

        # Distance from the UAV to the base station.
        bs_distance = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        state[-1] = bs_distance / np.sqrt(2 * AREA_SIZE ** 2)

        return state

    def _get_gru_state(self):
        """Return the observation history as a (SEQUENCE_LENGTH, state_dim)
        array, padding with fresh observations if the deque is short."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def calculate_reward(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Shaped reward: collection + proximity + terminal completion bonus
        minus step penalty, incremental cost, and terminal cost.

        Returns a dict with 'total_reward' plus its components, each already
        scaled by REWARD_SCALE.
        """
        # Progress bookkeeping.
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users) if sum(self.task_generating_users) > 0 else 1
        completion_rate = collected_required / total_required
        done = (self.step_count >= MAX_STEPS) or (collected_required == total_required)
        progress = self.step_count / MAX_STEPS

        # 1) Proximity reward: moving toward the nearest uncollected user.
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            d_old = old_distances[uncollected_indices]
            d_new = new_distances[uncollected_indices]
            k = np.argmin(d_new)
            dist_diff = d_old[k] - d_new[k]
            # Stronger gradient once within 50 m of the target user.
            proximity_reward = dist_diff * (0.24 if d_new[k] < 50 else 0.12)

        # 2) Collection reward: earlier collections score higher.
        time_factor = max(0.0, 1.0 - progress)
        collection_reward = newly_collected * 30.0 * (1.0 + 0.5 * time_factor)

        # 3) Step penalty: grows mildly and linearly with time.
        step_penalty = 0.15 + 0.3 * progress

        # 4) Incremental cost shaping (reduces sparsity and variance).
        prev_E = getattr(self, "prev_total_energy", 0.0)
        prev_D = getattr(self, "prev_total_delay", 0.0)
        delta_energy = max(0.0, total_energy - prev_E)
        delta_delay = max(0.0, total_delay - prev_D)
        # Small weights so shaping never dominates the collection reward.
        shaping_cost = (DELAY_WEIGHT * 20.0 * delta_delay) + (ENERGY_WEIGHT * 0.0005 * delta_energy)

        # 5) Terminal bonus and terminal cost.
        completion_reward = 0.0
        terminal_cost = 0.0
        if done:
            step_eff = max(0.1, 1.0 - progress)
            base_completion = completion_rate * 400.0
            bonus = 800.0 if completion_rate == 1.0 else 0.0  # slightly below the previous 1000
            completion_reward = (base_completion + bonus) * step_eff

            if collected_required > 0:
                # Slightly lower weights than before to reduce return variance.
                delay_penalty = total_delay * DELAY_WEIGHT * 80.0
                energy_penalty = total_energy * ENERGY_WEIGHT * 0.0010
                terminal_cost = (delay_penalty + energy_penalty) / collected_required

        # 6) Total.
        total_reward = (collection_reward +
                        proximity_reward +
                        completion_reward -
                        step_penalty -
                        shaping_cost -
                        terminal_cost)

        return {
            "total_reward": total_reward * REWARD_SCALE,
            "proximity_reward": proximity_reward * REWARD_SCALE,
            "collection_reward": collection_reward * REWARD_SCALE,
            "completion_reward": completion_reward * REWARD_SCALE,
            # For compatibility with existing logging: cost = incremental
            # + terminal cost, sign-flipped.
            "cost": -(shaping_cost + terminal_cost) * REWARD_SCALE,
            "step_penalty": -step_penalty * REWARD_SCALE
        }

    def render(self, episode=0, clear_output=True):
        """Plot users, base station, UAV trajectory and collection radius,
        then save the figure under results/.

        Fix: the title was previously assigned a second time just before
        saving, overwriting the version that includes the current speed.
        """
        plt.figure(figsize=(10, 10))

        # Title with progress and (when available) the current speed.
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        if hasattr(self, 'current_speed'):
            title += f", 当前速度: {self.current_speed:.1f} m/s"
        plt.title(title)

        # Base station marker.
        plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                    s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
        plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                     textcoords="offset points", xytext=(0, -25),
                     ha='center', fontsize=10, fontweight='bold')

        for i, pos in enumerate(self.user_positions):
            # Green = collected, red = pending, gray = inactive this phase.
            if self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)

            # Annotate collected users with their offloading ratio.
            if self.task_generating_users[i] and self.collected_tasks[i]:
                offload_ratio = self.user_offloading_ratios[i]
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy\nOffload:{offload_ratio:.2f}"
            else:
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy"

            plt.annotate(task_info, (pos[0], pos[1]), fontsize=8, ha='center', va='bottom')

        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]), MAX_DISTANCE_COLLECT, color='blue',
                            fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        # Dashed UAV-to-base-station link.
        plt.plot([self.uav_position[0], BASE_STATION_POSITION[0]],
                 [self.uav_position[1], BASE_STATION_POSITION[1]],
                 'orange', linestyle='--', alpha=0.5, linewidth=1)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        plt.grid(True)
        plt.legend()
        # Ensure the output directory exists before saving.
        os.makedirs("results", exist_ok=True)
        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


class GRUActor(nn.Module):
    """Recurrent deterministic actor.

    Encodes a state sequence with a single-layer GRU, passes the final
    hidden output through two LayerNorm'd MLP layers, and emits a
    tanh-bounded action scaled by max_action.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)
        self.max_action = max_action
        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)
        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights and zero biases for every linear layer.
        linear_layers = (m for m in self.modules() if isinstance(m, nn.Linear))
        for layer in linear_layers:
            nn.init.xavier_uniform_(layer.weight)
            nn.init.constant_(layer.bias, 0.0)

    def forward(self, state, h=None):
        """Map a state sequence to (action, next_hidden).

        state: (B, T, state_dim); h: optional (1, B, hidden_size) initial
        GRU state, zero-initialized when omitted.
        """
        if h is None:
            h = torch.zeros(1, state.size(0), self.hidden_size, device=state.device)
        gru_out, h_next = self.gru(state, h)
        last_step = gru_out[:, -1]
        features = self.ln1(torch.relu(self.layer1(last_step)))
        features = self.ln2(torch.relu(self.layer2(features)))
        action = torch.tanh(self.layer3(features)) * self.max_action
        return action, h_next

class SD3DistributionalCritic(nn.Module):
    """Twin distributional (quantile) critic for SD3.

    Each head encodes the state sequence with its own GRU, concatenates
    the final hidden output with the action, and predicts
    `num_quantiles` quantile values of the return distribution
    (QR-DQN style).
    """

    def __init__(self, state_dim, action_dim):
        super(SD3DistributionalCritic, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.num_quantiles = NUM_QUANTILES
        # Q1 head
        self.q1_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, self.num_quantiles)
        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Q2 head
        self.q2_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, self.num_quantiles)
        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights; small positive bias on all linear layers.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)

    def forward(self, state, action, h1=None, h2=None):
        """Return (q1_quantiles, q2_quantiles, h1_next, h2_next).

        state: (B, T, state_dim); action: (B, action_dim); h1/h2: optional
        (1, B, hidden_size) initial GRU states, zero-initialized if omitted.
        Quantile outputs have shape (B, num_quantiles).
        """
        B = state.size(0)
        if h1 is None:
            h1 = torch.zeros(1, B, self.hidden_size, device=state.device)
        if h2 is None:
            h2 = torch.zeros(1, B, self.hidden_size, device=state.device)

        q1_gru_out, h1_next = self.q1_gru(state, h1)
        q2_gru_out, h2_next = self.q2_gru(state, h2)

        q1_state = q1_gru_out[:, -1]
        q2_state = q2_gru_out[:, -1]

        q1_x = torch.cat([q1_state, action], dim=1)
        q2_x = torch.cat([q2_state, action], dim=1)

        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1_quantiles = self.q1_output(q1)

        q2 = self.q2_ln1(torch.relu(self.q2_layer1(q2_x)))
        q2 = self.q2_ln2(torch.relu(self.q2_layer2(q2)))
        q2_quantiles = self.q2_output(q2)

        return q1_quantiles, q2_quantiles, h1_next, h2_next

    def Q1(self, state, action, h=None):
        """Scalar Q estimate from the first head: the median quantile,
        returned with shape (B, 1). Presumably consumed by the actor
        update — confirm with the trainer code.
        """
        B = state.size(0)
        if h is None:
            h = torch.zeros(1, B, self.hidden_size, device=state.device)
        q1_gru_out, _ = self.q1_gru(state, h)
        q1_state = q1_gru_out[:, -1]
        q1_x = torch.cat([q1_state, action], dim=1)
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1_quantiles = self.q1_output(q1)
        # Fix: use the instance's configured quantile count instead of the
        # module-level NUM_QUANTILES constant (kept consistent with __init__).
        median_idx = self.num_quantiles // 2
        return q1_quantiles[:, median_idx:median_idx + 1]




def quantile_huber_loss(pred, target, taus, kappa=KAPPA):
    """Pairwise quantile Huber loss (standard QR-DQN formulation).

    pred/target: (B, N) quantile estimates; taus: (N,) quantile fractions
    aligned with the prediction axis. Returns the mean over all B*N*N pairs.
    """
    num_quantiles = pred.shape[1]
    taus = taus.to(pred.device)
    # Pairwise TD residuals: diff[b, i, j] = target_j - pred_i, shape (B, N, N).
    diff = target.unsqueeze(1) - pred.unsqueeze(2)
    abs_diff = torch.abs(diff)
    # Huber branch: quadratic inside kappa, linear outside.
    huber = torch.where(
        abs_diff <= kappa,
        0.5 * diff.pow(2),
        kappa * (abs_diff - 0.5 * kappa),
    )
    # Asymmetric quantile weight |tau_i - 1{diff < 0}| on the prediction axis;
    # the indicator is detached so tau weighting carries no gradient.
    quantile_weight = torch.abs(taus.view(1, num_quantiles, 1) - (diff.detach() < 0).float())
    return (quantile_weight * huber).mean()


# ReplayBuffer (unchanged)
class ReplayBuffer:
    """FIFO experience replay with uniform random sampling.

    Oldest transitions are evicted automatically once max_size is reached.
    """

    def __init__(self, max_size=BUFFER_SIZE):
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw up to batch_size transitions without replacement and return
        them as five float32 arrays (states, actions, rewards, next_states,
        dones), each stacked along the batch dimension."""
        draw = min(len(self.buffer), batch_size)
        batch = random.sample(self.buffer, draw)
        columns = zip(*batch)
        return tuple(np.stack(column).astype(np.float32) for column in columns)

    def __len__(self):
        return len(self.buffer)


# EWC (unchanged)
class EWC:
    """Elastic Weight Consolidation.

    Snapshots model parameters at a task boundary, estimates a diagonal
    Fisher information matrix from replayed samples, and later penalizes
    parameter drift away from the snapshot, weighted by that importance.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        self.importance = {}
        self.old_params = {}
        self.fisher_diagonal = {}

    def _calculate_fisher_info(self, replay_buffer):
        """Estimate the diagonal Fisher information from single-sample
        gradients averaged over replayed transitions."""
        fisher = {
            name: torch.zeros_like(param).to(device)
            for name, param in self.model.named_parameters()
            if param.requires_grad
        }
        self.model.train()
        n_samples = min(self.fisher_sample_size, len(replay_buffer))
        if n_samples <= 0:
            return fisher

        for _ in range(n_samples):
            states, actions, _, _, _ = replay_buffer.sample(1)
            states = torch.FloatTensor(states).to(device)
            actions = torch.FloatTensor(actions).to(device)
            self.model.zero_grad()

            if isinstance(self.model, GRUActor):
                if hasattr(self.model, 'sample'):
                    # SAC-style actor exposing a sample() method.
                    _, _, mean = self.model.sample(states)
                    loss = ((mean - actions) ** 2).mean()
                else:
                    # Deterministic actor returns (action, h_next).
                    outputs, _ = self.model(states)
                    loss = ((outputs - actions) ** 2).mean()
            else:
                # Distributional critic: average of the two quantile heads.
                q1, q2, _, _ = self.model(states, actions)
                loss = 0.5 * (q1.mean() + q2.mean())

            loss.backward()
            for name, param in self.model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    fisher[name] += param.grad.pow(2) / n_samples
        return fisher

    def store_task_parameters(self, task_id, replay_buffer):
        """Snapshot the current parameters and recompute their importance."""
        print(f"Storing parameters for task {task_id} and computing Fisher information matrix")
        self.old_params = {
            name: param.data.clone()
            for name, param in self.model.named_parameters()
            if param.requires_grad
        }
        self.importance = self._calculate_fisher_info(replay_buffer)
        print(f"Stored {len(self.old_params)} parameters and computed Fisher matrices")

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Quadratic drift penalty toward the snapshot; 0 before the first
        call to store_task_parameters."""
        loss = 0
        if not self.old_params or not self.importance:
            return loss
        for name, param in self.model.named_parameters():
            if param.requires_grad and name in self.old_params and name in self.importance:
                loss += torch.sum(self.importance[name] * (param - self.old_params[name]).pow(2))
        return lam * loss


# [Core change] SD3 algorithm class
class SD3:
    def __init__(self, state_dim, action_dim, max_action):
        # Actor网络（保持不变）
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # 【核心修改】使用分布式Critic网络
        self.critic = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2
        self.total_it = 0

        # EWC组件
        self.ewc_actor = EWC(self.actor)
        self.ewc_critic = EWC(self.critic)
        self.current_task = 1

        # CLEAR: 冻结教师策略 + 历史回放库
        self.teacher_actor = copy.deepcopy(self.actor).to(device)
        self.teacher_actor.eval()
        for p in self.teacher_actor.parameters():
            p.requires_grad = False
        self.old_memory = ReplayBuffer(max_size=BUFFER_SIZE)
        self.bc_alpha = BC_ALPHA
        self.old_batch_ratio = OLD_BATCH_RATIO

        # SI: 在线路径积分（仅对 Actor）
        self.si_epsilon = SI_EPSILON
        self.si_lambda = SI_LAMBDA
        self._init_si_actor()

        # 学习率调度器
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.act_hidden = None

    def _init_si_actor(self):
        self.si_actor = {}

        for name, p in self.actor.named_parameters():
            if p.requires_grad:
                self.si_actor[name] = {
                    'theta': p.data.clone().detach(),
                    'omega': torch.zeros_like(p.data),
                    'w': torch.zeros_like(p.data),
                }

    def _si_actor_penalty(self):
        reg = 0.0
        for name, p in self.actor.named_parameters():
            if p.requires_grad:
                d = p - self.si_actor[name]['theta']
                reg = reg + (self.si_actor[name]['omega'] * (d ** 2)).sum()
        return reg

    def _si_actor_accumulate(self, prev_params):

        # 在一次 actor 更新后调用：累积 w += -grad * delta
        for name, p in self.actor.named_parameters():
            if p.requires_grad and p.grad is not None:
                delta = p.data - prev_params[name]
                self.si_actor[name]['w'] += (-p.grad * delta).detach()

    def consolidate_si(self):
        # 在阶段切换时调用：把 w 归一化累加进 omega，并重置锚点
        for name, p in self.actor.named_parameters():
            if p.requires_grad:
                delta = p.data - self.si_actor[name]['theta']
                denom = self.si_epsilon + (delta ** 2)
                self.si_actor[name]['omega'] += (self.si_actor[name]['w'] / denom).detach()
                self.si_actor[name]['theta'] = p.data.clone().detach()
                self.si_actor[name]['w'].zero_()

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START, reset_hidden=False):
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)

        state_t = torch.FloatTensor(state).to(device)
        if reset_hidden or self.act_hidden is None or (
                self.act_hidden is not None and self.act_hidden.size(1) != state_t.size(0)):
            self.act_hidden = None  # 让 forward 内部用零隐状态

        with torch.no_grad():
            act_t, self.act_hidden = self.actor(state_t, self.act_hidden)

        action = act_t.cpu().data.numpy().flatten()
        if noise_scale > 0:
            noise = np.zeros_like(action)
            # 仅对方向与速度加较大噪声
            noise[:3] = np.random.normal(0, self.max_action * noise_scale, size=3)
            # 卸载比例维度小噪声
            noise[3] = np.random.normal(0, self.max_action * min(noise_scale, 0.05))
            action = action + noise
        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        print(f"\nSwitching to task {task_id}")
        # 1) SI：在切换点固化当前阶段路径积分
        self.consolidate_si()

        # 2) CLEAR：把当前回放尾段拷贝进旧库，保鲜历史经验
        prev = list(self.memory.buffer)
        if len(prev) > 0:
            keep_old = min(OLD_REPLAY_TAIL, len(prev))
            tail_old = prev[-keep_old:]
            # 扩充旧库（超长会自动丢弃老的）
            self.old_memory.buffer.extend(tail_old)

        # 3) 新库保留一小段尾样本作为热启动，避免彻底冷启动
        keep_new = min(KEEP_NEW_TAIL, len(prev))
        self.memory.buffer = deque(prev[-keep_new:], maxlen=BUFFER_SIZE)

        # 4) 刷新教师策略（冻结）
        self.teacher_actor = copy.deepcopy(self.actor).to(device)
        self.teacher_actor.eval()
        for p in self.teacher_actor.parameters():
            p.requires_grad = False

        # 5) 硬同步 target，重置学习率与调度器
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.critic_target.load_state_dict(self.critic.state_dict())
        for g in self.actor_optimizer.param_groups:
            g['lr'] = ACTOR_LR
        for g in self.critic_optimizer.param_groups:
            g['lr'] = CRITIC_LR
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True)

        self.current_task = task_id
        self.act_hidden = None
        print(f"Target synced, LR reset, teacher refreshed, replay kept tail. New task {task_id} ready.")

    # def switch_task(self, task_id):
    #     print(f"\nSwitching to task {task_id}")
    #     if self.current_task > 0 and len(self.memory) > 0:
    #         self.ewc_actor.store_task_parameters(self.current_task, self.memory)
    #         self.ewc_critic.store_task_parameters(self.current_task, self.memory)
    #     print(f"Clearing replay buffer for new task.")
    #     self.memory.buffer.clear()
    #     self.current_task = task_id
    #     print(f"Reset GRU states for new task {task_id}")
    #     self.act_hidden = None


    def train(self):
        self.total_it += 1

        if len(self.memory) < BATCH_SIZE:
            return

        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        with torch.no_grad():
            # 目标动作（零隐状态）
            next_action_t, _ = self.actor_target(next_state, None)

            # 分维度噪声
            noise = torch.zeros_like(action)
            noise[:, :3].normal_(0, self.policy_noise)
            noise[:, :3] = noise[:, :3].clamp(-self.noise_clip, self.noise_clip)
            noise[:, 3].normal_(0, 0.05 * self.max_action)
            noise[:, 3] = noise[:, 3].clamp(-0.1 * self.max_action, 0.1 * self.max_action)

            next_action = (next_action_t + noise).clamp(-self.max_action, self.max_action)

            # 目标分位数
            target_q1_quantiles, target_q2_quantiles, _, _ = self.critic_target(next_state, next_action)
            target_q_quantiles = torch.min(target_q1_quantiles, target_q2_quantiles)

            reward_expanded = reward.expand(-1, NUM_QUANTILES)
            done_expanded = done.expand(-1, NUM_QUANTILES)
            target_quantiles = reward_expanded + (1 - done_expanded) * GAMMA * target_q_quantiles

        # 当前分位数
        current_q1_quantiles, current_q2_quantiles, _, _ = self.critic(state, action)

        critic_loss_q1 = quantile_huber_loss(current_q1_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss_q2 = quantile_huber_loss(current_q2_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss = critic_loss_q1 + critic_loss_q2

        critic_ewc_loss = 0
        if self.current_task > 1:
            critic_ewc_loss = self.ewc_critic.calculate_ewc_loss()
            critic_loss += critic_ewc_loss

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss = 0
        actor_ewc_loss = 0
        report_actor_loss = 0.0  # 新增：用于返回总的策略损失值

        if self.total_it % self.policy_freq == 0:
            # 策略用零隐状态
            actor_actions, _ = self.actor(state, None)
            actor_loss = -self.critic.Q1(state, actor_actions).mean()

            # === CLEAR: 对历史样本做教师策略行为克隆 ===
            bc_loss = 0.0
            bc_batch_size = 0
            if len(self.old_memory) > 0:
                bc_batch_size = int(BATCH_SIZE * self.old_batch_ratio)
                old_s, _, _, _, _ = self.old_memory.sample(bc_batch_size)
                old_s = torch.FloatTensor(old_s).to(device)
                with torch.no_grad():
                    teacher_a, _ = self.teacher_actor(old_s, None)
                curr_a, _ = self.actor(old_s, None)
                bc_loss = torch.mean((curr_a - teacher_a) ** 2)

            # === SI: 抗遗忘正则 ===
            si_reg = self._si_actor_penalty()

            # 合并损失
            total_actor_loss = actor_loss \
                               + (self.bc_alpha * bc_loss if bc_batch_size > 0 else 0.0) \
                               + (self.si_lambda * si_reg)

            # 记录 step 前参数用于 SI 累积
            prev_params = {n: p.data.clone() for n, p in self.actor.named_parameters() if p.requires_grad}
            report_actor_loss = total_actor_loss.item()
            self.actor_optimizer.zero_grad()
            total_actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # SI：累积路径积分
            self._si_actor_accumulate(prev_params)

            # 软更新 target（保持不变）
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": report_actor_loss,
            "critic_ewc_loss": critic_ewc_loss.item() if isinstance(critic_ewc_loss, torch.Tensor) else critic_ewc_loss,
            "actor_ewc_loss": actor_ewc_loss.item() if isinstance(actor_ewc_loss, torch.Tensor) else actor_ewc_loss
        }

    def update_lr_schedulers(self, reward):
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


# Training entry point (console/plot output format kept identical)
def train():
    """Run the 4-phase continual-learning training loop for the SD3 agent.

    Each phase re-selects which users generate tasks and calls
    ``agent.switch_task`` so the continual-learning machinery (SI
    consolidation, CLEAR replay transfer, teacher refresh) kicks in.
    Training curves, checkpoints and per-phase actor/critic weights are
    written under ``results/``.

    Returns:
        tuple: (agent, env) — the trained SD3 agent and the environment.
    """
    os.makedirs("results", exist_ok=True)
    env = Environment()

    # State layout: UAV position (2) + 4 features per user + 2 extra scalars
    # (includes the base-station distance information).
    state_dim = 2 + NUM_USERS * 4 + 1 + 1
    action_dim = 4  # x_movement, y_movement, speed, offloading_ratio
    max_action = 1

    # Core change: SD3 (distributional TD3) agent.
    agent = SD3(state_dim, action_dim, max_action)
    total_episodes = 800
    episodes_per_task = 200
    eval_freq = 50

    rewards_history = []
    smoothed_rewards = []
    collection_history = []
    energy_history = []
    delay_history = []
    best_reward = -float('inf')
    best_collection = 0
    losses = {"critic": [], "actor": []}

    start_time = time.time()
    for phase in range(1, 5):
        env.update_task_generating_users(phase)
        agent.switch_task(phase)
        # Same linear noise-decay schedule in every phase.
        phase_noise = np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, episodes_per_task)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode
            state = env.reset()
            agent.act_hidden = None  # fresh GRU state for each episode
            episode_reward = 0
            last_collection = 0
            episode_losses = {"critic": [], "actor": []}
            current_noise = phase_noise[episode - 1]

            for step in range(1, MAX_STEPS + 1):
                action = agent.select_action(state, noise_scale=current_noise)
                next_state, reward, done, info = env.step(action)
                agent.memory.add(state, action, reward, next_state, done)
                loss_info = agent.train()
                if loss_info:
                    episode_losses["critic"].append(loss_info["critic_loss"])
                    episode_losses["actor"].append(loss_info["actor_loss"])

                    # Remember the most recent EWC losses for the progress print.
                    agent.last_ewc_losses = {
                        'actor_ewc': loss_info.get("actor_ewc_loss", 0.0),
                        'critic_ewc': loss_info.get("critic_ewc_loss", 0.0)
                    }

                state = next_state
                episode_reward += reward
                last_collection = info["collected_required"]
                if done:
                    if global_episode % eval_freq == 0:
                        print(f"--- Episode {global_episode} finished. Generating final trajectory plot. ---")
                        env.render(global_episode)
                    break

            rewards_history.append(episode_reward)
            collection_history.append(last_collection)
            energy_history.append(info["energy"])
            delay_history.append(info["delay"])

            # 10-episode moving average of the reward.
            if len(rewards_history) >= 10:
                smoothed_rewards.append(np.mean(rewards_history[-10:]))
            else:
                smoothed_rewards.append(episode_reward)
            if episode_losses["critic"]: losses["critic"].append(np.mean(episode_losses["critic"]))
            if episode_losses["actor"]: losses["actor"].append(np.mean(episode_losses["actor"]))
            agent.update_lr_schedulers(smoothed_rewards[-1])

            # Best-model tracking: collection ratio first, episode reward as tiebreak.
            current_required = info["total_required"]
            collection_ratio = last_collection / current_required if current_required > 0 else 0
            if collection_ratio > best_collection or (
                    collection_ratio == best_collection and episode_reward > best_reward):
                best_reward = episode_reward
                best_collection = collection_ratio
                torch.save(agent.actor.state_dict(), f"results/best_actor_phase_{phase}.pth")

            elapsed_time = time.time() - start_time

            collected_required = info.get("collected_required", 0)
            total_required = info.get("total_required", 1)

            # Mean offloading ratio over collected task-generating users.
            avg_offloading_ratio = 0.0
            completed_tasks_with_offload = []
            for i in range(NUM_USERS):
                if env.task_generating_users[i] and env.collected_tasks[i]:
                    completed_tasks_with_offload.append(env.user_offloading_ratios[i])

            if completed_tasks_with_offload:
                avg_offloading_ratio = np.mean(completed_tasks_with_offload)

            avg_actor_loss = np.mean(episode_losses["actor"]) if episode_losses["actor"] else 0.0
            avg_critic_loss = np.mean(episode_losses["critic"]) if episode_losses["critic"] else 0.0

            # Latest EWC loss components (zero while EWC is disabled).
            # BUG FIX: the old guard also tested `len(episode_losses) > 0`, which
            # measured the dict itself (always 2 keys) and was therefore a no-op;
            # the hasattr check below is the condition that actually matters.
            avg_actor_ewc_loss = 0.0
            avg_critic_ewc_loss = 0.0
            if episode_losses["actor"] and hasattr(agent, 'last_ewc_losses'):
                avg_actor_ewc_loss = agent.last_ewc_losses.get('actor_ewc', 0.0)
                avg_critic_ewc_loss = agent.last_ewc_losses.get('critic_ewc', 0.0)

            # Energy-breakdown string for the progress line.
            energy_str = ""
            if 'flight_energy' in info and 'comp_energy' in info:
                energy_str = f"E(Flight:{info['flight_energy']:.1f} Comp:{info['comp_energy']:.1f})"

            # Delay-breakdown string for the progress line.
            delay_str = ""
            if 'delay_breakdown' in info:
                db = info['delay_breakdown']
                delay_str = f"D(Tot:{db['total_delay']:.2f}s AvgComp:{db['avg_computation_delay']:.3f}s AvgOff:{db['avg_offloading_delay']:.3f}s)"

            # Cumulative reward breakdown for the finished episode.
            # BUG FIX: removed a dead earlier assignment of `reward_str` built
            # from the per-step 'reward_breakdown' — it was unconditionally
            # overwritten by this block.
            rb = info["episode_reward_breakdown"]
            reward_str = (f"Pro:{rb['proximity_reward']:.1f} "
                          f"Col:{rb['collection_reward']:.1f} "
                          f"Comp:{rb['completion_reward']:.1f} "
                          f"Cost:{rb['cost']:.1f} "
                          f"Step:{rb['step_penalty']:.1f}")

            # Per-episode progress line (format intentionally unchanged).
            print(
                f"P:{phase} Ep {episode:3d}/{episodes_per_task} "
                f"Tasks:{collected_required:2d}/{total_required:2d} "
                f"Steps:{env.step_count:3d} "
                f"Speed:{env.current_speed:.1f} m/s "
                f"Noise:{current_noise:.3f} "
                f"AvgOffload: {avg_offloading_ratio:.2f} "
                f"Loss(A/C/EWC_A/EWC_C) {avg_actor_loss:.3f}/{avg_critic_loss:.3f}/{avg_actor_ewc_loss:.3f}/{avg_critic_ewc_loss:.3f} | "
                f"Total Rwd: {episode_reward:.2f} "
                f"[{reward_str}] | "
                f"Total E: {info.get('energy', 0):.1f} "
                f"[{energy_str}] | "
                f"Avg D: {info.get('delay', 0):.3f}s "
                f"[{delay_str}] | "
                f"Time: {elapsed_time:.1f}s"
            )

            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                # Periodic training-curve plots (layout kept identical).
                plt.figure(figsize=(25, 5))

                plt.subplot(1, 5, 1)
                plt.plot(rewards_history, alpha=0.3, color='blue', label='Raw')
                plt.plot(smoothed_rewards, color='red', label='Smoothed')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
                plt.axvline(x=3 * episodes_per_task, color='orange', linestyle='--', label='Phase 3->4')

                plt.title("Reward")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                plt.subplot(1, 5, 2)
                plt.plot(collection_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Collected Tasks")
                plt.xlabel("Episode")
                plt.ylabel("Number of Tasks")
                plt.grid(True)

                plt.subplot(1, 5, 3)
                plt.plot(energy_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Total Energy")
                plt.xlabel("Episode")
                plt.ylabel("Energy")
                plt.grid(True)

                plt.subplot(1, 5, 4)
                plt.plot(delay_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Avg Delay")
                plt.xlabel("Episode")
                plt.ylabel("Delay (s)")
                plt.grid(True)

                plt.subplot(1, 5, 5)
                if losses["critic"]: plt.plot(losses["critic"], label='Critic Loss')
                if losses["actor"]: plt.plot(losses["actor"], label='Actor Loss')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Training Loss")
                plt.xlabel("Episode")
                plt.ylabel("Loss")
                plt.legend()
                plt.grid(True)

                plt.tight_layout()
                plt.savefig(f"results/training_curves_episode_{global_episode}.png")
                plt.close()

                # Full resumable checkpoint (networks, optimizers, histories).
                torch.save({
                    'actor_state_dict': agent.actor.state_dict(),
                    'critic_state_dict': agent.critic.state_dict(),
                    'actor_optimizer': agent.actor_optimizer.state_dict(),
                    'critic_optimizer': agent.critic_optimizer.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history': rewards_history,
                    'collection_history': collection_history,
                    'energy_history': energy_history,
                    'delay_history': delay_history,
                    'best_reward': best_reward,
                    'best_collection': best_collection
                }, f"results/checkpoint_episode_{global_episode}.pt")

        # End-of-phase snapshot used later by test_and_visualize().
        torch.save(agent.actor.state_dict(), f"results/actor_phase_{phase}.pth")
        torch.save(agent.critic.state_dict(), f"results/critic_phase_{phase}.pth")

    print(f"Training completed! Best result: {best_collection * 100:.1f}% tasks, Reward: {best_reward:.2f}")
    return agent, env


# Evaluation & visualization function (logic unchanged)
def test_and_visualize(agent, env, model_path="results/actor_phase_4.pth", phase=4):
    """Load a saved actor, run one deterministic episode and plot the results.

    Args:
        agent: trained SD3 agent (its actor weights are overwritten from disk).
        env: the Environment instance to evaluate in.
        model_path: path of the actor state-dict to load.
        phase: task phase whose user layout should be activated.
    """
    agent.actor.load_state_dict(torch.load(model_path))
    agent.actor.eval()
    env.update_task_generating_users(phase)
    state = env.reset()
    agent.act_hidden = None  # fresh recurrent state for the evaluation episode
    total_reward = 0
    step_rewards = []
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS)
    collection_order = []

    for step in range(1, MAX_STEPS + 1):
        action = agent.select_action(state, noise_scale=0)  # greedy policy, no noise
        # NOTE(review): the start position is recorded both above and on the first
        # iteration here, so trajectory[0] == trajectory[1]; the trajectory[step]
        # lookups below depend on this offset — confirm intended.
        trajectory.append(env.uav_position.copy())
        collected_before = env.collected_tasks.copy()
        next_state, reward, done, info = env.step(action)
        # Record at which step each newly-collected task was picked up.
        for i in range(NUM_USERS):
            if env.task_generating_users[i] and env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step
                collection_order.append(i)
        total_reward += reward
        step_rewards.append(reward)
        state = next_state
        if step % 5 == 0 or done:
            env.render(step)
        if done:
            break

    trajectory = np.array(trajectory)
    plt.figure(figsize=(12, 10))

    # Draw the base station (MEC server).
    plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
    plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                 textcoords="offset points", xytext=(0, -25),
                 ha='center', fontsize=10, fontweight='bold')

    # Draw users, color-coded: green = collected, red = missed, gray = no task.
    for i, (x, y) in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            if env.collected_tasks[i]:
                color = 'green'
                plt.scatter(x, y, s=150, c=color, marker='o')
                # Show the offloading ratio chosen for this collected task.
                offload_ratio = env.user_offloading_ratios[i]
                plt.annotate(f"用户 {i + 1}\n(步数 {int(collection_times[i])})\nOffload: {offload_ratio:.2f}",
                             (x, y), textcoords="offset points", xytext=(0, 10),
                             ha='center', fontsize=10)
            else:
                color = 'red'
                plt.scatter(x, y, s=150, c=color, marker='o')
                plt.annotate(f"用户 {i + 1}\n(未收集)", (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
        else:
            color = 'gray'
            plt.scatter(x, y, s=100, c=color, marker='o')
            plt.annotate(f"用户 {i + 1}\n(不产生任务)", (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)

    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    # Annotate every 10th trajectory point with its index.
    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]), fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Dashed line from the UAV position at collection time to each collected user.
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and env.collected_tasks[i]:
            step = int(collection_times[i])
            if step < len(trajectory):
                uav_pos = trajectory[step]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]], 'g--', alpha=0.5)

    # Link between the UAV's final position and the base station.
    plt.plot([trajectory[-1, 0], BASE_STATION_POSITION[0]],
             [trajectory[-1, 1], BASE_STATION_POSITION[1]],
             'orange', linestyle='--', alpha=0.5, linewidth=2, label='UAV-BS Link')

    plt.title(
        f"UAV任务收集轨迹 (阶段{phase}: 收集 {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)} 任务, 步数: {env.step_count})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.legend()
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig(f"results/final_uav_trajectory_phase_{phase}.png")
    plt.close()

    # Per-step and cumulative reward curves for this evaluation episode.
    plt.figure(figsize=(15, 5))
    plt.subplot(1, 2, 1)
    plt.plot(step_rewards)
    plt.title("步奖励")
    plt.xlabel("步数")
    plt.ylabel("奖励")
    plt.grid(True)
    plt.subplot(1, 2, 2)
    plt.plot(np.cumsum(step_rewards))
    plt.title("累计奖励")
    plt.xlabel("步数")
    plt.ylabel("累计奖励")
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(f"results/test_rewards_phase_{phase}.png")
    plt.close()

    print(f"\n测试结果 (阶段 {phase}):")
    collected_count = sum(env.collected_tasks & env.task_generating_users)
    total_count = sum(env.task_generating_users)
    percentage = collected_count / total_count * 100 if total_count > 0 else 0
    print(f"收集任务: {collected_count}/{total_count} ({percentage:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

    # Offloading-decision statistics over the collected tasks.
    print("\n卸载决策统计:")
    collected_indices = [i for i in range(NUM_USERS)
                         if env.task_generating_users[i] and env.collected_tasks[i]]
    if collected_indices:
        avg_offload_ratio = np.mean([env.user_offloading_ratios[i] for i in collected_indices])
        print(f"平均卸载比例: {avg_offload_ratio:.3f}")
        local_count = sum(1 for i in collected_indices if env.user_offloading_ratios[i] < 0.1)
        mixed_count = sum(1 for i in collected_indices if 0.1 <= env.user_offloading_ratios[i] < 0.9)
        remote_count = sum(1 for i in collected_indices if env.user_offloading_ratios[i] >= 0.9)
        print(f"本地处理: {local_count}, 混合处理: {mixed_count}, 远程处理: {remote_count}")

    # Per-user collection detail, ordered by collection step.
    print("\n任务收集详情:")
    collection_indices = [(i, int(collection_times[i])) for i in range(NUM_USERS)
                          if env.task_generating_users[i] and env.collected_tasks[i]]
    collection_indices.sort(key=lambda x: x[1])
    for i, step in collection_indices:
        offload_ratio = env.user_offloading_ratios[i]
        print(f"用户 {i + 1}: 在步数 {step} 收集, 卸载比例: {offload_ratio:.3f}")
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and not env.collected_tasks[i]:
            print(f"用户 {i + 1}: 未收集")




if __name__ == "__main__":
    # Run the full curriculum training, then evaluate the saved actor of each phase.
    agent, env = train()
    banner = "=" * 60
    print("\n" + banner)
    print("训练完成！开始测试各阶段模型性能...")
    print(banner)
    for phase in (1, 2, 3, 4):
        print(f"\n{'=' * 20} 测试阶段 {phase} {'=' * 20}")
        test_and_visualize(agent, env, model_path=f"results/actor_phase_{phase}.pth", phase=phase)