import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time

# --- Constants and hyperparameter definitions (unchanged from the provided code) ---
# Reproducibility: seed every RNG source used below (torch CPU/CUDA, numpy, random).
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
TIMESTEP = 1.0  # seconds per environment step
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Matplotlib: SimHei so CJK plot labels render; keep minus signs displayable.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Episode / environment layout.
EPISODES_PER_TASK = 200
AREA_SIZE = 200            # square service-area side length (m)
NUM_USERS = 10
MAX_STEPS = 300            # hard episode length cap
MAX_DISTANCE_COLLECT = 15  # 2D radius (m) within which the UAV collects a user's task

# UAV platform parameters.
UAV_HEIGHT = 30.0               # flight altitude (m)
UAV_COMPUTE_CAPACITY = 1e10     # onboard compute (CPU cycles / s)
MIN_UAV_SPEED = 2.0
MAX_UAV_SPEED = 20.0
DEFAULT_UAV_SPEED = 10.0

# Ground base station (offload target).
BASE_STATION_POSITION = np.array([0.0, 0.0])
BASE_STATION_COMPUTE_CAPACITY = 2e11  # CPU cycles / s
BASE_STATION_HEIGHT = 10.0

# SD3/TD3-style learning hyperparameters.
ACTOR_LR = 3e-5
CRITIC_LR = 3e-5
GAMMA = 0.99
TAU = 0.005                 # Polyak coefficient for target-network soft updates
BUFFER_SIZE = 200000
BATCH_SIZE = 256
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.15         # scale applied to the per-component reward breakdown

# Distributional critic: quantile midpoints tau_i = (2i - 1) / (2N), i = 1..N.
NUM_QUANTILES = 51
QUANTILE_TAU = torch.FloatTensor([(2 * i - 1) / (2.0 * NUM_QUANTILES) for i in range(1, NUM_QUANTILES + 1)]).to(device)
KAPPA = 1.0                 # Huber threshold for the quantile regression loss

# Recurrent observation encoder.
SEQUENCE_LENGTH = 1         # observation history length fed to the GRU
HIDDEN_SIZE = 128

# Communication model.
BANDWIDTH = 1e6             # Hz
USER_TRANSMIT_POWER = 0.1   # W
CHANNEL_GAIN_REF_DB = 30.0
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH  # thermal noise power (W)
RICE_FACTOR = 5             # Rician K-factor (LoS-to-scatter power ratio)

# Per-user task ranges: [min, max] payload bits and required CPU cycles.
TASK_SIZE_BITS = [1e6, 2e6]
TASK_CPU_CYCLES = [5e8, 15e8]

# Rotary-wing flight-power model coefficients.
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81
AIR_DENSITY = 1.225
ROTOR_RADIUS = 0.4
NUM_ROTORS = 4
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-27  # C_eff in E = C_eff * f^2 * cycles

# Delay-vs-energy weighting used in the terminal cost term of the reward.
DELAY_WEIGHT = 0.7
ENERGY_WEIGHT = 0.3
DELAY_SCALE = 100.0
ENERGY_SCALE = 0.001

class Environment:
    """UAV-assisted mobile edge computing (MEC) task-collection environment.

    One UAV flies over a square area of ``AREA_SIZE`` metres, collects a
    computation task from each task-generating user it passes within
    ``MAX_DISTANCE_COLLECT`` metres of, and — at collection time — fixes the
    fraction of that task to be offloaded onward to the base station.

    Action layout (each component in [-1, 1]):
        action[0:2]  movement direction (normalised internally),
        action[2]    flight speed, mapped to [MIN_UAV_SPEED, MAX_UAV_SPEED],
        action[3]    offloading ratio, mapped to [0, 1].

    The observation is a flat vector (see ``_get_state``) stacked into a
    length-``SEQUENCE_LENGTH`` history for the GRU-based networks.
    """

    def __init__(self):
        # Static layout drawn once per Environment: user positions and task demands.
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        # Mask of users that currently generate tasks (changed per phase).
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        # Per-user bookkeeping, filled at the moment a task is collected.
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.total_flight_energy = 0

        self.user_offloading_ratios = np.zeros(NUM_USERS)

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1

        # Running per-episode totals of each (REWARD_SCALE-scaled) reward component.
        self.episode_reward_breakdown = {
            'collection_reward': 0.0,
            'proximity_reward': 0.0,
            'completion_reward': 0.0,
            'cost': 0.0,
            'step_penalty': 0.0
        }

    def _calculate_rice_channel_gain(self, distance_2d, height=UAV_HEIGHT):
        """Sample an instantaneous Rician-faded channel gain at a 2D distance.

        Combines a deterministic distance-based path loss with a random
        Rician fading realisation (K = RICE_FACTOR).  Note this is stochastic:
        repeated calls at the same distance return different gains.
        """
        distance_3d = np.sqrt(distance_2d ** 2 + height ** 2)
        if distance_3d < 1.0: distance_3d = 1.0  # avoid gain blow-up at near-zero range
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        K = RICE_FACTOR
        h_los = 1.0
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)  # CN(0, 1) scatter component
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """Return the user -> UAV upload delay (s) via the Shannon rate."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return self.task_sizes[user_index] / data_rate

    def _calculate_uav_to_bs_delay(self, task_size):
        """Return the UAV -> base-station relay delay (s) for ``task_size`` bits.

        NOTE(review): the link budget reuses USER_TRANSMIT_POWER for the UAV's
        transmitter — confirm the UAV is meant to transmit at the same power
        as a ground user.
        """
        bs_distance_2d = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        height_diff = abs(UAV_HEIGHT - BASE_STATION_HEIGHT)
        channel_gain = self._calculate_rice_channel_gain(bs_distance_2d, height_diff)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return task_size / data_rate

    def _calculate_completion_delay(self, user_index, offloading_ratio):
        """End-to-end completion delay for one user's task.

        ratio == 0: compute everything on the UAV.
        ratio == 1: relay everything to the base station.
        otherwise:  split; UAV and BS branches run in parallel, so the
                    completion delay is the max of the two branch delays.
        The user -> UAV upload of the full task is paid in every case.
        """
        total_task_size = self.task_sizes[user_index]
        total_cpu_cycles = self.task_cpu_cycles[user_index]

        user_to_uav_delay = self._calculate_offloading_delay(user_index,
                                                             np.linalg.norm(
                                                                 self.uav_position - self.user_positions[user_index]))

        if offloading_ratio == 0:
            # Fully local (on the UAV).
            local_computation_delay = total_cpu_cycles / UAV_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + local_computation_delay

        elif offloading_ratio == 1:
            # Fully offloaded to the base station.
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(total_task_size)
            bs_computation_delay = total_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

        else:
            # Partial offloading: split bits and cycles proportionally.
            local_task_size = total_task_size * (1 - offloading_ratio)
            bs_task_size = total_task_size * offloading_ratio

            local_cpu_cycles = total_cpu_cycles * (1 - offloading_ratio)
            bs_cpu_cycles = total_cpu_cycles * offloading_ratio

            local_computation_delay = local_cpu_cycles / UAV_COMPUTE_CAPACITY
            local_total_delay = user_to_uav_delay + local_computation_delay

            uav_to_bs_delay = self._calculate_uav_to_bs_delay(bs_task_size)
            bs_computation_delay = bs_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            bs_total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

            # Branches execute concurrently; the slower one bounds completion.
            total_delay = max(local_total_delay, bs_total_delay)

        return total_delay

    def _calculate_flight_energy(self, distance_moved, actual_speed=None, time_delta=1.0):
        """Flight energy (J) over ``time_delta`` seconds at the given speed.

        Rotary-wing power model: induced + blade-profile + parasite terms.
        NOTE(review): the induced term places P_INDUCED_COEFF inside the
        square root where a hover induced-velocity constant normally appears —
        confirm this matches the intended power model from the reference paper.
        """
        if actual_speed is None:
            speed = distance_moved / time_delta
        else:
            speed = actual_speed

        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index, offloading_ratio):
        """Energy (J) the UAV spends computing the locally-kept task share.

        Uses the CMOS dynamic-power model E = C_eff * f^2 * cycles; only the
        (1 - ratio) share computed on the UAV is billed.
        """
        local_cpu_cycles = self.task_cpu_cycles[user_index] * (1 - offloading_ratio)
        working_frequency = UAV_COMPUTE_CAPACITY
        energy = EFFECTIVE_SWITCHED_CAPACITANCE * (working_frequency ** 2) * local_cpu_cycles
        return energy

    def update_task_generating_users(self, phase):
        """Select which users generate tasks for the given curriculum phase.

        Phase 1: all users; phase 2: a random 9 of 10; later phases: a
        random 8 of 10.
        """
        self.current_phase = phase
        if phase == 1:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 2:
            indices = np.random.choice(NUM_USERS, 9, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
        else:
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Reset per-episode state (UAV pose, collections, accumulators).

        User positions, task demands and the task-generating mask are
        deliberately NOT re-drawn here.  Returns the initial GRU observation
        stack of shape (SEQUENCE_LENGTH, state_dim).
        """
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        self.user_completion_delays.fill(0)
        self.user_offloading_delays.fill(0)
        self.user_computation_delays.fill(0)
        self.user_computation_energies.fill(0)
        self.user_offloading_ratios.fill(0)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()

        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] = 0.0

        # Prime the history with copies of the initial state so the GRU input
        # always has a full SEQUENCE_LENGTH window.
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one timestep.

        Decodes the action, moves the UAV (clipped to the area), bills flight
        energy, collects tasks in range (fixing their offloading ratio to
        this step's decoded value), and computes the shaped reward.

        Returns (next_gru_state, reward, done, info).
        """
        direction_raw = action[:2]

        # Normalise the direction; a near-zero vector means "hover".
        direction_norm = np.linalg.norm(direction_raw)
        if direction_norm > 1e-6:
            movement_direction = direction_raw / direction_norm
        else:
            movement_direction = np.array([0.0, 0.0])

        # Map action[2] from [-1, 1] to [MIN_UAV_SPEED, MAX_UAV_SPEED].
        speed = (action[2] + 1) / 2 * (MAX_UAV_SPEED - MIN_UAV_SPEED) + MIN_UAV_SPEED

        # Map action[3] from [-1, 1] to an offloading ratio in [0, 1].
        offloading_ratio = (action[3] + 1) / 2

        movement = movement_direction * speed  # metres this step (implicit 1 s timestep)

        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())

        actual_movement = self.uav_position - prev_position
        distance_moved = np.linalg.norm(actual_movement)

        # Flight energy is billed at the commanded speed for a full timestep,
        # even when clipping at the boundary shortened the actual move.
        if distance_moved > 0:
            actual_speed = speed if direction_norm > 1e-6 else 0
            flight_energy_step = self._calculate_flight_energy(distance_moved, actual_speed=actual_speed)
        else:
            flight_energy_step = self._calculate_flight_energy(0, actual_speed=0)

        self.total_flight_energy += flight_energy_step

        self.current_speed = speed if direction_norm > 1e-6 else 0

        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect every uncollected, task-generating user now within range and
        # freeze its offloading decision / delay / energy figures.
        newly_collected = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1

                    self.user_offloading_ratios[i] = offloading_ratio
                    self.user_offloading_delays[i] = self._calculate_offloading_delay(i, new_distances[i])
                    self.user_completion_delays[i] = self._calculate_completion_delay(i, offloading_ratio)
                    self.user_computation_delays[i] = self.user_completion_delays[i] - self.user_offloading_delays[i]
                    self.user_computation_energies[i] = self._calculate_computation_energy(i, offloading_ratio)

        self.step_count += 1

        # Aggregate delay/energy statistics over the tasks collected so far.
        completed_indices = np.where(self.collected_tasks & self.task_generating_users)[0]

        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
            avg_offloading_delay = np.mean(self.user_offloading_delays[completed_indices])
            avg_computation_delay = np.mean(self.user_computation_delays[completed_indices])
        else:
            total_delay = 0.0
            total_comp_energy = 0.0
            avg_total_delay, avg_offloading_delay, avg_computation_delay = 0.0, 0.0, 0.0

        total_energy = self.total_flight_energy + total_comp_energy

        reward_info = self.calculate_reward(newly_collected, total_energy, total_delay, new_distances,
                                            self.last_distances)
        reward = reward_info['total_reward']
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] += reward_info[key]

        self.last_distances = new_distances

        # Episode ends at the step cap or when every required task is collected.
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        self.observation_history.append(self._get_state())

        return self._get_gru_state(), reward, done, {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "flight_energy": self.total_flight_energy,
            "comp_energy": total_comp_energy,
            "delay_breakdown": {
                "avg_offloading_delay": avg_offloading_delay,
                "avg_computation_delay": avg_computation_delay,
                "avg_total_delay": avg_total_delay,
                "total_delay": total_delay,
            },
            "episode_reward_breakdown": self.episode_reward_breakdown
        }

    def _get_state(self):
        """Build the flat observation vector.

        Layout (length 2 + 4 * NUM_USERS + 2):
          [0:2]           UAV position / AREA_SIZE,
          per user i:     normalised distance, collected flag, generating flag,
                          normalised CPU demand,
          [-2]            step_count / MAX_STEPS,
          [-1]            normalised distance to the base station.
        """
        state = np.zeros(2 + NUM_USERS * 4 + 1 + 1)
        state[0:2] = self.uav_position / AREA_SIZE

        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 4
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # normalise by area diagonal
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
            state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]

        state[-2] = self.step_count / MAX_STEPS

        bs_distance = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        state[-1] = bs_distance / np.sqrt(2 * AREA_SIZE ** 2)

        return state

    def _get_gru_state(self):
        """Return the stacked observation history, padding with the current
        state if the deque is short (shape: (SEQUENCE_LENGTH, state_dim))."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def calculate_reward(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Compute the shaped step reward and its component breakdown.

        Components: per-collection bonus (time-decayed), proximity shaping
        toward the nearest uncollected user, a growing per-step penalty, and
        — only on the terminal step — a completion bonus and a delay/energy
        cost.  NOTE(review): the returned breakdown entries are multiplied by
        REWARD_SCALE while 'total_reward' is the unscaled sum — confirm this
        asymmetry is intended for logging only.
        """
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users) if sum(self.task_generating_users) > 0 else 1
        completion_rate = collected_required / total_required

        # Shaping: reward progress toward the closest uncollected user,
        # weighted more strongly once within 50 m.
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            uncollected_distances_old = old_distances[uncollected_indices]
            uncollected_distances_new = new_distances[uncollected_indices]
            closest_idx = np.argmin(uncollected_distances_new)
            dist_diff = uncollected_distances_old[closest_idx] - uncollected_distances_new[closest_idx]

            if uncollected_distances_new[closest_idx] < 50:
                proximity_reward = dist_diff * 0.5
            else:
                proximity_reward = dist_diff * 0.3

        # Earlier collections earn up to a 50% bonus.
        time_factor = max(0, 1 - self.step_count / MAX_STEPS)
        collection_reward = newly_collected * 30.0 * (1 + time_factor * 0.5)

        # Per-step penalty ramps from 0.25 to 0.75 over the episode.
        progress = self.step_count / MAX_STEPS
        step_penalty = 0.25 + 0.5 * progress

        completion_reward = 0
        done = (self.step_count >= MAX_STEPS) or (collected_required == total_required)
        if done:
            # Terminal bonus scaled by how quickly the episode finished.
            step_efficiency = max(0.1, 1 - self.step_count / MAX_STEPS)
            base_completion = completion_rate * 200
            bonus = 500 if completion_rate == 1.0 else 0
            completion_reward = (base_completion + bonus) * step_efficiency

        cost = 0
        if done and collected_required > 0:
            # Terminal delay/energy cost, averaged per collected task.
            delay_penalty = total_delay * DELAY_WEIGHT * 1200
            energy_penalty = total_energy * ENERGY_WEIGHT * 0.015
            cost = (delay_penalty + energy_penalty) / collected_required

        total_reward = (collection_reward +
                        proximity_reward +
                        completion_reward -
                        step_penalty -
                        cost)
        return {
            'total_reward': total_reward,
            'proximity_reward': proximity_reward * REWARD_SCALE,
            'collection_reward': collection_reward * REWARD_SCALE,
            'completion_reward': completion_reward * REWARD_SCALE,
            'cost': -cost * REWARD_SCALE,
            'step_penalty': -step_penalty * REWARD_SCALE
        }

    def render(self, episode=0, clear_output=True):
        # Intentionally a no-op in this variant (visualisation removed).
        pass


class GRUActor(nn.Module):
    """Recurrent deterministic policy: a GRU encoder feeding an MLP head.

    Takes a (batch, seq, state_dim) observation sequence and returns a
    tanh-squashed action scaled by ``max_action``.  The GRU hidden state is
    cached on the module between calls and rebuilt whenever the batch size
    changes or a reset is requested.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        # Module creation order is kept fixed: it determines the RNG draw
        # order for parameter initialisation and the state_dict key layout.
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)
        self.max_action = max_action
        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)
        self.hidden = None
        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights with zero biases for every linear layer.
        for module in self.modules():
            if not isinstance(module, nn.Linear):
                continue
            nn.init.xavier_uniform_(module.weight)
            nn.init.constant_(module.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        """Map an observation sequence to an action in [-max_action, max_action]."""
        n = state.size(0)
        hidden_stale = self.hidden is None or self.hidden.size(1) != n
        if reset_hidden or hidden_stale:
            self.reset_hidden(n)

        seq_out, self.hidden = self.gru(state, self.hidden.to(state.device))
        feats = seq_out[:, -1]  # encoding of the final time step only
        feats = self.ln1(torch.relu(self.layer1(feats)))
        feats = self.ln2(torch.relu(self.layer2(feats)))
        return self.max_action * torch.tanh(self.layer3(feats))

    def reset_hidden(self, batch_size=1):
        """Install a fresh zero hidden state (num_layers=1) on the global device."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class SD3DistributionalCritic(nn.Module):
    """Twin distributional critic with per-head GRU encoders.

    Each of the two Q heads encodes the (batch, seq, state_dim) observation
    sequence with its own GRU, concatenates the action, and outputs
    ``NUM_QUANTILES`` quantile estimates of the return distribution.
    Hidden states are cached per head and rebuilt on batch-size changes.
    """

    def __init__(self, state_dim, action_dim):
        super(SD3DistributionalCritic, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.num_quantiles = NUM_QUANTILES

        # Q1 head: GRU encoder + 2-layer MLP over [features, action].
        self.q1_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, self.num_quantiles)
        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Q2 head: identical architecture, independent parameters.
        self.q2_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, self.num_quantiles)
        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        self.q1_hidden = None
        self.q2_hidden = None
        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights; biases start at 0.01 (unlike the actor's 0.0).
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)

    def forward(self, state, action, reset_hidden=False):
        """Return both heads' quantile estimates, each (batch, NUM_QUANTILES)."""
        batch_size = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch_size:
            self.reset_q1_hidden(batch_size)
        if reset_hidden or self.q2_hidden is None or self.q2_hidden.size(1) != batch_size:
            self.reset_q2_hidden(batch_size)

        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden.to(state.device))
        q2_gru_out, self.q2_hidden = self.q2_gru(state, self.q2_hidden.to(state.device))

        # Use only the final time step's encoding from each GRU.
        q1_state = q1_gru_out[:, -1]
        q2_state = q2_gru_out[:, -1]

        q1_x = torch.cat([q1_state, action], dim=1)
        q2_x = torch.cat([q2_state, action], dim=1)

        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1_quantiles = self.q1_output(q1)

        q2 = self.q2_ln1(torch.relu(self.q2_layer1(q2_x)))
        q2 = self.q2_ln2(torch.relu(self.q2_layer2(q2)))
        q2_quantiles = self.q2_output(q2)

        return q1_quantiles, q2_quantiles

    def Q1(self, state, action, reset_hidden=False):
        """Return a scalar Q estimate from head 1 for the actor update.

        Uses the MEDIAN quantile (index NUM_QUANTILES // 2), shape (batch, 1),
        rather than the mean over quantiles — a deliberate robust choice here.
        """
        batch_size = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch_size:
            self.reset_q1_hidden(batch_size)

        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden.to(state.device))
        q1_state = q1_gru_out[:, -1]
        q1_x = torch.cat([q1_state, action], dim=1)
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1_quantiles = self.q1_output(q1)

        median_idx = NUM_QUANTILES // 2
        return q1_quantiles[:, median_idx:median_idx + 1]

    def reset_hidden(self, batch_size=1):
        """Reset both heads' GRU hidden states."""
        self.reset_q1_hidden(batch_size)
        self.reset_q2_hidden(batch_size)

    def reset_q1_hidden(self, batch_size=1):
        # Zero hidden state for head 1 (num_layers=1) on the global device.
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)

    def reset_q2_hidden(self, batch_size=1):
        # Zero hidden state for head 2 (num_layers=1) on the global device.
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


def quantile_huber_loss(quantiles, targets, taus, kappa=KAPPA):
    """Mean asymmetric Huber loss for quantile regression.

    ``quantiles`` and ``targets`` are paired elementwise, shape
    (batch, NUM_QUANTILES); ``taus`` holds the quantile fractions, shape
    (NUM_QUANTILES,).  NOTE(review): this pairs quantile i with target
    quantile i rather than forming the full pairwise (N x N) QR-DQN loss —
    an intentional simplification, presumably; confirm against the paper.
    """
    u = targets - quantiles
    abs_u = torch.abs(u)
    # Huber kernel: quadratic inside |u| <= kappa, linear beyond it.
    elementwise = torch.where(
        abs_u <= kappa,
        0.5 * u.pow(2),
        kappa * (abs_u - 0.5 * kappa),
    )
    # Asymmetric quantile weight |tau - 1{u < 0}|, broadcast across the batch.
    weights = torch.abs(taus.unsqueeze(0) - (u < 0).float())
    return (weights * elementwise).mean()


class ReplayBuffer:
    """Fixed-capacity FIFO experience store backed by a deque."""

    def __init__(self, max_size=BUFFER_SIZE):
        # deque(maxlen=...) silently evicts the oldest transition when full.
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition."""
        transition = (state, action, reward, next_state, done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Uniformly sample up to ``batch_size`` transitions without replacement.

        Returns five stacked numpy arrays: states, actions, rewards,
        next_states, dones.
        """
        count = min(len(self.buffer), batch_size)
        picked = random.sample(self.buffer, count)
        states, actions, rewards, next_states, dones = map(np.stack, zip(*picked))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


# EWC Class is removed.


class SD3_Rehearsal:
    """SD3 agent with a rehearsal-based continual-learning strategy.

    Identical to the baseline agent except that ``switch_task`` preserves the
    replay buffer across tasks, so training batches mix experience from old
    and new tasks (the "rehearsal" mechanism that mitigates forgetting).
    """

    def __init__(self, state_dim, action_dim, max_action):
        # Actor and its Polyak-averaged target network.
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin distributional critic and its target network.
        self.critic = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()
        # TD3-style target-policy smoothing and delayed actor updates.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2
        self.total_it = 0
        self.current_task = 1

        # Per-task exploration-noise schedules (later tasks start lower).
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            2: np.linspace(EXPLORATION_NOISE_START * 0.8, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            3: np.linspace(EXPLORATION_NOISE_START * 0.7, EXPLORATION_NOISE_END, EPISODES_PER_TASK)
        }

        # NOTE(review): `verbose` is deprecated in newer PyTorch releases —
        # confirm the installed version still accepts it.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True)

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Return a clipped action for one state, with Gaussian exploration noise."""
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)  # add the batch dimension
        state = torch.FloatTensor(state).to(device)
        self.actor.reset_hidden()
        action = self.actor(state).cpu().data.numpy().flatten()
        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise
        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Switch to a new task WITHOUT clearing the replay buffer."""
        print(f"\nSwitching to task {task_id} with Rehearsal strategy.")
        # --- Core change ---
        # The replay buffer is no longer cleared: experience from previous
        # tasks is retained. As the new task proceeds, fresh transitions
        # gradually evict the oldest ones (deque maxlen behaviour).
        print(f"Replay buffer is PRESERVED. It currently contains {len(self.memory)} experiences.")
        self.current_task = task_id
        self.actor.reset_hidden()
        self.critic.reset_hidden()
        print(f"Reset GRU states for new task {task_id}")

    def train(self):
        """Run one SD3 update step; returns the critic/actor losses."""
        self.total_it += 1
        if len(self.memory) < BATCH_SIZE:
            # Buffer still warming up: report zero losses.
            return {"critic_loss": 0.0, "actor_loss": 0.0}

        # Sample experience (mixes old- and new-task transitions).
        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Reset recurrent hidden states for the new batch size.
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        # Compute the target quantile values.
        with torch.no_grad():
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target-policy smoothing: clipped Gaussian noise on the target action.
            noise = torch.randn_like(action) * self.policy_noise
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q: elementwise min over the twin quantile heads.
            target_q1_quantiles, target_q2_quantiles = self.critic_target(next_state, next_action)
            target_q_quantiles = torch.min(target_q1_quantiles, target_q2_quantiles)

            reward_expanded = reward.expand(-1, NUM_QUANTILES)
            done_expanded = done.expand(-1, NUM_QUANTILES)
            target_quantiles = reward_expanded + (1 - done_expanded) * GAMMA * target_q_quantiles

        # Current Q-value distributions.
        current_q1_quantiles, current_q2_quantiles = self.critic(state, action)

        # Critic loss (no EWC term).
        critic_loss_q1 = quantile_huber_loss(current_q1_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss_q2 = quantile_huber_loss(current_q2_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss = critic_loss_q1 + critic_loss_q2

        # Critic update (with gradient clipping).
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss = 0.0
        # Delayed actor update (TD3-style, every `policy_freq` iterations).
        if self.total_it % self.policy_freq == 0:
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)

            # Actor loss (no EWC term): maximise the Q1 median estimate.
            predicted_actions = self.actor(state)
            actor_loss = -self.critic.Q1(state, predicted_actions, reset_hidden=True).mean()

            # Actor update.
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Soft-update (Polyak) the target networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss.item() if isinstance(actor_loss, torch.Tensor) else actor_loss,
        }

    def update_lr_schedulers(self, reward):
        """Step both plateau schedulers on the episode reward metric."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


class SD3_Baseline:
    """Baseline SD3 agent used as the comparison benchmark.

    Identical learning machinery to ``SD3_Rehearsal``; the only strategic
    difference is that ``switch_task`` CLEARS the replay buffer, simulating
    catastrophic forgetting across tasks.

    Fix vs. the original: ``train()`` now returns a zero-loss dict (instead
    of ``None``) when the buffer is below ``BATCH_SIZE``, matching
    ``SD3_Rehearsal.train()`` so callers can index the result unconditionally.
    """

    def __init__(self, state_dim, action_dim, max_action):
        # Actor and its Polyak-averaged target network.
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin distributional critic and its target network.
        self.critic = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()
        # TD3-style target-policy smoothing and delayed actor updates.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2
        self.total_it = 0
        self.current_task = 1

        # Per-task exploration-noise schedules (later tasks start lower).
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            2: np.linspace(EXPLORATION_NOISE_START * 0.8, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            3: np.linspace(EXPLORATION_NOISE_START * 0.7, EXPLORATION_NOISE_END, EPISODES_PER_TASK)
        }

        # NOTE(review): `verbose` is deprecated in newer PyTorch releases —
        # confirm the installed version still accepts it.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True)

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Return a clipped action for one state, with Gaussian exploration noise."""
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)  # add the batch dimension
        state = torch.FloatTensor(state).to(device)
        self.actor.reset_hidden()
        action = self.actor(state).cpu().data.numpy().flatten()
        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise
        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Switch to a new task, CLEARING the replay buffer (the baseline
        behaviour that simulates catastrophic forgetting)."""
        print(f"\nSwitching to baseline task {task_id}")
        print(f"Clearing replay buffer for new task.")
        self.memory.buffer.clear()
        self.current_task = task_id
        self.actor.reset_hidden()
        self.critic.reset_hidden()
        print(f"Reset GRU states for new task {task_id}")

    def train(self):
        """Run one SD3 update step; returns the critic/actor losses.

        Always returns a dict — previously this returned ``None`` during
        buffer warm-up, which diverged from ``SD3_Rehearsal.train()`` and
        would crash any caller indexing the result.
        """
        self.total_it += 1
        if len(self.memory) < BATCH_SIZE:
            # Buffer still warming up: report zero losses (consistent with
            # SD3_Rehearsal.train()).
            return {"critic_loss": 0.0, "actor_loss": 0.0}

        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Reset recurrent hidden states for the new batch size.
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        # Compute the target quantile values.
        with torch.no_grad():
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target-policy smoothing: clipped Gaussian noise on the target action.
            noise = torch.randn_like(action) * self.policy_noise
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q: elementwise min over the twin quantile heads.
            target_q1_quantiles, target_q2_quantiles = self.critic_target(next_state, next_action)
            target_q_quantiles = torch.min(target_q1_quantiles, target_q2_quantiles)

            reward_expanded = reward.expand(-1, NUM_QUANTILES)
            done_expanded = done.expand(-1, NUM_QUANTILES)
            target_quantiles = reward_expanded + (1 - done_expanded) * GAMMA * target_q_quantiles

        current_q1_quantiles, current_q2_quantiles = self.critic(state, action)

        critic_loss_q1 = quantile_huber_loss(current_q1_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss_q2 = quantile_huber_loss(current_q2_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss = critic_loss_q1 + critic_loss_q2

        # Critic update (with gradient clipping).
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss = 0.0
        # Delayed actor update (TD3-style, every `policy_freq` iterations).
        if self.total_it % self.policy_freq == 0:
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)

            # Actor loss: maximise the Q1 median estimate of the policy action.
            actor_loss = -self.critic.Q1(state, self.actor(state), reset_hidden=True).mean()

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Soft-update (Polyak) the target networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            # Same normalisation as SD3_Rehearsal: tensors -> float via .item().
            "actor_loss": actor_loss.item() if isinstance(actor_loss, torch.Tensor) else actor_loss,
        }

    def update_lr_schedulers(self, reward):
        """Step both plateau schedulers on the episode reward metric."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


def _run_training_episode(env, agent, noise_scale):
    """Run one episode for *agent*, training it online after every env step.

    Returns (episode_reward, last_collection_count, final step `info` dict,
    per-episode loss dict with "critic"/"actor" lists).
    NOTE(review): assumes MAX_STEPS >= 1 so `info` is bound before return.
    """
    state = env.reset()
    agent.actor.reset_hidden()
    agent.critic.reset_hidden()
    episode_reward = 0
    last_collection = 0
    episode_losses = {"critic": [], "actor": []}
    info = {}
    for _step in range(1, MAX_STEPS + 1):
        action = agent.select_action(state, noise_scale=noise_scale)
        next_state, reward, done, info = env.step(action)
        agent.memory.add(state, action, reward, next_state, done)
        loss_info = agent.train()
        if loss_info:
            episode_losses["critic"].append(loss_info["critic_loss"])
            episode_losses["actor"].append(loss_info["actor_loss"])
        state = next_state
        episode_reward += reward
        last_collection = info["collected_required"]
        if done:
            break
    return episode_reward, last_collection, info, episode_losses


def _avg_offloading_ratio(env):
    """Mean offloading ratio over task-generating users whose task was
    collected in the episode *env* just finished; 0.0 if none."""
    ratios = [env.user_offloading_ratios[i]
              for i in range(NUM_USERS)
              if env.task_generating_users[i] and env.collected_tasks[i]]
    return np.mean(ratios) if ratios else 0.0


def train():
    """Train the rehearsal agent and the plain SD3 baseline side by side over
    three continual-learning phases.

    Per episode both agents are trained on the same environment (sequentially,
    each from its own ``env.reset()``), histories are logged, best/periodic
    checkpoints are saved under ``results/``, and comparison plots are written
    every ``eval_freq`` episodes.

    Returns:
        tuple: (agent_rehearsal, agent_baseline, env) after all phases.
    """
    os.makedirs("results", exist_ok=True)
    env = Environment()

    # State layout: UAV xy + 4 features per user + speed + one extra scalar
    # (presumably step progress — confirm against Environment.get_state).
    state_dim = 2 + NUM_USERS * 4 + 1 + 1
    action_dim = 4
    max_action = 1

    # Rehearsal-based continual-learning agent vs. plain SD3 baseline
    # (this script replaced the earlier EWC agent with rehearsal).
    agent_rehearsal = SD3_Rehearsal(state_dim, action_dim, max_action)
    agent_baseline = SD3_Baseline(state_dim, action_dim, max_action)

    total_episodes = 600
    episodes_per_task = 200
    eval_freq = 50

    # Per-episode histories for both agents.
    rewards_history_rehearsal = []
    rewards_history_baseline = []
    smoothed_rewards_rehearsal = []
    smoothed_rewards_baseline = []
    collection_history_rehearsal = []
    collection_history_baseline = []
    energy_history_rehearsal = []
    energy_history_baseline = []
    delay_history_rehearsal = []
    delay_history_baseline = []

    best_reward_rehearsal = -float('inf')
    best_collection_rehearsal = 0
    best_reward_baseline = -float('inf')
    best_collection_baseline = 0

    losses_rehearsal = {"critic": [], "actor": []}
    losses_baseline = {"critic": [], "actor": []}

    start_time = time.time()
    for phase in range(1, 4):
        # New task distribution per phase; agents may snapshot/rehearse here.
        env.update_task_generating_users(phase)
        agent_rehearsal.switch_task(phase)
        agent_baseline.switch_task(phase)

        # Exploration noise decays linearly within a phase; each later phase
        # starts 10% lower than the previous one.
        phase_noise_base = EXPLORATION_NOISE_START * (0.9 ** (phase - 1))
        phase_noise = np.linspace(phase_noise_base, EXPLORATION_NOISE_END, episodes_per_task)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode
            current_noise = phase_noise[episode - 1]

            # --- Rehearsal agent episode ---
            (episode_reward_rehearsal, last_collection_rehearsal,
             info, episode_losses_rehearsal) = _run_training_episode(
                env, agent_rehearsal, current_noise)
            # BUG FIX: read the offloading stats now, before the baseline
            # episode resets/overwrites env state. Previously this was
            # computed after the baseline run and therefore reported the
            # baseline episode while being labeled "rehearsal".
            avg_offloading_ratio_rehearsal = _avg_offloading_ratio(env)

            rewards_history_rehearsal.append(episode_reward_rehearsal)
            collection_history_rehearsal.append(last_collection_rehearsal)
            energy_history_rehearsal.append(info["energy"])
            delay_history_rehearsal.append(info["delay"])

            # --- Baseline agent episode ---
            (episode_reward_baseline, last_collection_baseline,
             info_baseline, episode_losses_baseline) = _run_training_episode(
                env, agent_baseline, current_noise)

            rewards_history_baseline.append(episode_reward_baseline)
            collection_history_baseline.append(last_collection_baseline)
            energy_history_baseline.append(info_baseline["energy"])
            delay_history_baseline.append(info_baseline["delay"])

            # 10-episode moving average for the reward curves.
            if len(rewards_history_rehearsal) >= 10:
                smoothed_rewards_rehearsal.append(np.mean(rewards_history_rehearsal[-10:]))
                smoothed_rewards_baseline.append(np.mean(rewards_history_baseline[-10:]))
            else:
                smoothed_rewards_rehearsal.append(episode_reward_rehearsal)
                smoothed_rewards_baseline.append(episode_reward_baseline)

            if episode_losses_rehearsal["critic"]:
                losses_rehearsal["critic"].append(np.mean(episode_losses_rehearsal["critic"]))
            if episode_losses_rehearsal["actor"]:
                losses_rehearsal["actor"].append(np.mean(episode_losses_rehearsal["actor"]))
            if episode_losses_baseline["critic"]:
                losses_baseline["critic"].append(np.mean(episode_losses_baseline["critic"]))
            if episode_losses_baseline["actor"]:
                losses_baseline["actor"].append(np.mean(episode_losses_baseline["actor"]))

            agent_rehearsal.update_lr_schedulers(episode_reward_rehearsal)
            agent_baseline.update_lr_schedulers(episode_reward_baseline)

            # Total required tasks of this episode (taken from the rehearsal
            # run; both runs share the same phase task configuration).
            current_required = info["total_required"]
            collection_ratio_rehearsal = last_collection_rehearsal / current_required if current_required > 0 else 0
            collection_ratio_baseline = last_collection_baseline / current_required if current_required > 0 else 0

            # Best-model tracking: collection ratio first, reward as tiebreaker.
            if collection_ratio_rehearsal > best_collection_rehearsal or (
                    collection_ratio_rehearsal == best_collection_rehearsal
                    and episode_reward_rehearsal > best_reward_rehearsal):
                best_reward_rehearsal = episode_reward_rehearsal
                best_collection_rehearsal = collection_ratio_rehearsal
                torch.save(agent_rehearsal.actor.state_dict(), f"results/best_actor_rehearsal_phase_{phase}.pth")

            if collection_ratio_baseline > best_collection_baseline or (
                    collection_ratio_baseline == best_collection_baseline
                    and episode_reward_baseline > best_reward_baseline):
                best_reward_baseline = episode_reward_baseline
                best_collection_baseline = collection_ratio_baseline
                torch.save(agent_baseline.actor.state_dict(), f"results/best_actor_baseline_phase_{phase}.pth")

            elapsed_time = time.time() - start_time
            total_required = info.get("total_required", 1)

            avg_actor_loss_rehearsal = np.mean(episode_losses_rehearsal["actor"]) if episode_losses_rehearsal["actor"] else 0.0
            avg_critic_loss_rehearsal = np.mean(episode_losses_rehearsal["critic"]) if episode_losses_rehearsal["critic"] else 0.0
            avg_actor_loss_baseline = np.mean(episode_losses_baseline["actor"]) if episode_losses_baseline["actor"] else 0.0
            avg_critic_loss_baseline = np.mean(episode_losses_baseline["critic"]) if episode_losses_baseline["critic"] else 0.0

            # Extra rehearsal-replay losses, when the agent exposes them.
            avg_actor_rehearsal_loss = 0.0
            avg_critic_rehearsal_loss = 0.0
            if hasattr(agent_rehearsal, 'last_rehearsal_losses'):
                avg_actor_rehearsal_loss = agent_rehearsal.last_rehearsal_losses.get('actor_rehearsal', 0.0)
                avg_critic_rehearsal_loss = agent_rehearsal.last_rehearsal_losses.get('critic_rehearsal', 0.0)

            # Reward breakdown of the rehearsal episode.
            rb = info["episode_reward_breakdown"]
            reward_str = (f"Pro:{rb['proximity_reward']:.1f} "
                          f"Col:{rb['collection_reward']:.1f} "
                          f"Comp:{rb['completion_reward']:.1f} "
                          f"Cost:{rb['cost']:.1f} "
                          f"Step:{rb['step_penalty']:.1f}")

            energy_str = ""
            if 'flight_energy' in info and 'comp_energy' in info:
                energy_str = f"E(Flight:{info['flight_energy']:.1f} Comp:{info['comp_energy']:.1f})"

            delay_str = ""
            if 'delay_breakdown' in info:
                db = info['delay_breakdown']
                delay_str = f"D(Tot:{db['total_delay']:.2f}s AvgComp:{db['avg_computation_delay']:.3f}s AvgOff:{db['avg_offloading_delay']:.3f}s)"

            # Labels fixed: this run compares Rehearsal vs Baseline (the old
            # "EWC" labels were stale). NOTE(review): Steps/Speed come from
            # env state, i.e. the most recent (baseline) episode.
            print(
                f"P:{phase} Ep {episode:3d}/{episodes_per_task} "
                f"Rehearsal Tasks:{last_collection_rehearsal:2d}/{total_required:2d} "
                f"Baseline Tasks:{last_collection_baseline:2d}/{total_required:2d} "
                f"Steps:{env.step_count:3d} "
                f"Speed:{env.current_speed:.1f} m/s "
                f"Noise:{current_noise:.3f} "
                f"AvgOffload: {avg_offloading_ratio_rehearsal:.2f} "
                f"Rehearsal Loss(A/C/Reh_A/Reh_C) {avg_actor_loss_rehearsal:.3f}/{avg_critic_loss_rehearsal:.3f}/{avg_actor_rehearsal_loss:.3f}/{avg_critic_rehearsal_loss:.3f} "
                f"Baseline Loss(A/C) {avg_actor_loss_baseline:.3f}/{avg_critic_loss_baseline:.3f} | "
                f"Rehearsal Rwd: {episode_reward_rehearsal:.2f} Baseline Rwd: {episode_reward_baseline:.2f} "
                f"[{reward_str}] | "
                f"Total E: {info.get('energy', 0):.1f} "
                f"[{energy_str}] | "
                f"Avg D: {info.get('delay', 0):.3f}s "
                f"[{delay_str}] | "
                f"Time: {elapsed_time:.1f}s"
            )

            # Periodic evaluation: comparison plots + full checkpoint.
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                plt.figure(figsize=(15, 10))

                plt.subplot(2, 3, 1)
                plt.plot(rewards_history_rehearsal, alpha=0.3, color='blue', label='Rehearsal Raw')
                plt.plot(smoothed_rewards_rehearsal, color='blue', linewidth=2, label='Rehearsal Smoothed')
                plt.plot(rewards_history_baseline, alpha=0.3, color='red', label='Baseline Raw')
                plt.plot(smoothed_rewards_baseline, color='red', linewidth=2, label='Baseline Smoothed')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
                plt.title("奖励对比")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                # NOTE(review): env holds the most recent episode, i.e. the
                # baseline agent's trajectory.
                plt.subplot(2, 3, 2)
                trajectory = np.array(env.trajectory)
                plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.7, label='UAV轨迹')
                plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                            s=300, c='orange', marker='s', label='基站', edgecolors='black', linewidth=2)
                for i, pos in enumerate(env.user_positions):
                    if env.task_generating_users[i]:
                        color = 'green' if env.collected_tasks[i] else 'red'
                    else:
                        color = 'gray'
                    plt.scatter(pos[0], pos[1], s=100, c=color)
                    plt.annotate(f"{i + 1}", (pos[0], pos[1]), ha='center', va='center', fontweight='bold')

                plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
                plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')
                plt.title("无人机轨迹图")
                plt.xlabel("X坐标 (m)")
                plt.ylabel("Y坐标 (m)")
                plt.xlim(0, AREA_SIZE)
                plt.ylim(0, AREA_SIZE)
                plt.legend()
                plt.grid(True)

                # Subplots 3-5 share the same shape: per-episode metric pairs.
                for slot, data_r, data_b, title, ylabel in (
                        (3, collection_history_rehearsal, collection_history_baseline, "收集任务数对比", "Number of Tasks"),
                        (4, energy_history_rehearsal, energy_history_baseline, "总能耗对比", "Energy"),
                        (5, delay_history_rehearsal, delay_history_baseline, "平均延迟对比", "Delay (s)")):
                    plt.subplot(2, 3, slot)
                    plt.plot(data_r, color='blue', label='Rehearsal')
                    plt.plot(data_b, color='red', label='Baseline')
                    plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                    plt.title(title)
                    plt.xlabel("Episode")
                    plt.ylabel(ylabel)
                    plt.legend()
                    plt.grid(True)

                plt.subplot(2, 3, 6)
                if losses_rehearsal["critic"]:
                    plt.plot(losses_rehearsal["critic"], color='blue', linestyle='-', label='Rehearsal Critic')
                if losses_rehearsal["actor"]:
                    plt.plot(losses_rehearsal["actor"], color='blue', linestyle='--', label='Rehearsal Actor')
                if losses_baseline["critic"]:
                    plt.plot(losses_baseline["critic"], color='red', linestyle='-', label='Baseline Critic')
                if losses_baseline["actor"]:
                    plt.plot(losses_baseline["actor"], color='red', linestyle='--', label='Baseline Actor')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("训练损失对比")
                plt.xlabel("Episode")
                plt.ylabel("Loss")
                plt.legend()
                plt.grid(True)

                plt.tight_layout()
                plt.savefig(f"results/comparison_curves_episode_{global_episode}.png")
                plt.close()

                torch.save({
                    'rehearsal_actor_state_dict': agent_rehearsal.actor.state_dict(),
                    'rehearsal_critic_state_dict': agent_rehearsal.critic.state_dict(),
                    'baseline_actor_state_dict': agent_baseline.actor.state_dict(),
                    'baseline_critic_state_dict': agent_baseline.critic.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history_rehearsal': rewards_history_rehearsal,
                    'rewards_history_baseline': rewards_history_baseline,
                    'collection_history_rehearsal': collection_history_rehearsal,
                    'collection_history_baseline': collection_history_baseline,
                    'energy_history_rehearsal': energy_history_rehearsal,
                    'energy_history_baseline': energy_history_baseline,
                    'delay_history_rehearsal': delay_history_rehearsal,
                    'delay_history_baseline': delay_history_baseline,
                }, f"results/checkpoint_comparison_episode_{global_episode}.pt")

        # End-of-phase snapshots for both agents.
        torch.save(agent_rehearsal.actor.state_dict(), f"results/actor_rehearsal_phase_{phase}.pth")
        torch.save(agent_rehearsal.critic.state_dict(), f"results/critic_rehearsal_phase_{phase}.pth")
        torch.save(agent_baseline.actor.state_dict(), f"results/actor_baseline_phase_{phase}.pth")
        torch.save(agent_baseline.critic.state_dict(), f"results/critic_baseline_phase_{phase}.pth")

    print(f"Training completed!")
    print(f"Rehearsal Best result: {best_collection_rehearsal * 100:.1f}% tasks, Reward: {best_reward_rehearsal:.2f}")
    print(f"Baseline Best result: {best_collection_baseline * 100:.1f}% tasks, Reward: {best_reward_baseline:.2f}")
    return agent_rehearsal, agent_baseline, env


if __name__ == "__main__":
    # The full training/testing pipeline mirrors the earlier EWC version,
    # with EWC simply replaced by the rehearsal agent. This entry point is
    # a demo: the evaluation and extra plotting stages are omitted.
    train()
