import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time

# SD3 algorithm — distributional value function via quantile regression,
# extended with a base-station offloading decision.
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font for plot labels
plt.rcParams['axes.unicode_minus'] = False

EPISODES_PER_TASK = 200

# Environment parameters
AREA_SIZE = 200                 # side length of the square service area (m)
NUM_USERS = 10
MAX_STEPS = 300                 # episode step limit
MAX_DISTANCE_COLLECT = 15       # UAV-to-user 2-D range for task collection (m)

# UAV parameters
UAV_HEIGHT = 30.0               # fixed flight altitude (m)
UAV_COMPUTE_CAPACITY = 1e10     # CPU cycles per second
MIN_UAV_SPEED = 2.0
MAX_UAV_SPEED = 20.0
DEFAULT_UAV_SPEED = 10.0

# Base station parameters
BASE_STATION_POSITION = np.array([0.0, 0.0])
BASE_STATION_COMPUTE_CAPACITY = 2e11  # BS is 20x faster than the UAV
BASE_STATION_HEIGHT = 10.0

# SD3 hyperparameters
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99
TAU = 0.005
BUFFER_SIZE = 200000
BATCH_SIZE = 256
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.15

# SD3 distributional value-function parameters.
# QUANTILE_TAU holds the quantile midpoints (2i-1)/(2N), i = 1..N.
NUM_QUANTILES = 51
QUANTILE_TAU = torch.FloatTensor([(2 * i - 1) / (2.0 * NUM_QUANTILES) for i in range(1, NUM_QUANTILES + 1)]).to(device)
KAPPA = 1.0  # Huber threshold for the quantile loss

# EWC (continual learning) parameters
EWC_LAMBDA =0.1
FISHER_SAMPLE_SIZE = 2000

# GRU parameters
SEQUENCE_LENGTH = 10   # observation-history length fed to the recurrent nets
HIDDEN_SIZE = 128

# Communication parameters
BANDWIDTH = 1e6                 # Hz
USER_TRANSMIT_POWER = 0.1       # W
CHANNEL_GAIN_REF_DB = 30.0
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH  # thermal noise power (W)
RICE_FACTOR = 5                 # Rician K factor (LoS-to-scatter power ratio)

# Task parameters: [min, max] ranges sampled per user
TASK_SIZE_BITS = [1e6, 2e6]
TASK_CPU_CYCLES = [5e8, 15e8]

# UAV flight-energy model parameters (rotary-wing power model).
# NOTE(review): P_PROFILE_COEFF / P_PARASITE_COEFF are dimensionless here —
# confirm the units match the power model these coefficients come from.
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81
AIR_DENSITY = 1.225
ROTOR_RADIUS = 0.4
NUM_ROTORS = 4
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-27  # chip energy coefficient for computation energy

# Objective weighting (delay vs energy) and scaling used in the reward cost term
DELAY_WEIGHT = 0.7
ENERGY_WEIGHT = 0.3
DELAY_SCALE = 100.0
ENERGY_SCALE = 0.001


class Environment:
    """UAV-assisted mobile edge computing (MEC) environment.

    A single UAV flies over a square area, collects computation tasks from
    users once it is within MAX_DISTANCE_COLLECT of them, and chooses a
    per-task offloading ratio that splits computation between the UAV and the
    base station.  Observations are stacked into a SEQUENCE_LENGTH history so
    the GRU-based agent sees a short temporal window.
    """

    def __init__(self):
        # Random user layout and per-user task demands.
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)  # which users have an active task
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        # Per-user bookkeeping, filled in as each task is collected.
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.total_flight_energy = 0
        self.user_offloading_ratios = np.zeros(NUM_USERS)

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1  # continual-learning task phase (1..3)

    def _calculate_rice_channel_gain(self, distance_2d, height=UAV_HEIGHT):
        """Sample a Rician-faded channel gain for a link with the given 2-D
        distance and height difference (stochastic: draws fresh fading)."""
        distance_3d = np.sqrt(distance_2d ** 2 + height ** 2)
        if distance_3d < 1.0: distance_3d = 1.0  # avoid near-field path-loss blow-up
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        K = RICE_FACTOR
        h_los = 1.0
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)  # unit-power Rayleigh scatter term
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """User-to-UAV uplink transmission delay over a Shannon-capacity link."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return self.task_sizes[user_index] / data_rate

    def _calculate_uav_to_bs_delay(self, task_size):
        """UAV-to-base-station relay transmission delay for task_size bits.

        NOTE(review): this link reuses USER_TRANSMIT_POWER as the UAV's
        transmit power — confirm the UAV transmitter is meant to match users.
        """
        bs_distance_2d = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        height_diff = abs(UAV_HEIGHT - BASE_STATION_HEIGHT)
        channel_gain = self._calculate_rice_channel_gain(bs_distance_2d, height_diff)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return task_size / data_rate

    def _calculate_completion_delay(self, user_index, offloading_ratio):
        """End-to-end completion delay for one task under the given split.

        offloading_ratio = 0: compute entirely on the UAV;
        offloading_ratio = 1: relay entirely to the BS;
        otherwise the UAV and BS shares run in parallel and the slower path
        (max) determines completion.
        """
        total_task_size = self.task_sizes[user_index]
        total_cpu_cycles = self.task_cpu_cycles[user_index]

        user_to_uav_delay = self._calculate_offloading_delay(user_index,
                                                             np.linalg.norm(
                                                                 self.uav_position - self.user_positions[user_index]))

        if offloading_ratio == 0:
            local_computation_delay = total_cpu_cycles / UAV_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + local_computation_delay
        elif offloading_ratio == 1:
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(total_task_size)
            bs_computation_delay = total_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay
        else:
            # Partial offloading: split both data volume and CPU cycles.
            local_task_size = total_task_size * (1 - offloading_ratio)
            bs_task_size = total_task_size * offloading_ratio
            local_cpu_cycles = total_cpu_cycles * (1 - offloading_ratio)
            bs_cpu_cycles = total_cpu_cycles * offloading_ratio

            local_computation_delay = local_cpu_cycles / UAV_COMPUTE_CAPACITY
            local_total_delay = user_to_uav_delay + local_computation_delay

            uav_to_bs_delay = self._calculate_uav_to_bs_delay(bs_task_size)
            bs_computation_delay = bs_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            bs_total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

            # Both branches execute concurrently; the slower one dominates.
            total_delay = max(local_total_delay, bs_total_delay)

        return total_delay

    def _calculate_flight_energy(self, distance_moved, actual_speed=None, time_delta=1.0):
        """Flight energy for one time step from a rotary-wing power model:
        induced + blade-profile + parasite power, times time_delta.
        At speed 0 this reduces to hover power."""
        if actual_speed is None:
            speed = distance_moved / time_delta
        else:
            speed = actual_speed

        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index, offloading_ratio):
        """UAV-side computation energy for the locally processed share
        (C * f^2 * cycles with effective switched capacitance C)."""
        local_cpu_cycles = self.task_cpu_cycles[user_index] * (1 - offloading_ratio)
        working_frequency = UAV_COMPUTE_CAPACITY
        energy = EFFECTIVE_SWITCHED_CAPACITANCE * (working_frequency ** 2) * local_cpu_cycles
        return energy

    def update_task_generating_users(self, phase):
        """Reconfigure user layout and task demand for a continual-learning phase."""
        self.current_phase = phase
        if phase == 1:
            # Phase 1: users clustered in the upper-left quadrant.
            self.user_positions = np.random.uniform([0, AREA_SIZE / 2], [AREA_SIZE / 2, AREA_SIZE], size=(NUM_USERS, 2))
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 2:
            # Phase 2: users clustered in the lower-right quadrant, plus some carried-over users.
            new_positions = np.random.uniform([AREA_SIZE / 2, 0], [AREA_SIZE, AREA_SIZE / 2], size=(NUM_USERS, 2))
            # Keep 30% of the old user positions to model task correlation across phases.
            keep_old = np.random.choice(NUM_USERS, 3, replace=False)
            new_positions[keep_old] = self.user_positions[keep_old]
            self.user_positions = new_positions
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        else:
            # Phase 3: mixed distribution with more demanding tasks.
            self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
            # Randomly pick 8 active users to simulate a dynamic environment.
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
            # Increase task complexity (note: compounds if phase 3 is entered repeatedly).
            self.task_cpu_cycles *= 1.5
            self.task_sizes *= 1.2

    def reset(self):
        """Start a new episode; returns the initial GRU observation stack."""
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        self.user_completion_delays.fill(0)
        self.user_offloading_delays.fill(0)
        self.user_computation_delays.fill(0)
        self.user_computation_energies.fill(0)
        self.user_offloading_ratios.fill(0)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()
        # Seed the history with copies of the initial state so the GRU input
        # has a full SEQUENCE_LENGTH window from step one.
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one time step.

        Action layout (each component in [-1, 1]):
          action[0:2] — movement direction (normalized internally),
          action[2]   — speed, mapped to [MIN_UAV_SPEED, MAX_UAV_SPEED],
          action[3]   — offloading ratio, mapped to [0, 1].
        Returns (gru_state, reward, done, info).
        """
        direction_raw = action[:2]
        direction_norm = np.linalg.norm(direction_raw)
        if direction_norm > 1e-6:
            movement_direction = direction_raw / direction_norm
        else:
            movement_direction = np.array([0.0, 0.0])

        speed = (action[2] + 1) / 2 * (MAX_UAV_SPEED - MIN_UAV_SPEED) + MIN_UAV_SPEED
        offloading_ratio = (action[3] + 1) / 2
        movement = movement_direction * speed

        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)  # stay inside the area
        self.trajectory.append(self.uav_position.copy())

        actual_movement = self.uav_position - prev_position
        distance_moved = np.linalg.norm(actual_movement)

        if distance_moved > 0:
            actual_speed = speed if direction_norm > 1e-6 else 0
            flight_energy_step = self._calculate_flight_energy(distance_moved, actual_speed=actual_speed)
        else:
            # No displacement (hover or clipped at boundary): hover energy.
            flight_energy_step = self._calculate_flight_energy(0, actual_speed=0)

        self.total_flight_energy += flight_energy_step
        self.current_speed = speed if direction_norm > 1e-6 else 0

        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect every uncollected active task within range; the current
        # offloading_ratio action applies to all tasks collected this step.
        newly_collected = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    self.user_offloading_ratios[i] = offloading_ratio
                    self.user_offloading_delays[i] = self._calculate_offloading_delay(i, new_distances[i])
                    self.user_completion_delays[i] = self._calculate_completion_delay(i, offloading_ratio)
                    self.user_computation_delays[i] = self.user_completion_delays[i] - self.user_offloading_delays[i]
                    self.user_computation_energies[i] = self._calculate_computation_energy(i, offloading_ratio)

        self.step_count += 1

        completed_indices = np.where(self.collected_tasks & self.task_generating_users)[0]

        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
            avg_offloading_delay = np.mean(self.user_offloading_delays[completed_indices])
            avg_computation_delay = np.mean(self.user_computation_delays[completed_indices])
        else:
            total_delay = 0.0
            total_comp_energy = 0.0
            avg_total_delay, avg_offloading_delay, avg_computation_delay = 0.0, 0.0, 0.0

        total_energy = self.total_flight_energy + total_comp_energy

        reward_info = self.calculate_reward(newly_collected, total_energy, total_delay, new_distances,
                                            self.last_distances)
        reward = reward_info['total_reward']

        self.last_distances = new_distances

        # Episode ends at the step limit or when all required tasks are collected.
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        self.observation_history.append(self._get_state())

        return self._get_gru_state(), reward, done, {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "flight_energy": self.total_flight_energy,
            "comp_energy": total_comp_energy,
            "delay_breakdown": {
                "avg_offloading_delay": avg_offloading_delay,
                "avg_computation_delay": avg_computation_delay,
                "avg_total_delay": avg_total_delay,
                "total_delay": total_delay,
            }
        }

    def _get_state(self):
        """Flat observation vector:
        [uav_xy (2)] + per user [distance, collected, active, cpu_demand] (4*NUM_USERS)
        + [step fraction (1)] + [distance to BS (1)], all normalized to ~[0, 1]."""
        state = np.zeros(2 + NUM_USERS * 4 + 1 + 1)
        state[0:2] = self.uav_position / AREA_SIZE

        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 4
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # normalize by area diagonal
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
            state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]

        state[-2] = self.step_count / MAX_STEPS
        bs_distance = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        state[-1] = bs_distance / np.sqrt(2 * AREA_SIZE ** 2)

        return state

    def _get_gru_state(self):
        """Return the (SEQUENCE_LENGTH, state_dim) observation stack,
        padding with the current state if the history is short."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def calculate_reward(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Shaped reward: collection bonus + proximity shaping - step penalty,
        plus a terminal completion bonus minus a delay/energy cost.
        Returns a breakdown dict; all terms are scaled by REWARD_SCALE."""
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users) if sum(self.task_generating_users) > 0 else 1

        collection_reward = newly_collected * 30.0

        # Dense shaping: reward moving closer to the nearest uncollected user.
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            uncollected_distances_old = old_distances[uncollected_indices]
            uncollected_distances_new = new_distances[uncollected_indices]
            closest_user_idx = np.argmin(uncollected_distances_new)
            dist_diff = uncollected_distances_old[closest_user_idx] - uncollected_distances_new[closest_user_idx]
            proximity_reward = dist_diff * 0.1

        step_penalty = 0.1  # NOTE(review): circling still accumulates reward — penalty may be too small (suggestion was 0.3 -> 5)
        cost = 0.0
        completion_reward = 0.0

        # NOTE(review): duplicates the termination condition in step(); keep in sync.
        done = (self.step_count >= MAX_STEPS) or (collected_required == sum(self.task_generating_users))

        if done:
            if collected_required > 0:
                # Weighted delay/energy objective, averaged per collected task.
                scaled_total_delay = total_delay * DELAY_SCALE
                scaled_total_energy = total_energy * ENERGY_SCALE
                objective_value = (DELAY_WEIGHT * scaled_total_delay) + (ENERGY_WEIGHT * scaled_total_energy)
                cost = objective_value / collected_required

                completion_rate = collected_required / total_required
                completion_reward = completion_rate * 100
                if completion_rate == 1.0:
                    completion_reward += 150.0  # full-completion bonus
            else:
                cost = 200.0  # finished with nothing collected: flat penalty

        total_reward = (collection_reward +
                        proximity_reward +
                        completion_reward -
                        step_penalty -
                        cost)

        scaled_reward = total_reward * REWARD_SCALE

        return {
            'total_reward': scaled_reward,
            'collection_reward': collection_reward * REWARD_SCALE,
            'proximity_reward': proximity_reward * REWARD_SCALE,
            'completion_reward': completion_reward * REWARD_SCALE,
            'cost': -cost * REWARD_SCALE,
            'step_penalty': -step_penalty * REWARD_SCALE
        }

    def render(self, episode=0, clear_output=True):
        """Save a matplotlib snapshot of the scene to results/.

        NOTE(review): assumes the results/ directory already exists
        (os is imported but no makedirs call is made here)."""
        plt.figure(figsize=(10, 10))

        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        if hasattr(self, 'current_speed'):
            title += f", 当前速度: {self.current_speed:.1f} m/s"
        plt.title(title)

        plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                    s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
        plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                     textcoords="offset points", xytext=(0, -25),
                     ha='center', fontsize=10, fontweight='bold')

        # Users: green = collected, red = pending, gray = inactive.
        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)

            if self.task_generating_users[i] and self.collected_tasks[i]:
                offload_ratio = self.user_offloading_ratios[i]
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy\nOffload:{offload_ratio:.2f}"
            else:
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy"

            plt.annotate(task_info, (pos[0], pos[1]), fontsize=8, ha='center', va='bottom')

        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]), MAX_DISTANCE_COLLECT, color='blue',
                            fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.plot([self.uav_position[0], BASE_STATION_POSITION[0]],
                 [self.uav_position[1], BASE_STATION_POSITION[1]],
                 'orange', linestyle='--', alpha=0.5, linewidth=1)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        # NOTE(review): the title is rebuilt here without the speed suffix and
        # overrides the plt.title call above — likely redundant.
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        plt.title(title)
        plt.grid(True)
        plt.legend()
        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


class GRUActor(nn.Module):
    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)
        self.max_action = max_action
        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)
        self.hidden = None
        self._init_weights()

    def _init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        batch_size = state.size(0)
        if reset_hidden or self.hidden is None or self.hidden.size(1) != batch_size:
            self.reset_hidden(batch_size)

        gru_out, self.hidden = self.gru(state, self.hidden)
        x = gru_out[:, -1]
        x = self.ln1(torch.relu(self.layer1(x)))
        x = self.ln2(torch.relu(self.layer2(x)))
        action = torch.tanh(self.layer3(x))
        return self.max_action * action

    def reset_hidden(self, batch_size=1):
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class SD3DistributionalCritic(nn.Module):
    """Twin distributional critic for SD3.

    Each of the two heads (q1, q2) encodes the observation sequence with its
    own GRU, concatenates the last hidden feature with the action, and outputs
    NUM_QUANTILES quantile estimates of the return distribution.  Each head
    keeps its own persistent GRU hidden state.
    """

    def __init__(self, state_dim, action_dim):
        super(SD3DistributionalCritic, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.num_quantiles = NUM_QUANTILES

        # Head 1.
        self.q1_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, self.num_quantiles)
        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Head 2 (independent parameters, same architecture).
        self.q2_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, self.num_quantiles)
        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        self.q1_hidden = None
        self.q2_hidden = None
        self._init_weights()

    def _init_weights(self):
        # Xavier init for linear layers; note the bias is 0.01 here
        # (the actor uses 0.0) — presumably intentional, verify if critical.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)

    def forward(self, state, action, reset_hidden=False):
        """Return (q1_quantiles, q2_quantiles), each (batch, NUM_QUANTILES)."""
        batch_size = state.size(0)
        # Re-zero a head's hidden state when requested or when the batch
        # size no longer matches.
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch_size:
            self.reset_q1_hidden(batch_size)
        if reset_hidden or self.q2_hidden is None or self.q2_hidden.size(1) != batch_size:
            self.reset_q2_hidden(batch_size)

        # NOTE(review): hidden states are moved to state.device here, while
        # GRUActor relies on the module-level device — confirm both nets are
        # always on the same device.
        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden.to(state.device))
        q2_gru_out, self.q2_hidden = self.q2_gru(state, self.q2_hidden.to(state.device))

        q1_state = q1_gru_out[:, -1]
        q2_state = q2_gru_out[:, -1]

        q1_x = torch.cat([q1_state, action], dim=1)
        q2_x = torch.cat([q2_state, action], dim=1)
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1_quantiles = self.q1_output(q1)

        q2 = self.q2_ln1(torch.relu(self.q2_layer1(q2_x)))
        q2 = self.q2_ln2(torch.relu(self.q2_layer2(q2)))
        q2_quantiles = self.q2_output(q2)

        return q1_quantiles, q2_quantiles

    def Q1(self, state, action, reset_hidden=False):
        """Scalar Q estimate from head 1: the median quantile, shape (batch, 1).
        Used for the actor (policy) update."""
        batch_size = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch_size:
            self.reset_q1_hidden(batch_size)

        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden.to(state.device))
        q1_state = q1_gru_out[:, -1]
        q1_x = torch.cat([q1_state, action], dim=1)
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1_quantiles = self.q1_output(q1)

        # Median of the quantile set (NUM_QUANTILES is odd) as the point value.
        median_idx = NUM_QUANTILES // 2
        return q1_quantiles[:, median_idx:median_idx + 1]

    def reset_hidden(self, batch_size=1):
        """Zero both heads' hidden states."""
        self.reset_q1_hidden(batch_size)
        self.reset_q2_hidden(batch_size)

    def reset_q1_hidden(self, batch_size=1):
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)

    def reset_q2_hidden(self, batch_size=1):
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


def quantile_huber_loss(quantiles, targets, taus, kappa=KAPPA):
    """Quantile Huber loss (QR-DQN style).

    Applies the Huber penalty to the TD residual and weights it by
    |tau - 1{residual < 0}| so each output unit regresses toward its own
    quantile level of the target distribution.
    """
    td_error = targets - quantiles
    abs_error = td_error.abs()
    # Huber penalty: quadratic inside |e| <= kappa, linear outside.
    quadratic = 0.5 * td_error.pow(2)
    linear = kappa * (abs_error - 0.5 * kappa)
    elementwise = torch.where(abs_error <= kappa, quadratic, linear)
    # Asymmetric quantile weighting.
    below_target = (td_error < 0).float()
    weight = (taus.unsqueeze(0) - below_target).abs()
    return (weight * elementwise).mean()


class ReplayBuffer:
    """Fixed-capacity FIFO experience store for off-policy training."""

    def __init__(self, max_size=BUFFER_SIZE):
        # deque evicts the oldest transition once capacity is reached.
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample up to batch_size transitions, returned as five
        stacked arrays (states, actions, rewards, next_states, dones)."""
        count = min(len(self.buffer), batch_size)
        chosen = random.sample(self.buffer, count)
        columns = zip(*chosen)
        states, actions, rewards, next_states, dones = (np.stack(col) for col in columns)
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


class EWC:
    """Elastic Weight Consolidation (EWC) regulariser.

    Stores a snapshot of model parameters plus a diagonal Fisher-information
    estimate after a task, then penalises later updates in proportion to how
    important each parameter was for the stored task.

    NOTE(review): the original class defined ``calculate_ewc_loss`` twice —
    the first (task-similarity-weighted) definition was silently shadowed by
    the second and could never run, so only the active definition is kept.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        # Snapshot used by calculate_ewc_loss(); empty until
        # store_task_parameters() is called.  (Previously these attributes
        # were only created inside store_task_parameters, so calling the loss
        # first raised AttributeError.)
        self.old_params = {}
        self.importance = {}
        # Per-task bookkeeping, kept for compatibility with the adaptive
        # variant; nothing in this class populates them yet.
        self.task_importance = {}  # task_id -> Fisher information dict
        self.task_params = {}      # task_id -> parameter snapshot
        self.adaptive_lambda = 1.0  # adaptive weight placeholder

    def _calculate_task_similarity(self, task1, task2):
        """Heuristic task similarity: adjacent task ids are more similar."""
        return max(0.1, 1.0 - 0.3 * abs(task1 - task2))

    def _calculate_fisher_info(self, replay_buffer, old_fisher=None):
        """Estimate the diagonal Fisher information from replayed transitions.

        When ``old_fisher`` is given, the fresh estimate is blended with it
        (progressive EWC) so earlier estimates are not discarded outright.
        Returns a dict mapping parameter name -> Fisher diagonal tensor.
        """
        fisher = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                fisher[name] = torch.zeros_like(param).to(device)

        if len(replay_buffer) == 0:
            return fisher

        # Honour the configured sample budget (the original hard-coded 2000
        # here, ignoring the constructor argument).
        sample_size = min(self.fisher_sample_size, len(replay_buffer))

        for _ in range(sample_size):
            states, actions, _, _, _ = replay_buffer.sample(1)
            states = torch.FloatTensor(states).to(device)
            actions = torch.FloatTensor(actions).to(device)

            self.model.zero_grad()
            if isinstance(self.model, GRUActor):
                self.model.reset_hidden(1)
                outputs = self.model(states)
                # Gaussian log-likelihood surrogate for the deterministic actor.
                log_prob = -0.5 * ((outputs - actions) ** 2).sum()
            else:
                self.model.reset_hidden(1)
                outputs, _ = self.model(states, actions)
                log_prob = outputs.mean()

            log_prob.backward()

            for name, param in self.model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    fisher[name] += param.grad.pow(2) / sample_size

        # Blend with the historical estimate if one is provided.
        if old_fisher is not None:
            alpha = 0.7  # weight given to the historical Fisher information
            for name in fisher:
                if name in old_fisher:
                    fisher[name] = alpha * old_fisher[name] + (1 - alpha) * fisher[name]

        return fisher

    def store_task_parameters(self, task_id, replay_buffer):
        """Snapshot current parameters and their Fisher importance.

        ``task_id`` is informational only; the snapshot overwrites any
        previously stored one.
        """
        print(f"Storing parameters for task {task_id} and computing Fisher information matrix")
        self.old_params = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.old_params[name] = param.data.clone()
        self.importance = self._calculate_fisher_info(replay_buffer)
        print(f"Stored {len(self.old_params)} parameters and computed Fisher matrices")

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Quadratic EWC penalty; returns 0 when no snapshot has been stored."""
        loss = 0
        if not self.old_params or not self.importance:
            return loss
        for name, param in self.model.named_parameters():
            if name in self.old_params and name in self.importance and param.requires_grad:
                loss += torch.sum(self.importance[name] * (param - self.old_params[name]).pow(2))
        return lam * loss


class SD3:
    """GRU actor-critic agent with twin distributional (quantile) critics,
    delayed policy updates, and EWC regularization across sequential tasks.

    NOTE(review): the target value here uses min(Q1, Q2) exactly as in TD3;
    confirm this matches the intended "SD3" (softmax variants differ here).
    """

    def __init__(self, state_dim, action_dim, max_action):
        # Actor network and its Polyak-averaged target copy.
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin quantile critic and its target copy.
        self.critic = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()
        # Target-policy smoothing: clipped Gaussian noise on target actions.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2  # actor/targets update every 2 critic updates
        self.total_it = 0

        # EWC state (anchor parameters + Fisher info) for actor and critic.
        self.ewc_actor = EWC(self.actor)
        self.ewc_critic = EWC(self.critic)
        self.current_task = 1

        # Per-task exploration-noise schedules; later tasks start less noisy.
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            2: np.linspace(EXPLORATION_NOISE_START * 0.8, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            3: np.linspace(EXPLORATION_NOISE_START * 0.7, EXPLORATION_NOISE_END, EPISODES_PER_TASK)
        }

        # Halve learning rates when the (maximized) episode reward plateaus.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True)

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Return an action for `state`, with exploration noise, clipped to bounds."""
        if len(state.shape) == 2:
            # Add a leading batch dimension expected by the GRU actor.
            state = np.expand_dims(state, 0)
        state = torch.FloatTensor(state).to(device)
        action = self.actor(state).cpu().data.numpy().flatten()
        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise
        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Consolidate the finished task via EWC, then clear buffer and GRU state."""
        print(f"\nSwitching to task {task_id}")
        # Snapshot parameters + Fisher info of the task we are leaving,
        # but only if there is experience to estimate the Fisher matrix from.
        if self.current_task > 0 and len(self.memory) > 0:
            self.ewc_actor.store_task_parameters(self.current_task, self.memory)
            self.ewc_critic.store_task_parameters(self.current_task, self.memory)
        print(f"Clearing replay buffer for new task.")
        self.memory.buffer.clear()
        self.current_task = task_id
        self.actor.reset_hidden()
        self.critic.reset_hidden()
        print(f"Reset GRU states for new task {task_id}")

    def train(self):
        """One SD3 update step.

        Critic: quantile Huber regression of both heads toward the Bellman
        target built from min(Q1, Q2) of the target critic. The actor and the
        target networks are updated only every `policy_freq` iterations.
        From task 2 onward the EWC penalty is added to both losses.

        Returns:
            dict of scalar losses, or None while the replay buffer holds
            fewer than BATCH_SIZE transitions.
        """
        self.total_it += 1
        if len(self.memory) < BATCH_SIZE:
            return

        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Fresh GRU hidden states sized for this batch.
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        with torch.no_grad():
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target-policy smoothing (TD3): clipped noise on the target action.
            noise = torch.FloatTensor(action.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q at the quantile level: element-wise min of heads.
            target_q1_quantiles, target_q2_quantiles = self.critic_target(next_state, next_action)
            target_q_quantiles = torch.min(target_q1_quantiles, target_q2_quantiles)

            # Distributional Bellman target, broadcast over all quantiles.
            reward_expanded = reward.expand(-1, NUM_QUANTILES)
            done_expanded = done.expand(-1, NUM_QUANTILES)
            target_quantiles = reward_expanded + (1 - done_expanded) * GAMMA * target_q_quantiles

        current_q1_quantiles, current_q2_quantiles = self.critic(state, action)

        critic_loss_q1 = quantile_huber_loss(current_q1_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss_q2 = quantile_huber_loss(current_q2_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss = critic_loss_q1 + critic_loss_q2

        # EWC penalty applies only once at least one task was consolidated.
        critic_ewc_loss = 0
        if self.current_task > 1:
            critic_ewc_loss = self.ewc_critic.calculate_ewc_loss()
            critic_loss += critic_ewc_loss

        self.critic_optimizer.zero_grad()
        critic_loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss = 0
        actor_ewc_loss = 0

        # Delayed policy update (every `policy_freq` critic steps).
        if self.total_it % self.policy_freq == 0:
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)

            # Deterministic policy gradient: maximize Q1 of the current critic.
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            if self.current_task > 1:
                actor_ewc_loss = self.ewc_actor.calculate_ewc_loss()
                actor_loss += actor_ewc_loss

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Polyak soft update of both target networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        # Losses may be plain numbers (skipped actor step / no EWC) or tensors.
        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss if isinstance(actor_loss, (int, float)) else actor_loss.item(),
            "critic_ewc_loss": critic_ewc_loss.item() if isinstance(critic_ewc_loss, torch.Tensor) else critic_ewc_loss,
            "actor_ewc_loss": actor_ewc_loss.item() if isinstance(actor_ewc_loss, torch.Tensor) else actor_ewc_loss
        }

    def update_lr_schedulers(self, reward):
        """Feed the episode reward into both plateau-based LR schedulers."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


# [Added] Ablation baseline: the same SD3 agent without EWC regularization
class SD3_NoEWC:
    """Ablation baseline: identical to the SD3 agent (GRU actor, twin quantile
    critics, delayed policy updates) but with no EWC penalty or parameter
    consolidation, so switching tasks keeps no memory of previous ones."""

    def __init__(self, state_dim, action_dim, max_action):
        # Actor network and its Polyak-averaged target copy.
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin quantile critic and its target copy.
        self.critic = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()
        # Target-policy smoothing: clipped Gaussian noise on target actions.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2  # actor/targets update every 2 critic updates
        self.total_it = 0

        self.current_task = 1

        # Per-task exploration-noise schedules; later tasks start less noisy.
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            2: np.linspace(EXPLORATION_NOISE_START * 0.8, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            3: np.linspace(EXPLORATION_NOISE_START * 0.7, EXPLORATION_NOISE_END, EPISODES_PER_TASK)
        }

        # Halve learning rates when the (maximized) episode reward plateaus.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True)

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Return an action for `state`, with exploration noise, clipped to bounds."""
        if len(state.shape) == 2:
            # Add a leading batch dimension expected by the GRU actor.
            state = np.expand_dims(state, 0)
        state = torch.FloatTensor(state).to(device)
        action = self.actor(state).cpu().data.numpy().flatten()
        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise
        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Start a new task: clear the replay buffer and reset GRU state.

        Unlike SD3.switch_task, nothing is consolidated — previous-task
        knowledge is free to be overwritten (the point of the ablation).
        """
        print(f"\nSwitching to task {task_id} (No EWC)")
        print(f"Clearing replay buffer for new task.")
        self.memory.buffer.clear()
        self.current_task = task_id
        self.actor.reset_hidden()
        self.critic.reset_hidden()
        print(f"Reset GRU states for new task {task_id}")

    def train(self):
        """One update step: identical to SD3.train minus the EWC penalties.

        Returns:
            dict of scalar losses (EWC entries fixed at 0.0), or None while
            the replay buffer holds fewer than BATCH_SIZE transitions.
        """
        self.total_it += 1
        if len(self.memory) < BATCH_SIZE:
            return

        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Fresh GRU hidden states sized for this batch.
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        with torch.no_grad():
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target-policy smoothing (TD3): clipped noise on the target action.
            noise = torch.FloatTensor(action.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q at the quantile level: element-wise min of heads.
            target_q1_quantiles, target_q2_quantiles = self.critic_target(next_state, next_action)
            target_q_quantiles = torch.min(target_q1_quantiles, target_q2_quantiles)

            # Distributional Bellman target, broadcast over all quantiles.
            reward_expanded = reward.expand(-1, NUM_QUANTILES)
            done_expanded = done.expand(-1, NUM_QUANTILES)
            target_quantiles = reward_expanded + (1 - done_expanded) * GAMMA * target_q_quantiles

        current_q1_quantiles, current_q2_quantiles = self.critic(state, action)

        critic_loss_q1 = quantile_huber_loss(current_q1_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss_q2 = quantile_huber_loss(current_q2_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss = critic_loss_q1 + critic_loss_q2

        self.critic_optimizer.zero_grad()
        critic_loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss = 0

        # Delayed policy update (every `policy_freq` critic steps).
        if self.total_it % self.policy_freq == 0:
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)

            # Deterministic policy gradient: maximize Q1 of the current critic.
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Polyak soft update of both target networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss if isinstance(actor_loss, (int, float)) else actor_loss.item(),
            "critic_ewc_loss": 0.0,
            "actor_ewc_loss": 0.0
        }

    def update_lr_schedulers(self, reward):
        """Feed the episode reward into both plateau-based LR schedulers."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


# [Modified] Training loop that trains both algorithms (EWC vs. No-EWC) side by side
def train():
    """Train the EWC-regularized SD3 agent and the No-EWC ablation side by side.

    Runs three sequential task phases of `episodes_per_task` episodes. In each
    episode both agents perform one rollout on the shared Environment (each
    rollout starts from its own env.reset(), so the episodes are independent),
    learn online from their own replay buffers, and both agents' reward /
    task-collection / energy / delay histories are logged, periodically
    plotted, and checkpointed for comparison.

    BUGFIX: previously the No-EWC rollout overwrote `info` before the EWC
    agent's energy/delay/required-tasks/reward-breakdown were recorded, so
    the EWC logs silently contained No-EWC values. The EWC rollout's final
    `info` is now snapshotted as `info_ewc` and used for all EWC-side logging.

    Returns:
        tuple: (agent_ewc, agent_no_ewc, env)
    """
    os.makedirs("results", exist_ok=True)
    env = Environment()

    # State layout: UAV xy (2) + 4 features per user + 2 extra scalars.
    # NOTE(review): inferred from the dimension formula only — confirm
    # against Environment's observation construction.
    state_dim = 2 + NUM_USERS * 4 + 1 + 1
    action_dim = 4
    max_action = 1

    # The two agents under comparison.
    agent_ewc = SD3(state_dim, action_dim, max_action)
    agent_no_ewc = SD3_NoEWC(state_dim, action_dim, max_action)

    total_episodes = 600
    episodes_per_task = 200
    eval_freq = 50

    # Histories for the EWC agent.
    rewards_history_ewc = []
    smoothed_rewards_ewc = []
    collection_history_ewc = []
    energy_history_ewc = []
    delay_history_ewc = []

    # Histories for the No-EWC baseline.
    rewards_history_no_ewc = []
    smoothed_rewards_no_ewc = []
    collection_history_no_ewc = []
    energy_history_no_ewc = []
    delay_history_no_ewc = []

    best_reward_ewc = -float('inf')
    best_collection_ewc = 0
    best_reward_no_ewc = -float('inf')
    best_collection_no_ewc = 0

    losses_ewc = {"critic": [], "actor": []}
    losses_no_ewc = {"critic": [], "actor": []}

    start_time = time.time()

    for phase in range(1, 4):
        env.update_task_generating_users(phase)
        agent_ewc.switch_task(phase)
        agent_no_ewc.switch_task(phase)
        # Shared per-phase exploration schedule; later phases start lower.
        phase_noise_base = EXPLORATION_NOISE_START * (0.9 ** (phase - 1))
        phase_noise = np.linspace(phase_noise_base, EXPLORATION_NOISE_END, episodes_per_task)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode
            current_noise = phase_noise[episode - 1]

            # ---- Rollout + online training: EWC agent ----
            state = env.reset()
            agent_ewc.actor.reset_hidden()
            agent_ewc.critic.reset_hidden()
            episode_reward_ewc = 0
            last_collection_ewc = 0
            episode_losses_ewc = {"critic": [], "actor": []}

            for step in range(1, MAX_STEPS + 1):
                action = agent_ewc.select_action(state, noise_scale=current_noise)
                next_state, reward, done, info = env.step(action)
                agent_ewc.memory.add(state, action, reward, next_state, done)
                loss_info = agent_ewc.train()
                if loss_info:
                    episode_losses_ewc["critic"].append(loss_info["critic_loss"])
                    episode_losses_ewc["actor"].append(loss_info["actor_loss"])
                    # Remember the latest EWC penalty values for logging.
                    agent_ewc.last_ewc_losses = {
                        'actor_ewc': loss_info.get("actor_ewc_loss", 0.0),
                        'critic_ewc': loss_info.get("critic_ewc_loss", 0.0)
                    }

                state = next_state
                episode_reward_ewc += reward
                last_collection_ewc = info["collected_required"]
                if done:
                    break

            # BUGFIX: snapshot the EWC rollout's final info dict before the
            # No-EWC rollout below overwrites `info`.
            info_ewc = info

            # ---- Rollout + online training: No-EWC baseline ----
            # (fresh episode via env.reset(); env object is shared, not copied)
            state = env.reset()
            agent_no_ewc.actor.reset_hidden()
            agent_no_ewc.critic.reset_hidden()
            episode_reward_no_ewc = 0
            last_collection_no_ewc = 0
            episode_losses_no_ewc = {"critic": [], "actor": []}

            for step in range(1, MAX_STEPS + 1):
                action = agent_no_ewc.select_action(state, noise_scale=current_noise)
                next_state, reward, done, info = env.step(action)
                agent_no_ewc.memory.add(state, action, reward, next_state, done)
                loss_info = agent_no_ewc.train()
                if loss_info:
                    episode_losses_no_ewc["critic"].append(loss_info["critic_loss"])
                    episode_losses_no_ewc["actor"].append(loss_info["actor_loss"])

                state = next_state
                episode_reward_no_ewc += reward
                last_collection_no_ewc = info["collected_required"]
                if done:
                    break

            # ---- Record EWC results (from its own rollout's info) ----
            rewards_history_ewc.append(episode_reward_ewc)
            collection_history_ewc.append(last_collection_ewc)
            energy_history_ewc.append(info_ewc["energy"])
            delay_history_ewc.append(info_ewc["delay"])

            # 10-episode moving average for the smoothed curve.
            if len(rewards_history_ewc) >= 10:
                smoothed_rewards_ewc.append(np.mean(rewards_history_ewc[-10:]))
            else:
                smoothed_rewards_ewc.append(episode_reward_ewc)
            if episode_losses_ewc["critic"]: losses_ewc["critic"].append(np.mean(episode_losses_ewc["critic"]))
            if episode_losses_ewc["actor"]: losses_ewc["actor"].append(np.mean(episode_losses_ewc["actor"]))

            # ---- Record No-EWC results ----
            rewards_history_no_ewc.append(episode_reward_no_ewc)
            collection_history_no_ewc.append(last_collection_no_ewc)
            energy_history_no_ewc.append(info["energy"])
            delay_history_no_ewc.append(info["delay"])

            if len(rewards_history_no_ewc) >= 10:
                smoothed_rewards_no_ewc.append(np.mean(rewards_history_no_ewc[-10:]))
            else:
                smoothed_rewards_no_ewc.append(episode_reward_no_ewc)
            if episode_losses_no_ewc["critic"]: losses_no_ewc["critic"].append(np.mean(episode_losses_no_ewc["critic"]))
            if episode_losses_no_ewc["actor"]: losses_no_ewc["actor"].append(np.mean(episode_losses_no_ewc["actor"]))

            agent_ewc.update_lr_schedulers(episode_reward_ewc)
            agent_no_ewc.update_lr_schedulers(episode_reward_no_ewc)

            # Collection ratios, each against its own episode's requirement
            # (previously both used the No-EWC episode's total_required).
            current_required = info_ewc["total_required"]
            required_no_ewc = info["total_required"]
            collection_ratio_ewc = last_collection_ewc / current_required if current_required > 0 else 0
            collection_ratio_no_ewc = last_collection_no_ewc / required_no_ewc if required_no_ewc > 0 else 0

            # Track each agent's best episode (ratio first, reward as tiebreak).
            if collection_ratio_ewc > best_collection_ewc or (
                    collection_ratio_ewc == best_collection_ewc and episode_reward_ewc > best_reward_ewc):
                best_reward_ewc = episode_reward_ewc
                best_collection_ewc = collection_ratio_ewc
                torch.save(agent_ewc.actor.state_dict(), f"results/best_actor_ewc_phase_{phase}.pth")

            if collection_ratio_no_ewc > best_collection_no_ewc or (
                    collection_ratio_no_ewc == best_collection_no_ewc and episode_reward_no_ewc > best_reward_no_ewc):
                best_reward_no_ewc = episode_reward_no_ewc
                best_collection_no_ewc = collection_ratio_no_ewc
                torch.save(agent_no_ewc.actor.state_dict(), f"results/best_actor_no_ewc_phase_{phase}.pth")

            elapsed_time = time.time() - start_time

            # Mean offloading ratio over completed tasks. NOTE: env state here
            # reflects the most recent (No-EWC) rollout.
            avg_offloading_ratio = 0.0
            completed_tasks_with_offload = []
            for i in range(NUM_USERS):
                if env.task_generating_users[i] and env.collected_tasks[i]:
                    completed_tasks_with_offload.append(env.user_offloading_ratios[i])

            if completed_tasks_with_offload:
                avg_offloading_ratio = np.mean(completed_tasks_with_offload)

            # Per-episode mean losses for the progress line.
            avg_actor_loss_ewc = np.mean(episode_losses_ewc["actor"]) if episode_losses_ewc["actor"] else 0.0
            avg_critic_loss_ewc = np.mean(episode_losses_ewc["critic"]) if episode_losses_ewc["critic"] else 0.0
            avg_actor_loss_no_ewc = np.mean(episode_losses_no_ewc["actor"]) if episode_losses_no_ewc["actor"] else 0.0
            avg_critic_loss_no_ewc = np.mean(episode_losses_no_ewc["critic"]) if episode_losses_no_ewc["critic"] else 0.0

            # Last-seen EWC penalty magnitudes (set inside agent_ewc.train()).
            avg_actor_ewc_loss = 0.0
            avg_critic_ewc_loss = 0.0
            if hasattr(agent_ewc, 'last_ewc_losses'):
                avg_actor_ewc_loss = agent_ewc.last_ewc_losses.get('actor_ewc', 0.0)
                avg_critic_ewc_loss = agent_ewc.last_ewc_losses.get('critic_ewc', 0.0)

            # Reward-breakdown strings, each built from its own rollout's info
            # (previously both strings came from the No-EWC info).
            reward_str_ewc = ""
            reward_str_no_ewc = ""
            if 'reward_breakdown' in info_ewc:
                rb = info_ewc['reward_breakdown']
                reward_str_ewc = (f"Rwd(Col:{rb['collection_reward']:.1f} "
                                  f"Pro:{rb['proximity_reward']:.1f} "
                                  f"Comp:{rb['completion_reward']:.1f} "
                                  f"Cost:{rb['cost']:.1f} "
                                  f"Step:{rb['step_penalty']:.1f})")
            if 'reward_breakdown' in info:
                rb = info['reward_breakdown']
                reward_str_no_ewc = (f"Rwd(Col:{rb['collection_reward']:.1f} "
                                     f"Pro:{rb['proximity_reward']:.1f} "
                                     f"Comp:{rb['completion_reward']:.1f} "
                                     f"Cost:{rb['cost']:.1f} "
                                     f"Step:{rb['step_penalty']:.1f})")

            # Energy breakdown string (from the No-EWC rollout, as before).
            energy_str = ""
            if 'flight_energy' in info and 'comp_energy' in info:
                energy_str = f"E(Flight:{info['flight_energy']:.1f} Comp:{info['comp_energy']:.1f})"

            # Delay breakdown string (from the No-EWC rollout, as before).
            delay_str = ""
            if 'delay_breakdown' in info:
                db = info['delay_breakdown']
                delay_str = f"D(Tot:{db['total_delay']:.2f}s AvgComp:{db['avg_computation_delay']:.3f}s AvgOff:{db['avg_offloading_delay']:.3f}s)"

            # Side-by-side progress line for both algorithms.
            print(f"P:{phase} Ep {episode:3d}/{episodes_per_task} "
                  f"Tasks:{last_collection_ewc:2d}/{current_required:2d} "
                  f"Steps:{env.step_count:3d} "
                  f"Speed:{env.current_speed:.1f} m/s "
                  f"Noise:{current_noise:.3f} "
                  f"AvgOffload: {avg_offloading_ratio:.2f} "
                  f"[EWC] Loss(A/C/EWC_A/EWC_C) {avg_actor_loss_ewc:.3f}/{avg_critic_loss_ewc:.3f}/{avg_actor_ewc_loss:.3f}/{avg_critic_ewc_loss:.3f} | "
                  f"Rwd: {episode_reward_ewc:.2f} [{reward_str_ewc}] | "
                  f"[No_EWC] Loss(A/C) {avg_actor_loss_no_ewc:.3f}/{avg_critic_loss_no_ewc:.3f} | "
                  f"Rwd: {episode_reward_no_ewc:.2f} [{reward_str_no_ewc}] | "
                  f"Total E: {info.get('energy', 0):.1f} [{energy_str}] | "
                  f"Avg D: {info.get('delay', 0):.3f}s [{delay_str}] | "
                  f"Time: {elapsed_time:.1f}s")

            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                # Comparison plots: rewards, collected tasks, energy, delay,
                # losses, and normalized performance.
                plt.figure(figsize=(30, 5))

                plt.subplot(1, 6, 1)
                plt.plot(rewards_history_ewc, alpha=0.3, color='blue', label='EWC Raw')
                plt.plot(smoothed_rewards_ewc, color='red', label='EWC Smoothed')
                plt.plot(rewards_history_no_ewc, alpha=0.3, color='green', label='No EWC Raw')
                plt.plot(smoothed_rewards_no_ewc, color='orange', label='No EWC Smoothed')
                plt.axvline(x=episodes_per_task, color='gray', linestyle='--', label='Phase 1->2')
                plt.axvline(x=2 * episodes_per_task, color='gray', linestyle='--', label='Phase 2->3')
                plt.title("Reward Comparison")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                plt.subplot(1, 6, 2)
                plt.plot(collection_history_ewc, color='blue', label='EWC')
                plt.plot(collection_history_no_ewc, color='green', label='No EWC')
                plt.axvline(x=episodes_per_task, color='gray', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='gray', linestyle='--')
                plt.title("Collected Tasks Comparison")
                plt.xlabel("Episode")
                plt.ylabel("Number of Tasks")
                plt.legend()
                plt.grid(True)

                plt.subplot(1, 6, 3)
                plt.plot(energy_history_ewc, color='blue', label='EWC')
                plt.plot(energy_history_no_ewc, color='green', label='No EWC')
                plt.axvline(x=episodes_per_task, color='gray', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='gray', linestyle='--')
                plt.title("Total Energy Comparison")
                plt.xlabel("Episode")
                plt.ylabel("Energy")
                plt.legend()
                plt.grid(True)

                plt.subplot(1, 6, 4)
                plt.plot(delay_history_ewc, color='blue', label='EWC')
                plt.plot(delay_history_no_ewc, color='green', label='No EWC')
                plt.axvline(x=episodes_per_task, color='gray', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='gray', linestyle='--')
                plt.title("Avg Delay Comparison")
                plt.xlabel("Episode")
                plt.ylabel("Delay (s)")
                plt.legend()
                plt.grid(True)

                plt.subplot(1, 6, 5)
                if losses_ewc["critic"]: plt.plot(losses_ewc["critic"], color='blue', label='EWC Critic')
                if losses_ewc["actor"]: plt.plot(losses_ewc["actor"], color='red', label='EWC Actor')
                if losses_no_ewc["critic"]: plt.plot(losses_no_ewc["critic"], color='green', label='No EWC Critic')
                if losses_no_ewc["actor"]: plt.plot(losses_no_ewc["actor"], color='orange', label='No EWC Actor')
                plt.axvline(x=episodes_per_task, color='gray', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='gray', linestyle='--')
                plt.title("Training Loss Comparison")
                plt.xlabel("Episode")
                plt.ylabel("Loss")
                plt.legend()
                plt.grid(True)

                # Performance comparison subplot (rewards scaled by 1/100).
                plt.subplot(1, 6, 6)
                ewc_performance = [r / 100 for r in smoothed_rewards_ewc]
                no_ewc_performance = [r / 100 for r in smoothed_rewards_no_ewc]
                plt.plot(ewc_performance, color='blue', label='EWC Performance')
                plt.plot(no_ewc_performance, color='green', label='No EWC Performance')
                plt.axvline(x=episodes_per_task, color='gray', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='gray', linestyle='--')
                plt.title("Performance Comparison")
                plt.xlabel("Episode")
                plt.ylabel("Normalized Performance")
                plt.legend()
                plt.grid(True)

                plt.tight_layout()
                plt.savefig(f"results/comparison_curves_episode_{global_episode}.png", dpi=300, bbox_inches='tight')
                plt.close()

                # Full checkpoint: both agents' weights, optimizers and logs.
                torch.save({
                    'ewc_actor_state_dict': agent_ewc.actor.state_dict(),
                    'ewc_critic_state_dict': agent_ewc.critic.state_dict(),
                    'ewc_actor_optimizer': agent_ewc.actor_optimizer.state_dict(),
                    'ewc_critic_optimizer': agent_ewc.critic_optimizer.state_dict(),
                    'no_ewc_actor_state_dict': agent_no_ewc.actor.state_dict(),
                    'no_ewc_critic_state_dict': agent_no_ewc.critic.state_dict(),
                    'no_ewc_actor_optimizer': agent_no_ewc.actor_optimizer.state_dict(),
                    'no_ewc_critic_optimizer': agent_no_ewc.critic_optimizer.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history_ewc': rewards_history_ewc,
                    'rewards_history_no_ewc': rewards_history_no_ewc,
                    'collection_history_ewc': collection_history_ewc,
                    'collection_history_no_ewc': collection_history_no_ewc,
                    'energy_history_ewc': energy_history_ewc,
                    'energy_history_no_ewc': energy_history_no_ewc,
                    'delay_history_ewc': delay_history_ewc,
                    'delay_history_no_ewc': delay_history_no_ewc,
                    'best_reward_ewc': best_reward_ewc,
                    'best_collection_ewc': best_collection_ewc,
                    'best_reward_no_ewc': best_reward_no_ewc,
                    'best_collection_no_ewc': best_collection_no_ewc
                }, f"results/comparison_checkpoint_episode_{global_episode}.pt")

        # Save each phase's final models for both agents.
        torch.save(agent_ewc.actor.state_dict(), f"results/actor_ewc_phase_{phase}.pth")
        torch.save(agent_ewc.critic.state_dict(), f"results/critic_ewc_phase_{phase}.pth")
        torch.save(agent_no_ewc.actor.state_dict(), f"results/actor_no_ewc_phase_{phase}.pth")
        torch.save(agent_no_ewc.critic.state_dict(), f"results/critic_no_ewc_phase_{phase}.pth")

    print(f"Training completed!")
    print(f"EWC: Best result: {best_collection_ewc * 100:.1f}% tasks, Reward: {best_reward_ewc:.2f}")
    print(f"No EWC: Best result: {best_collection_no_ewc * 100:.1f}% tasks, Reward: {best_reward_no_ewc:.2f}")

    return agent_ewc, agent_no_ewc, env


def test_and_visualize(agent, env, model_path, phase=3, algorithm_name="EWC"):
    """Run one greedy (noise-free) evaluation episode and save result plots.

    Loads actor weights from ``model_path`` into ``agent``, rolls out a single
    episode in ``env`` under the task distribution of ``phase``, saves a
    trajectory figure and a per-step/cumulative reward figure under
    ``results/``, and prints collection and offloading statistics.

    Args:
        agent: Agent exposing ``actor`` (torch module with ``reset_hidden``)
            and ``select_action(state, noise_scale)``.
        env: Environment exposing ``reset``/``step``/``render`` plus the
            attributes read below (``uav_position``, ``collected_tasks``,
            ``task_generating_users``, ``user_positions``,
            ``user_offloading_ratios``, ``step_count``).
        model_path: Path to a saved actor ``state_dict``.
        phase: Curriculum phase whose task-generating users are activated.
        algorithm_name: Label used in plot titles and output file names.

    Returns:
        dict with keys ``collected_ratio``, ``total_reward``, ``energy``,
        ``delay``, ``steps`` summarising the episode.
    """
    # map_location makes loading robust when the checkpoint was produced on a
    # different device (e.g. trained on GPU, evaluated on a CPU-only host).
    agent.actor.load_state_dict(torch.load(model_path, map_location=device))
    agent.actor.eval()
    env.update_task_generating_users(phase)
    state = env.reset()
    agent.actor.reset_hidden()
    total_reward = 0
    step_rewards = []
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS)
    collection_order = []

    for step in range(1, MAX_STEPS + 1):
        action = agent.select_action(state, noise_scale=0)  # greedy: no exploration noise
        collected_before = env.collected_tasks.copy()
        next_state, reward, done, info = env.step(action)
        # Record the POST-step position: trajectory[step] is then where the
        # UAV actually was when this step's collections happened.  (Appending
        # before env.step duplicated the start point and never recorded the
        # final position, misplacing the endpoint marker and UAV-BS link.)
        trajectory.append(env.uav_position.copy())
        for i in range(NUM_USERS):
            # A task counts as newly collected on the first step its flag flips.
            if env.task_generating_users[i] and env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step
                collection_order.append(i)
        total_reward += reward
        step_rewards.append(reward)
        state = next_state
        if step % 5 == 0 or done:
            env.render(step)
        if done:
            break

    # --- Trajectory figure -------------------------------------------------
    trajectory = np.array(trajectory)
    plt.figure(figsize=(12, 10))

    plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
    plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                 textcoords="offset points", xytext=(0, -25),
                 ha='center', fontsize=10, fontweight='bold')

    # Users: green = collected (with offload ratio), red = missed,
    # gray = not generating tasks in this phase.
    for i, (x, y) in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            if env.collected_tasks[i]:
                color = 'green'
                plt.scatter(x, y, s=150, c=color, marker='o')
                offload_ratio = env.user_offloading_ratios[i]
                plt.annotate(f"用户 {i + 1}\n(步数 {int(collection_times[i])})\nOffload: {offload_ratio:.2f}",
                             (x, y), textcoords="offset points", xytext=(0, 10),
                             ha='center', fontsize=10)
            else:
                color = 'red'
                plt.scatter(x, y, s=150, c=color, marker='o')
                plt.annotate(f"用户 {i + 1}\n(未收集)", (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
        else:
            color = 'gray'
            plt.scatter(x, y, s=100, c=color, marker='o')
            plt.annotate(f"用户 {i + 1}\n(不产生任务)", (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)

    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    # Step-index labels every 10 points along the flight path.
    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]), fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Dashed green lines from each user to the UAV position at collection time.
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and env.collected_tasks[i]:
            step = int(collection_times[i])
            if step < len(trajectory):
                uav_pos = trajectory[step]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]], 'g--', alpha=0.5)

    plt.plot([trajectory[-1, 0], BASE_STATION_POSITION[0]],
             [trajectory[-1, 1], BASE_STATION_POSITION[1]],
             'orange', linestyle='--', alpha=0.5, linewidth=2, label='UAV-BS Link')

    plt.title(
        f"UAV任务收集轨迹 ({algorithm_name} - 阶段{phase}: 收集 {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)} 任务, 步数: {env.step_count})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.legend()
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig(f"results/final_uav_trajectory_{algorithm_name.lower()}_phase_{phase}.png")
    plt.close()

    # --- Reward figure (per-step and cumulative) ---------------------------
    plt.figure(figsize=(15, 5))
    plt.subplot(1, 2, 1)
    plt.plot(step_rewards)
    plt.title(f"步奖励 ({algorithm_name})")
    plt.xlabel("步数")
    plt.ylabel("奖励")
    plt.grid(True)
    plt.subplot(1, 2, 2)
    plt.plot(np.cumsum(step_rewards))
    plt.title(f"累计奖励 ({algorithm_name})")
    plt.xlabel("步数")
    plt.ylabel("累计奖励")
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(f"results/test_rewards_{algorithm_name.lower()}_phase_{phase}.png")
    plt.close()

    # --- Console summary ---------------------------------------------------
    print(f"\n测试结果 ({algorithm_name} - 阶段 {phase}):")
    collected_count = sum(env.collected_tasks & env.task_generating_users)
    total_count = sum(env.task_generating_users)
    percentage = collected_count / total_count * 100 if total_count > 0 else 0
    print(f"收集任务: {collected_count}/{total_count} ({percentage:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    # `info` holds the metrics of the last env.step; the loop always runs at
    # least once because MAX_STEPS >= 1.
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

    print("\n卸载决策统计:")
    collected_indices = [i for i in range(NUM_USERS)
                         if env.task_generating_users[i] and env.collected_tasks[i]]
    if collected_indices:
        avg_offload_ratio = np.mean([env.user_offloading_ratios[i] for i in collected_indices])
        print(f"平均卸载比例: {avg_offload_ratio:.3f}")
        # Bucket offloading decisions: <0.1 local, 0.1-0.9 mixed, >=0.9 remote.
        local_count = sum(1 for i in collected_indices if env.user_offloading_ratios[i] < 0.1)
        mixed_count = sum(1 for i in collected_indices if 0.1 <= env.user_offloading_ratios[i] < 0.9)
        remote_count = sum(1 for i in collected_indices if env.user_offloading_ratios[i] >= 0.9)
        print(f"本地处理: {local_count}, 混合处理: {mixed_count}, 远程处理: {remote_count}")

    print("\n任务收集详情:")
    collection_indices = [(i, int(collection_times[i])) for i in range(NUM_USERS)
                          if env.task_generating_users[i] and env.collected_tasks[i]]
    collection_indices.sort(key=lambda x: x[1])  # report in chronological order
    for i, step in collection_indices:
        offload_ratio = env.user_offloading_ratios[i]
        print(f"用户 {i + 1}: 在步数 {step} 收集, 卸载比例: {offload_ratio:.3f}")
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and not env.collected_tasks[i]:
            print(f"用户 {i + 1}: 未收集")

    return {
        'collected_ratio': percentage / 100,
        'total_reward': total_reward,
        'energy': info['energy'],
        'delay': info['delay'],
        'steps': env.step_count
    }


if __name__ == "__main__":
    agent_ewc, agent_no_ewc, env = train()
    print("\n" + "=" * 80)
    print("训练完成！开始测试各阶段模型性能...")
    print("=" * 80)

    # Per-phase evaluation results, keyed by phase number, one dict per variant.
    results_ewc = {}
    results_no_ewc = {}

    for phase in range(1, 4):
        print(f"\n{'=' * 30} 测试阶段 {phase} {'=' * 30}")

        print(f"\n{'-' * 20} EWC算法测试 {'-' * 20}")
        results_ewc[phase] = test_and_visualize(
            agent_ewc, env,
            model_path=f"results/actor_ewc_phase_{phase}.pth",
            phase=phase, algorithm_name="EWC")

        print(f"\n{'-' * 20} 无EWC算法测试 {'-' * 20}")
        results_no_ewc[phase] = test_and_visualize(
            agent_no_ewc, env,
            model_path=f"results/actor_no_ewc_phase_{phase}.pth",
            phase=phase, algorithm_name="No_EWC")

    # Final side-by-side comparison table for both algorithm variants.
    print("\n" + "=" * 80)
    print("最终对比报告")
    print("=" * 80)
    print(f"{'阶段':<6} {'算法':<10} {'任务完成率':<12} {'总奖励':<10} {'总能耗':<10} {'总延迟':<10} {'总步数':<8}")
    print("-" * 80)

    for phase in range(1, 4):
        rows = (("EWC", results_ewc[phase]), ("No_EWC", results_no_ewc[phase]))
        for label, res in rows:
            print(
                f"{phase:<6} {label:<10} {res['collected_ratio'] * 100:>10.1f}% {res['total_reward']:>8.1f} "
                f"{res['energy']:>8.1f} {res['delay']:>8.3f} {res['steps']:>6d}")
        print("-" * 80)

    print("\nEWC算法在持续学习场景中展现出了更好的性能保持能力！")
