
import copy
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time

# Reproducibility: seed every RNG source used in this script.
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Matplotlib: CJK-capable font (plots contain Chinese labels) and a renderable minus sign.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# [All environment parameters and constants kept unchanged]
AREA_SIZE = 200  # side length of the square service area (m)
NUM_USERS = 12  # number of ground users
MAX_STEPS = 300  # episode horizon (steps)
MAX_DISTANCE_COLLECT = 15  # max 2-D distance (m) at which the UAV can collect a user's task

UAV_HEIGHT = 30.0  # fixed UAV flight altitude (m)
UAV_COMPUTE_CAPACITY = 1e10  # UAV onboard CPU frequency (cycles/s)
MIN_UAV_SPEED = 2.0  # m/s
MAX_UAV_SPEED = 20.0  # m/s
DEFAULT_UAV_SPEED = 10.0  # m/s, used right after reset

BASE_STATION_POSITION = np.array([0.0, 0.0])  # BS at the area origin
BASE_STATION_COMPUTE_CAPACITY = 2e11  # BS/MEC CPU frequency (cycles/s)
BASE_STATION_HEIGHT = 10.0  # BS antenna height (m)

# --- SD3 / TD3-style training hyper-parameters ---
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99  # discount factor
TAU = 0.005  # Polyak averaging rate for target networks
BUFFER_SIZE = 200000
BATCH_SIZE = 64
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.1  # global scale applied to every reward component

# --- Distributional (quantile) critic ---
NUM_QUANTILES = 51
# Quantile midpoints tau_i = (2i-1)/(2N), i = 1..N (QR-DQN convention).
QUANTILE_TAU = torch.FloatTensor([(2 * i - 1) / (2.0 * NUM_QUANTILES) for i in range(1, NUM_QUANTILES + 1)]).to(device)
KAPPA = 1.0  # Huber threshold for the quantile loss

# --- A-GEM continual-learning memory ---
AGEM_MEMORY_SIZE = 1000
AGEM_BATCH_SIZE = 32
AGEM_GAMMA = 0.5  # flexible gradient-fusion coefficient

# --- Recurrent observation encoding ---
SEQUENCE_LENGTH = 10  # observation-history length fed to the GRU
HIDDEN_SIZE = 128

# --- Communication model ---
BANDWIDTH = 1e6  # Hz
USER_TRANSMIT_POWER = 0.1  # W
CHANNEL_GAIN_REF_DB = 30.0  # reference channel gain at 1 m (dB)
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23  # J/K
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH  # thermal noise (W)
RICE_FACTOR = 5  # Rician K-factor (LoS-to-NLoS power ratio)

# --- Task model: uniform ranges for [min, max] ---
TASK_SIZE_BITS = [1e6, 2e6]
TASK_CPU_CYCLES = [5e8, 15e8]

# --- Rotary-wing UAV propulsion-power model coefficients ---
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81
AIR_DENSITY = 1.225
ROTOR_RADIUS = 0.4
NUM_ROTORS = 4
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-27  # chip-dependent CMOS energy coefficient

# --- Reward shaping weights/scales for delay and energy costs ---
DELAY_WEIGHT = 0.7
ENERGY_WEIGHT = 0.3
DELAY_SCALE = 100.0
ENERGY_SCALE = 0.001


class Environment:
    """UAV-assisted MEC data-collection environment.

    A single UAV flies over a square area, collects computation tasks from
    ground users within ``MAX_DISTANCE_COLLECT``, and splits each collected
    task between onboard computation and offloading to a ground base station.
    Observations are stacked into a fixed-length history for recurrent (GRU)
    agents; ``step`` consumes a 4-dim continuous action
    (direction x/y, speed, offloading ratio), each component in [-1, 1].
    """

    def __init__(self):
        # Static scenario: user layout and per-user task demands.
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        # Per-user delay/energy bookkeeping, filled in when a task is collected.
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.total_flight_energy = 0

        self.user_offloading_ratios = np.zeros(NUM_USERS)

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1
        self.phase_1_users = None  # remembered so phase 4 can replay phase 1's user set

        # Accumulated per-episode reward components (for logging/diagnostics).
        self.episode_reward_breakdown = {
            'collection_reward': 0.0,
            'proximity_reward': 0.0,
            'completion_reward': 0.0,
            'cost': 0.0,
            'step_penalty': 0.0
        }

    def _calculate_rice_channel_gain(self, distance_2d, height=UAV_HEIGHT):
        """Return the instantaneous channel power gain over a Rician fading link.

        Combines distance-based path loss (3-D distance, floor at 1 m) with a
        random Rician small-scale fading realization (K = RICE_FACTOR).
        """
        distance_3d = np.sqrt(distance_2d ** 2 + height ** 2)
        if distance_3d < 1.0: distance_3d = 1.0
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        K = RICE_FACTOR
        h_los = 1.0
        # NLoS component: unit-power complex Gaussian.
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """Delay (s) for user `user_index` to upload its task to the UAV."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)  # Shannon capacity (bit/s)
        return self.task_sizes[user_index] / data_rate

    def _calculate_uav_to_bs_delay(self, task_size):
        """Delay (s) to relay `task_size` bits from the UAV to the base station."""
        bs_distance_2d = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        # NOTE(review): the UAV-BS altitude difference is passed as the `height`
        # of the channel model, so the 3-D link distance is reconstructed inside.
        height_diff = abs(UAV_HEIGHT - BASE_STATION_HEIGHT)
        channel_gain = self._calculate_rice_channel_gain(bs_distance_2d, height_diff)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return task_size / data_rate

    def _calculate_completion_delay(self, user_index, offloading_ratio):
        """End-to-end completion delay (s) of one task under partial offloading.

        offloading_ratio == 0: all computation on the UAV;
        offloading_ratio == 1: everything relayed to and computed at the BS;
        otherwise the two paths run in parallel and the slower one dominates.
        """
        total_task_size = self.task_sizes[user_index]
        total_cpu_cycles = self.task_cpu_cycles[user_index]

        # Upload leg (user -> UAV) is common to every offloading choice.
        user_to_uav_delay = self._calculate_offloading_delay(user_index,
                                                             np.linalg.norm(
                                                                 self.uav_position - self.user_positions[user_index]))

        if offloading_ratio == 0:
            local_computation_delay = total_cpu_cycles / UAV_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + local_computation_delay

        elif offloading_ratio == 1:
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(total_task_size)
            bs_computation_delay = total_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

        else:
            # Partial offloading: split the bits/cycles, run both paths in parallel.
            bs_task_size = total_task_size * offloading_ratio

            local_cpu_cycles = total_cpu_cycles * (1 - offloading_ratio)
            bs_cpu_cycles = total_cpu_cycles * offloading_ratio

            local_computation_delay = local_cpu_cycles / UAV_COMPUTE_CAPACITY
            local_total_delay = user_to_uav_delay + local_computation_delay

            uav_to_bs_delay = self._calculate_uav_to_bs_delay(bs_task_size)
            bs_computation_delay = bs_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            bs_total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

            # Parallel execution: completion is gated by the slower path.
            total_delay = max(local_total_delay, bs_total_delay)

        return total_delay

    def _calculate_flight_energy(self, distance_moved, actual_speed=None, time_delta=1.0):
        """Propulsion energy (J) for one step of rotary-wing flight.

        Power model: induced + blade-profile + parasite terms as a function of
        forward speed; speed 0 yields the hovering power.
        """
        if actual_speed is None:
            speed = distance_moved / time_delta
        else:
            speed = actual_speed

        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index, offloading_ratio):
        """CMOS computation energy (J) for the locally-executed task fraction."""
        local_cpu_cycles = self.task_cpu_cycles[user_index] * (1 - offloading_ratio)
        working_frequency = UAV_COMPUTE_CAPACITY
        # E = k * f^2 * C (effective switched capacitance model).
        energy = EFFECTIVE_SWITCHED_CAPACITANCE * (working_frequency ** 2) * local_cpu_cycles
        return energy

    def update_task_generating_users(self, phase):
        """Select which users generate tasks for continual-learning `phase` 1-4.

        Phase 1: random 10 users (remembered); phase 2: all users;
        phase 3: random 8 users; phase 4: replay of phase 1's user set.
        """
        self.current_phase = phase
        self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)

        if phase == 1:
            indices = np.random.choice(NUM_USERS, 10, replace=False)
            self.task_generating_users[indices] = True
            self.phase_1_users = self.task_generating_users.copy()
        elif phase == 2:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 3:
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users[indices] = True
        elif phase == 4:
            if self.phase_1_users is None:
                # Fall back to a fresh random draw if phase 1 was never run.
                print("错误: 未找到第一阶段的用户数据! 将重新为第四阶段生成10个用户。")
                indices = np.random.choice(NUM_USERS, 10, replace=False)
                self.task_generating_users[indices] = True
            else:
                self.task_generating_users = self.phase_1_users.copy()

        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Reset episode state and return the initial stacked observation."""
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        self.user_completion_delays.fill(0)
        self.user_offloading_delays.fill(0)
        self.user_computation_delays.fill(0)
        self.user_computation_energies.fill(0)
        self.user_offloading_ratios.fill(0)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()

        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] = 0.0

        # Pre-fill the history with the initial state so the GRU input is full-length.
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one time step.

        action[0:2]: movement direction (normalized internally),
        action[2]:   speed in [-1, 1] mapped to [MIN_UAV_SPEED, MAX_UAV_SPEED],
        action[3]:   offloading ratio in [-1, 1] mapped to [0, 1].
        Returns (stacked_observation, reward, done, info).
        """
        direction_raw = action[:2]

        direction_norm = np.linalg.norm(direction_raw)
        if direction_norm > 1e-6:
            movement_direction = direction_raw / direction_norm
        else:
            movement_direction = np.array([0.0, 0.0])  # hover

        speed = (action[2] + 1) / 2 * (MAX_UAV_SPEED - MIN_UAV_SPEED) + MIN_UAV_SPEED
        offloading_ratio = (action[3] + 1) / 2
        movement = movement_direction * speed

        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)  # keep inside the area
        self.trajectory.append(self.uav_position.copy())

        actual_movement = self.uav_position - prev_position
        distance_moved = np.linalg.norm(actual_movement)

        # Flight energy: commanded speed when moving, hover power otherwise.
        if distance_moved > 0:
            actual_speed = speed if direction_norm > 1e-6 else 0
            flight_energy_step = self._calculate_flight_energy(distance_moved, actual_speed=actual_speed)
        else:
            flight_energy_step = self._calculate_flight_energy(0, actual_speed=0)

        self.total_flight_energy += flight_energy_step
        self.current_speed = speed if direction_norm > 1e-6 else 0

        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect every in-range, not-yet-collected task and record its costs.
        newly_collected = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1

                    self.user_offloading_ratios[i] = offloading_ratio
                    self.user_offloading_delays[i] = self._calculate_offloading_delay(i, new_distances[i])
                    self.user_completion_delays[i] = self._calculate_completion_delay(i, offloading_ratio)
                    self.user_computation_delays[i] = self.user_completion_delays[i] - self.user_offloading_delays[i]
                    self.user_computation_energies[i] = self._calculate_computation_energy(i, offloading_ratio)

        self.step_count += 1

        # Aggregate statistics over the tasks completed so far.
        completed_indices = np.where(self.collected_tasks & self.task_generating_users)[0]

        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
            avg_offloading_delay = np.mean(self.user_offloading_delays[completed_indices])
            avg_computation_delay = np.mean(self.user_computation_delays[completed_indices])
        else:
            total_delay = 0.0
            total_comp_energy = 0.0
            avg_total_delay, avg_offloading_delay, avg_computation_delay = 0.0, 0.0, 0.0

        total_energy = self.total_flight_energy + total_comp_energy

        reward_info = self.calculate_reward(newly_collected, total_energy, total_delay, new_distances,
                                            self.last_distances)
        reward = reward_info['total_reward']
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] += reward_info[key]

        self.last_distances = new_distances

        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        self.observation_history.append(self._get_state())

        return self._get_gru_state(), reward, done, {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "flight_energy": self.total_flight_energy,
            "comp_energy": total_comp_energy,
            "delay_breakdown": {
                "avg_offloading_delay": avg_offloading_delay,
                "avg_computation_delay": avg_computation_delay,
                "avg_total_delay": avg_total_delay,
                "total_delay": total_delay,
            },
            "episode_reward_breakdown": self.episode_reward_breakdown
        }

    def _get_state(self):
        """Build the flat observation: UAV pos, per-user features, progress, BS distance.

        All entries are normalized to roughly [0, 1].
        """
        state = np.zeros(2 + NUM_USERS * 4 + 1 + 1)
        state[0:2] = self.uav_position / AREA_SIZE

        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 4
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # normalize by area diagonal
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
            state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]

        state[-2] = self.step_count / MAX_STEPS

        bs_distance = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        state[-1] = bs_distance / np.sqrt(2 * AREA_SIZE ** 2)

        return state

    def _get_gru_state(self):
        """Return the observation history as a (SEQUENCE_LENGTH, state_dim) array."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def calculate_reward(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Compute the shaped reward and its components for the current step.

        Components: collection bonus (time-decayed), proximity shaping toward
        the nearest uncollected user, terminal completion bonus, terminal
        delay/energy cost, and a growing per-step penalty. All terms are
        scaled by REWARD_SCALE.
        """
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users) if sum(self.task_generating_users) > 0 else 1
        completion_rate = collected_required / total_required

        # Dense shaping: reward for closing in on the nearest uncollected user.
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            uncollected_distances_old = old_distances[uncollected_indices]
            uncollected_distances_new = new_distances[uncollected_indices]
            closest_idx = np.argmin(uncollected_distances_new)
            dist_diff = uncollected_distances_old[closest_idx] - uncollected_distances_new[closest_idx]

            # Stronger shaping once the target is close.
            if uncollected_distances_new[closest_idx] < 50:
                proximity_reward = dist_diff * 0.24
            else:
                proximity_reward = dist_diff * 0.12

        # Collection bonus, larger earlier in the episode.
        time_factor = max(0, 1 - self.step_count / MAX_STEPS)
        collection_reward = newly_collected * 30.0 * (1 + time_factor * 0.5)

        # Per-step penalty that grows as the episode progresses.
        progress = self.step_count / MAX_STEPS
        step_penalty = 0.25 + 0.5 * progress

        # Terminal completion bonus, scaled by how quickly the episode ended.
        completion_reward = 0
        done = (self.step_count >= MAX_STEPS) or (collected_required == total_required)
        if done:
            step_efficiency = max(0.1, 1 - self.step_count / MAX_STEPS)
            base_completion = completion_rate * 400
            bonus = 1000 if completion_rate == 1.0 else 0
            completion_reward = (base_completion + bonus) * step_efficiency

        # Terminal delay/energy cost, averaged per collected task.
        cost = 0
        if done and collected_required > 0:
            delay_penalty = total_delay * DELAY_WEIGHT * 1200
            energy_penalty = total_energy * ENERGY_WEIGHT * 0.015
            cost = (delay_penalty + energy_penalty) / collected_required

        total_reward = (collection_reward +
                        proximity_reward +
                        completion_reward -
                        step_penalty -
                        cost)
        return {
            'total_reward': total_reward * REWARD_SCALE,
            'proximity_reward': proximity_reward * REWARD_SCALE,
            'collection_reward': collection_reward * REWARD_SCALE,
            'completion_reward': completion_reward * REWARD_SCALE,
            'cost': -cost * REWARD_SCALE,
            'step_penalty': -step_penalty * REWARD_SCALE
        }

    def render(self, episode=0, clear_output=True):
        """Save a snapshot of the scene to results/step_<episode>_<step>.png.

        `clear_output` is accepted for interface compatibility but unused.
        """
        # Fix: savefig below crashed with FileNotFoundError when the output
        # directory did not exist yet.
        os.makedirs("results", exist_ok=True)
        plt.figure(figsize=(10, 10))

        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        if hasattr(self, 'current_speed'):
            title += f", 当前速度: {self.current_speed:.1f} m/s"
        plt.title(title)

        plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                    s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
        plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                     textcoords="offset points", xytext=(0, -25),
                     ha='center', fontsize=10, fontweight='bold')

        # Users: green = collected, red = pending, gray = not generating a task.
        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)

            if self.task_generating_users[i] and self.collected_tasks[i]:
                offload_ratio = self.user_offloading_ratios[i]
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy\nOffload:{offload_ratio:.2f}"
            else:
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy"

            plt.annotate(task_info, (pos[0], pos[1]), fontsize=8, ha='center', va='bottom')

        # UAV trajectory, current position and collection radius.
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]), MAX_DISTANCE_COLLECT, color='blue',
                            fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.plot([self.uav_position[0], BASE_STATION_POSITION[0]],
                 [self.uav_position[1], BASE_STATION_POSITION[1]],
                 'orange', linestyle='--', alpha=0.5, linewidth=1)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        # Fix: a second title assignment here used to overwrite the richer
        # title set above, discarding the current-speed information.
        plt.grid(True)
        plt.legend()
        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()

class GRUActor(nn.Module):
    """Recurrent policy network: a GRU over the observation history feeding an
    MLP head that emits a tanh-squashed action scaled by ``max_action``.

    Expects input of shape (batch, SEQUENCE_LENGTH, state_dim).
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.max_action = max_action
        # Recurrent encoder over the observation sequence.
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size,
                          num_layers=1, batch_first=True)
        # Feed-forward head mapping the final GRU output to an action.
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)
        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)
        self.hidden = None  # lazily (re)allocated to match the batch size
        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights, zero biases, for every linear layer.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.constant_(module.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        batch = state.size(0)
        # Re-initialize the hidden state on request or on any batch-size change.
        if reset_hidden or self.hidden is None or self.hidden.size(1) != batch:
            self.reset_hidden(batch)

        seq_out, self.hidden = self.gru(state, self.hidden)
        features = seq_out[:, -1]  # last time step only
        features = self.ln1(torch.relu(self.layer1(features)))
        features = self.ln2(torch.relu(self.layer2(features)))
        return self.max_action * torch.tanh(self.layer3(features))

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for a batch of `batch_size`."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class SD3DistributionalCritic(nn.Module):
    """Twin distributional critics (TD3/SD3-style) with GRU state encoders.

    Each head maps (observation sequence, action) to NUM_QUANTILES quantile
    estimates of the return distribution.
    """

    def __init__(self, state_dim, action_dim):
        super(SD3DistributionalCritic, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.num_quantiles = NUM_QUANTILES

        # Head 1.
        self.q1_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, self.num_quantiles)
        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Head 2 (independent parameters, identical architecture).
        self.q2_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, self.num_quantiles)
        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        self.q1_hidden = None
        self.q2_hidden = None
        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights; small positive bias on every linear layer.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.constant_(module.bias, 0.01)

    def _head(self, state, action, hidden, gru, fc1, ln1, fc2, ln2, out):
        """Run one critic head; returns (quantiles, updated GRU hidden state)."""
        seq_out, hidden = gru(state, hidden.to(state.device))
        x = torch.cat([seq_out[:, -1], action], dim=1)
        x = ln1(torch.relu(fc1(x)))
        x = ln2(torch.relu(fc2(x)))
        return out(x), hidden

    def forward(self, state, action, reset_hidden=False):
        """Return the quantile estimates of both heads."""
        batch = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch:
            self.reset_q1_hidden(batch)
        if reset_hidden or self.q2_hidden is None or self.q2_hidden.size(1) != batch:
            self.reset_q2_hidden(batch)

        q1_quantiles, self.q1_hidden = self._head(
            state, action, self.q1_hidden,
            self.q1_gru, self.q1_layer1, self.q1_ln1, self.q1_layer2, self.q1_ln2, self.q1_output)
        q2_quantiles, self.q2_hidden = self._head(
            state, action, self.q2_hidden,
            self.q2_gru, self.q2_layer1, self.q2_ln1, self.q2_layer2, self.q2_ln2, self.q2_output)
        return q1_quantiles, q2_quantiles

    def Q1(self, state, action, reset_hidden=False):
        """Return only the first head's quantiles (used for the actor loss)."""
        batch = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch:
            self.reset_q1_hidden(batch)

        q1_quantiles, self.q1_hidden = self._head(
            state, action, self.q1_hidden,
            self.q1_gru, self.q1_layer1, self.q1_ln1, self.q1_layer2, self.q1_ln2, self.q1_output)
        return q1_quantiles

    def reset_q1_hidden(self, batch_size=1):
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)

    def reset_q2_hidden(self, batch_size=1):
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


def quantile_huber_loss(quantiles, target, tau, kappa=KAPPA):
    """Element-wise quantile Huber loss, summed over quantiles and averaged
    over the batch (QR-DQN convention, normalized by `kappa`).

    `quantiles` and `target` are matched element-wise per quantile index;
    `tau` holds the quantile midpoints and is broadcast across the batch.
    """
    td_error = target - quantiles
    abs_td = torch.abs(td_error)
    # Huber: quadratic inside |td| <= kappa, linear outside.
    huber = torch.where(abs_td > kappa,
                        kappa * abs_td - 0.5 * kappa ** 2,
                        0.5 * td_error ** 2)

    num_q = quantiles.shape[1]
    # Asymmetric quantile weight |tau - 1{td < 0}| (indicator detached).
    weight = torch.abs(tau.view(1, num_q) - (td_error.detach() < 0).float())
    return (weight * huber / kappa).sum(dim=1).mean()


class ReplayBuffer:
    """Fixed-capacity FIFO experience replay backed by a deque."""

    def __init__(self, capacity):
        # Oldest transitions are evicted automatically once capacity is reached.
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample `batch_size` transitions as column-stacked arrays."""
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = map(np.array, zip(*batch))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


# Modified A-GEM algorithm implementing flexible gradient fusion
class FlexibleAGEM:
    """A-GEM variant for continual RL: keeps a replay memory of past-phase
    transitions, computes "reference" gradients on that memory, and blends
    them into the current task's gradients instead of hard-projecting.
    """

    def __init__(self, memory_size, batch_size, gamma=AGEM_GAMMA):
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.gamma = gamma  # flexible-fusion coefficient (projection strength)
        self.memory = deque(maxlen=memory_size)

    def store_experience(self, state, action, reward, next_state, done):
        """Store one transition in the A-GEM episodic memory."""
        self.memory.append((state, action, reward, next_state, done))

    def compute_reference_gradients(self, actor, critic, actor_optimizer, critic_optimizer):
        """Compute flattened reference gradients on a memory batch.

        Runs one actor-loss and one critic-loss backward pass on a sample of
        stored transitions and returns the concatenated per-parameter
        gradients (or (None, None) if the memory is not yet full enough).
        NOTE: leaves those reference gradients in `.grad`; the caller is
        expected to zero_grad before its own backward pass.
        """
        if len(self.memory) < self.batch_size:
            return None, None

        # Sample a batch from the episodic memory.
        batch = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states)).to(device)
        actions = torch.FloatTensor(np.array(actions)).to(device)
        rewards = torch.FloatTensor(np.array(rewards)).to(device)
        next_states = torch.FloatTensor(np.array(next_states)).to(device)
        dones = torch.FloatTensor(np.array(dones)).to(device)

        # Reset the recurrent hidden states for the new batch size.
        actor.reset_hidden(states.size(0))
        critic.reset_q1_hidden(states.size(0))
        critic.reset_q2_hidden(states.size(0))

        # Actor reference gradient: maximize the mean of Q1's quantiles.
        actor_optimizer.zero_grad()
        pred_actions = actor(states, reset_hidden=True)
        q1_quantiles = critic.Q1(states, pred_actions, reset_hidden=True)
        actor_loss = -q1_quantiles.mean(dim=1).mean()
        actor_loss.backward()

        actor_ref_grads = []
        for param in actor.parameters():
            if param.grad is not None:
                actor_ref_grads.append(param.grad.clone().flatten())
        actor_ref_grad = torch.cat(actor_ref_grads) if actor_ref_grads else None

        # Critic reference gradient: quantile Huber loss against the
        # clipped-double-Q (element-wise min) target distribution.
        critic_optimizer.zero_grad()
        with torch.no_grad():
            next_actions = actor(next_states, reset_hidden=True)
            q1_next, q2_next = critic(next_states, next_actions, reset_hidden=True)
            target_q = torch.min(q1_next, q2_next)
            target_quantiles = rewards.unsqueeze(1) + (1 - dones.unsqueeze(1)) * GAMMA * target_q

        current_q1, current_q2 = critic(states, actions, reset_hidden=True)
        critic_loss = (quantile_huber_loss(current_q1, target_quantiles.detach(), QUANTILE_TAU) +
                       quantile_huber_loss(current_q2, target_quantiles.detach(), QUANTILE_TAU))
        critic_loss.backward()

        critic_ref_grads = []
        for param in critic.parameters():
            if param.grad is not None:
                critic_ref_grads.append(param.grad.clone().flatten())
        critic_ref_grad = torch.cat(critic_ref_grads) if critic_ref_grads else None

        return actor_ref_grad, critic_ref_grad

    def apply_flexible_gradient_fusion(self, model, current_grad, reference_grad, model_type):
        """Blend the flattened current gradient with the reference gradient and
        write the result back into the model's `.grad` tensors in place.

        `current_grad`/`reference_grad` must be flattened in `model.parameters()`
        order (as produced by `compute_reference_gradients`). `model_type` is
        currently unused (kept for the caller's interface).
        """
        if reference_grad is None or current_grad is None:
            return

        # Gradient similarity between the current task and the memory.
        cosine_sim = torch.cosine_similarity(current_grad, reference_grad, dim=0)

        # Adjust the fusion strategy by similarity.
        if cosine_sim < -0.1:  # significant gradient conflict
            # Projection (A-GEM style), but scaled by gamma so part of the
            # current gradient is retained instead of fully removing the
            # conflicting component.
            dot_product = torch.dot(current_grad, reference_grad)
            ref_norm_sq = torch.dot(reference_grad, reference_grad)

            if ref_norm_sq > 1e-8:
                projection = (dot_product / ref_norm_sq) * reference_grad
                corrected_grad = current_grad - self.gamma * projection
            else:
                corrected_grad = current_grad
        else:
            # Compatible gradients: convex blend, weight clamped to [0.1, 0.3].
            fusion_weight = min(0.3, max(0.1, (cosine_sim.item() + 1) / 2 * 0.3))
            corrected_grad = (1 - fusion_weight) * current_grad + fusion_weight * reference_grad

        # Scatter the corrected flat gradient back into each parameter's .grad.
        idx = 0
        for param in model.parameters():
            if param.grad is not None:
                param_size = param.grad.numel()
                param.grad.data = corrected_grad[idx:idx + param_size].view_as(param.grad)
                idx += param_size

# Base SD3 agent (without continual learning)
class BaseSD3Agent:
    """Baseline SD3 agent: a GRU-based actor with twin distributional
    (quantile) critics, TD3-style delayed policy updates and soft (Polyak)
    target-network updates. No continual-learning components; serves as the
    comparison baseline for ``ContinualSD3Agent``.
    """

    def __init__(self, state_dim, action_dim, max_action, agent_name="BaseSD3"):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.max_action = max_action
        self.agent_name = agent_name

        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.critic = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target = SD3DistributionalCritic(state_dim, action_dim).to(device)

        # Target networks start as exact copies of the online networks.
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.critic_target.load_state_dict(self.critic.state_dict())

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.replay_buffer = ReplayBuffer(BUFFER_SIZE)

        self.total_it = 0  # update counter driving the delayed policy update
        self.policy_delay = 2  # actor/targets update every 2 critic updates

        # Performance tracking
        self.recent_rewards = deque(maxlen=100)
        self.phase_performance = {}

    def select_action(self, state, noise=0.1):
        """Return an action for a single state, with optional Gaussian
        exploration noise of std ``noise``; the result is clipped to
        [-max_action, max_action]. Pass ``noise=0`` for a greedy action."""
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        # Fresh GRU hidden state for this single-sample inference.
        self.actor.reset_hidden(1)
        action = self.actor(state, reset_hidden=True).cpu().data.numpy().flatten()

        if noise != 0:
            noise_vec = np.random.normal(0, noise, size=action.shape)
            action = (action + noise_vec).clip(-self.max_action, self.max_action)

        return action

    def store_transition(self, state, action, reward, next_state, done):
        """Append one transition to the replay buffer."""
        self.replay_buffer.push(state, action, reward, next_state, done)

    def train(self):
        """Run one SD3 update step on a sampled minibatch.

        Returns a dict with ``critic_loss``, ``actor_loss`` (0 on steps that
        skip the delayed actor update) and mean ``q_values``; returns an
        empty dict while the buffer holds fewer than BATCH_SIZE transitions.
        """
        if len(self.replay_buffer) < BATCH_SIZE:
            return {}

        self.total_it += 1

        state, action, reward, next_state, done = self.replay_buffer.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done).to(device)

        # --- Critic update ---
        with torch.no_grad():
            self.actor_target.reset_hidden(BATCH_SIZE)
            next_action = self.actor_target(next_state, reset_hidden=True)

            self.critic_target.reset_q1_hidden(BATCH_SIZE)
            self.critic_target.reset_q2_hidden(BATCH_SIZE)
            target_Q1, target_Q2 = self.critic_target(next_state, next_action, reset_hidden=True)
            # Clipped double-Q: element-wise minimum of the twin quantile heads.
            target_Q = torch.min(target_Q1, target_Q2)
            target_quantiles = reward.unsqueeze(1) + (1 - done.unsqueeze(1)) * GAMMA * target_Q

        self.critic.reset_q1_hidden(BATCH_SIZE)
        self.critic.reset_q2_hidden(BATCH_SIZE)
        current_Q1, current_Q2 = self.critic(state, action, reset_hidden=True)

        critic_loss = (quantile_huber_loss(current_Q1, target_quantiles, QUANTILE_TAU) +
                       quantile_huber_loss(current_Q2, target_quantiles, QUANTILE_TAU))

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss_value = 0
        if self.total_it % self.policy_delay == 0:
            # --- Delayed actor update ---
            self.actor.reset_hidden(BATCH_SIZE)
            pred_action = self.actor(state, reset_hidden=True)

            self.critic.reset_q1_hidden(BATCH_SIZE)
            # Maximize the mean over Q1's quantile estimates.
            actor_loss = -self.critic.Q1(state, pred_action, reset_hidden=True).mean(dim=1).mean()
            actor_loss_value = actor_loss.item()

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Soft (Polyak) update of the target networks.
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            'critic_loss': critic_loss.item(),
            'actor_loss': actor_loss_value,
            'q_values': current_Q1.mean().item()
        }

    def save(self, filename):
        """Serialize all networks and optimizer states to ``filename``."""
        torch.save({
            'actor': self.actor.state_dict(),
            'critic': self.critic.state_dict(),
            'actor_target': self.actor_target.state_dict(),
            'critic_target': self.critic_target.state_dict(),
            'actor_optimizer': self.actor_optimizer.state_dict(),
            'critic_optimizer': self.critic_optimizer.state_dict(),
        }, filename)

    def load(self, filename):
        """Restore the networks and optimizer states written by :meth:`save`."""
        checkpoint = torch.load(filename, map_location=device)
        self.actor.load_state_dict(checkpoint['actor'])
        self.critic.load_state_dict(checkpoint['critic'])
        self.actor_target.load_state_dict(checkpoint['actor_target'])
        self.critic_target.load_state_dict(checkpoint['critic_target'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer'])


# Continual-learning SD3 agent (uses A-GEM)
class ContinualSD3Agent(BaseSD3Agent):
    """SD3 agent augmented with A-GEM-style continual learning.

    In addition to the baseline replay buffer, transitions are stored (with
    probability ``memory_retention``) in a ``FlexibleAGEM`` episodic memory.
    During training, gradients from the current batch are fused with
    reference gradients computed on that memory, which counteracts
    catastrophic forgetting across task phases.
    """

    def __init__(self, state_dim, action_dim, max_action,
                 continual_learning_rate=0.8, adaptation_threshold=0.3, memory_retention=0.7):
        super().__init__(state_dim, action_dim, max_action, agent_name="ContinualSD3")

        # Continual-learning control parameters
        self.continual_learning_rate = continual_learning_rate
        self.adaptation_threshold = adaptation_threshold  # performance-drop level that triggers stronger adaptation
        self.memory_retention = memory_retention  # probability of storing a transition in A-GEM memory

        # A-GEM component (episodic memory + gradient fusion)
        self.agem = FlexibleAGEM(AGEM_MEMORY_SIZE, AGEM_BATCH_SIZE)

    def update_continual_params(self, phase_performance_drop):
        """Adapt the continual-learning hyper-parameters to the measured
        relative performance drop at a phase transition: a large drop boosts
        the continual learning rate and trims memory retention; a small drop
        relaxes the learning rate and grows retention (both clamped)."""
        if phase_performance_drop > self.adaptation_threshold:
            self.continual_learning_rate = min(1.0, self.continual_learning_rate * 1.2)
            self.memory_retention = max(0.3, self.memory_retention * 0.9)
        else:
            self.continual_learning_rate = max(0.5, self.continual_learning_rate * 0.95)
            self.memory_retention = min(0.8, self.memory_retention * 1.05)

    def store_transition(self, state, action, reward, next_state, done):
        """Store in the replay buffer and, with probability
        ``memory_retention``, also in the A-GEM episodic memory."""
        super().store_transition(state, action, reward, next_state, done)

        # Memory retention rate decides whether to also keep this transition
        # in the A-GEM episodic memory.
        if np.random.random() < self.memory_retention:
            self.agem.store_experience(state, action, reward, next_state, done)

    def train(self):
        """One SD3 update step with A-GEM gradient fusion.

        Same structure as :meth:`BaseSD3Agent.train`, except that after each
        ``backward()`` the flattened current gradients are fused with
        reference gradients computed on the episodic memory before clipping
        and the optimizer step. Returns the same metrics dict.
        """
        if len(self.replay_buffer) < BATCH_SIZE:
            return {}

        self.total_it += 1

        # Reference gradients from the episodic memory (may be None when the
        # memory is still empty).
        actor_ref_grad, critic_ref_grad = self.agem.compute_reference_gradients(
            self.actor, self.critic, self.actor_optimizer, self.critic_optimizer
        )

        state, action, reward, next_state, done = self.replay_buffer.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done).to(device)

        # --- Critic update ---
        with torch.no_grad():
            self.actor_target.reset_hidden(BATCH_SIZE)
            next_action = self.actor_target(next_state, reset_hidden=True)

            self.critic_target.reset_q1_hidden(BATCH_SIZE)
            self.critic_target.reset_q2_hidden(BATCH_SIZE)
            target_Q1, target_Q2 = self.critic_target(next_state, next_action, reset_hidden=True)
            # Clipped double-Q over the twin quantile heads.
            target_Q = torch.min(target_Q1, target_Q2)
            target_quantiles = reward.unsqueeze(1) + (1 - done.unsqueeze(1)) * GAMMA * target_Q

        self.critic.reset_q1_hidden(BATCH_SIZE)
        self.critic.reset_q2_hidden(BATCH_SIZE)
        current_Q1, current_Q2 = self.critic(state, action, reset_hidden=True)

        critic_loss = (quantile_huber_loss(current_Q1, target_quantiles, QUANTILE_TAU) +
                       quantile_huber_loss(current_Q2, target_quantiles, QUANTILE_TAU))

        self.critic_optimizer.zero_grad()
        critic_loss.backward()

        # Flatten the freshly computed critic gradients and apply A-GEM fusion
        # (mutates the parameters' .grad in place) before clipping/stepping.
        critic_current_grads = []
        for param in self.critic.parameters():
            if param.grad is not None:
                critic_current_grads.append(param.grad.clone().flatten())
        critic_current_grad = torch.cat(critic_current_grads) if critic_current_grads else None

        self.agem.apply_flexible_gradient_fusion(
            self.critic, critic_current_grad, critic_ref_grad, 'critic'
        )

        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss_value = 0
        if self.total_it % self.policy_delay == 0:
            # --- Delayed actor update ---
            self.actor.reset_hidden(BATCH_SIZE)
            pred_action = self.actor(state, reset_hidden=True)

            self.critic.reset_q1_hidden(BATCH_SIZE)
            actor_loss = -self.critic.Q1(state, pred_action, reset_hidden=True).mean(dim=1).mean()
            actor_loss_value = actor_loss.item()

            self.actor_optimizer.zero_grad()
            actor_loss.backward()

            # Same A-GEM fusion for the actor gradients.
            actor_current_grads = []
            for param in self.actor.parameters():
                if param.grad is not None:
                    actor_current_grads.append(param.grad.clone().flatten())
            actor_current_grad = torch.cat(actor_current_grads) if actor_current_grads else None

            self.agem.apply_flexible_gradient_fusion(
                self.actor, actor_current_grad, actor_ref_grad, 'actor'
            )

            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Soft (Polyak) update of the target networks.
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            'critic_loss': critic_loss.item(),
            'actor_loss': actor_loss_value,
            'q_values': current_Q1.mean().item()
        }

    def save(self, filename):
        """Serialize networks, optimizers and continual-learning parameters."""
        torch.save({
            'actor': self.actor.state_dict(),
            'critic': self.critic.state_dict(),
            'actor_target': self.actor_target.state_dict(),
            'critic_target': self.critic_target.state_dict(),
            'actor_optimizer': self.actor_optimizer.state_dict(),
            'critic_optimizer': self.critic_optimizer.state_dict(),
            'continual_params': {
                'learning_rate': self.continual_learning_rate,
                'adaptation_threshold': self.adaptation_threshold,
                'memory_retention': self.memory_retention
            }
        }, filename)

    def load(self, filename):
        """Restore state written by :meth:`save`; continual-learning
        parameters fall back to their defaults for older checkpoints."""
        checkpoint = torch.load(filename, map_location=device)
        self.actor.load_state_dict(checkpoint['actor'])
        self.critic.load_state_dict(checkpoint['critic'])
        self.actor_target.load_state_dict(checkpoint['actor_target'])
        self.critic_target.load_state_dict(checkpoint['critic_target'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer'])

        if 'continual_params' in checkpoint:
            params = checkpoint['continual_params']
            self.continual_learning_rate = params.get('learning_rate', 0.8)
            self.adaptation_threshold = params.get('adaptation_threshold', 0.3)
            self.memory_retention = params.get('memory_retention', 0.7)


def train_comparison_agents():
    """Train and compare the baseline SD3 agent against the continual-learning
    SD3 agent across sequential task phases.

    Both agents train on mirrored environments (same user positions and task
    configuration per phase) for a fair comparison. Saves per-phase and final
    model checkpoints to ``models/``, pickles all collected metrics to
    ``results/comparison_data.pkl``, and renders the comparison plots.

    Returns:
        (base_agent, continual_agent, final_comparison_data) tuple.
    """
    os.makedirs("results", exist_ok=True)
    os.makedirs("models", exist_ok=True)

    # Two independent environments, one per agent.
    env_base = Environment()
    env_continual = Environment()

    state_dim = env_base._get_state().shape[0]
    action_dim = 4
    max_action = 1.0

    # Two independent agents.
    base_agent = BaseSD3Agent(state_dim, action_dim, max_action)
    continual_agent = ContinualSD3Agent(state_dim, action_dim, max_action)

    # Training schedule
    phases = [1, 2, 3, 4]
    episodes_per_phase = 800
    total_episodes = 0

    # Training-history bookkeeping
    base_results = {}
    continual_results = {}
    comparison_data = {
        'base_rewards': [],
        'continual_rewards': [],
        'base_performance': [],
        'continual_performance': []
    }

    print("开始对比训练：基础SD3 vs 持续学习SD3")

    for phase_idx, phase in enumerate(phases):
        print(f"\n=== 开始阶段 {phase} ===")

        # Configure the same task-generating users in both environments
        # (ensures a fair comparison).
        env_base.update_task_generating_users(phase)
        env_continual.update_task_generating_users(phase)

        # Mirror user positions and task configuration between the two envs.
        env_continual.user_positions = env_base.user_positions.copy()
        env_continual.task_cpu_cycles = env_base.task_cpu_cycles.copy()
        env_continual.task_sizes = env_base.task_sizes.copy()

        base_phase_rewards = []
        continual_phase_rewards = []
        base_phase_performance = []
        continual_phase_performance = []

        # Handle the phase transition for the continual-learning agent:
        # measure how much performance dropped and adapt its parameters.
        if phase_idx > 0:
            prev_phase = phases[phase_idx - 1]
            prev_avg_reward = np.mean(continual_results[prev_phase]['rewards'][-100:])

            # Probe current performance with 10 low-noise evaluation episodes.
            test_rewards = []
            for _ in range(10):
                state = env_continual.reset()
                episode_reward = 0
                done = False
                step = 0

                while not done and step < MAX_STEPS:
                    action = continual_agent.select_action(state, noise=0.1)
                    next_state, reward, done, _ = env_continual.step(action)
                    episode_reward += reward
                    state = next_state
                    step += 1

                test_rewards.append(episode_reward)

            current_avg_reward = np.mean(test_rewards)
            performance_drop = (prev_avg_reward - current_avg_reward) / abs(
                prev_avg_reward) if prev_avg_reward != 0 else 0

            print(
                f"持续学习智能体性能变化: {prev_avg_reward:.3f} -> {current_avg_reward:.3f} (下降: {performance_drop:.3f})")
            continual_agent.update_continual_params(performance_drop)

        # Linearly annealed exploration noise within each phase.
        start_noise = EXPLORATION_NOISE_START
        end_noise = EXPLORATION_NOISE_END

        for episode in range(episodes_per_phase):
            total_episodes += 1
            progress = episode / episodes_per_phase
            current_noise = start_noise * (1 - progress) + end_noise * progress

            # --- Baseline agent episode ---
            base_state = env_base.reset()
            base_episode_reward = 0
            base_done = False
            base_step = 0

            while not base_done and base_step < MAX_STEPS:
                base_action = base_agent.select_action(base_state, noise=current_noise)
                base_next_state, base_reward, base_done, base_info = env_base.step(base_action)

                base_agent.store_transition(base_state, base_action, base_reward, base_next_state, base_done)
                base_episode_reward += base_reward

                if len(base_agent.replay_buffer) > BATCH_SIZE:
                    base_agent.train()

                base_state = base_next_state
                base_step += 1

            # --- Continual-learning agent episode ---
            continual_state = env_continual.reset()
            continual_episode_reward = 0
            continual_done = False
            continual_step = 0

            while not continual_done and continual_step < MAX_STEPS:
                continual_action = continual_agent.select_action(continual_state, noise=current_noise)
                continual_next_state, continual_reward, continual_done, continual_info = env_continual.step(
                    continual_action)

                continual_agent.store_transition(continual_state, continual_action, continual_reward,
                                                 continual_next_state, continual_done)
                continual_episode_reward += continual_reward

                if len(continual_agent.replay_buffer) > BATCH_SIZE:
                    continual_agent.train()

                continual_state = continual_next_state
                continual_step += 1

            # Record per-episode results (info dicts come from the last step).
            base_phase_rewards.append(base_episode_reward)
            continual_phase_rewards.append(continual_episode_reward)

            base_performance = {
                'collected_ratio': base_info.get('collected_required', 0) / max(base_info.get('total_required', 1), 1),
                'energy': base_info.get('energy', 0),
                'delay': base_info.get('delay', 0)
            }
            continual_performance = {
                'collected_ratio': continual_info.get('collected_required', 0) / max(
                    continual_info.get('total_required', 1), 1),
                'energy': continual_info.get('energy', 0),
                'delay': continual_info.get('delay', 0)
            }

            base_phase_performance.append(base_performance)
            continual_phase_performance.append(continual_performance)

            base_agent.recent_rewards.append(base_episode_reward)
            continual_agent.recent_rewards.append(continual_episode_reward)

            if episode % 100 == 0:
                base_recent = np.mean(list(base_agent.recent_rewards))
                continual_recent = np.mean(list(continual_agent.recent_rewards))
                print(f"阶段 {phase}, Episode {episode}:")
                print(
                    f"  基础SD3: 奖励={base_episode_reward:.3f}, 最近100平均={base_recent:.3f}, 收集率={base_performance['collected_ratio']:.3f}")
                print(
                    f"  持续学习SD3: 奖励={continual_episode_reward:.3f}, 最近100平均={continual_recent:.3f}, 收集率={continual_performance['collected_ratio']:.3f}")

        # Persist phase-level results.
        base_results[phase] = {
            'rewards': base_phase_rewards,
            'performance_metrics': base_phase_performance
        }
        continual_results[phase] = {
            'rewards': continual_phase_rewards,
            'performance_metrics': continual_phase_performance
        }

        comparison_data['base_rewards'].extend(base_phase_rewards)
        comparison_data['continual_rewards'].extend(continual_phase_rewards)
        comparison_data['base_performance'].extend(base_phase_performance)
        comparison_data['continual_performance'].extend(continual_phase_performance)

        # Phase summary over the last 100 episodes.
        base_avg_reward = np.mean(base_phase_rewards[-100:])
        continual_avg_reward = np.mean(continual_phase_rewards[-100:])
        base_avg_collection = np.mean([m['collected_ratio'] for m in base_phase_performance[-100:]])
        continual_avg_collection = np.mean([m['collected_ratio'] for m in continual_phase_performance[-100:]])

        print(f"\n阶段 {phase} 完成:")
        print(f"  基础SD3 - 平均奖励: {base_avg_reward:.3f}, 平均收集率: {base_avg_collection:.3f}")
        print(f"  持续学习SD3 - 平均奖励: {continual_avg_reward:.3f}, 平均收集率: {continual_avg_collection:.3f}")
        print(
            f"  持续学习优势: 奖励差异 {continual_avg_reward - base_avg_reward:.3f}, 收集率差异 {continual_avg_collection - base_avg_collection:.3f}")

        # Checkpoint both agents at the end of each phase.
        base_agent.save(f"models/base_sd3_phase_{phase}_episode_{total_episodes}.pth")
        continual_agent.save(f"models/continual_sd3_phase_{phase}_episode_{total_episodes}.pth")

    # Comparison/analysis plots.
    create_comparison_plots(base_results, continual_results, comparison_data, phases, episodes_per_phase)

    # Final checkpoints and raw data dump.
    base_agent.save("models/base_sd3_final.pth")
    continual_agent.save("models/continual_sd3_final.pth")

    final_comparison_data = {
        'base_results': base_results,
        'continual_results': continual_results,
        'comparison_data': comparison_data,
        'phases': phases,
        'episodes_per_phase': episodes_per_phase
    }

    import pickle
    with open('results/comparison_data.pkl', 'wb') as f:
        pickle.dump(final_comparison_data, f)

    print("\n=== 对比训练完成 ===")
    print("模型已保存到 models/ 目录")
    print("对比数据已保存到 results/comparison_data.pkl")
    print("对比分析图表已保存到 results/comparison_analysis.png")

    return base_agent, continual_agent, final_comparison_data


def create_comparison_plots(base_results, continual_results, comparison_data, phases, episodes_per_phase):
    """Render the 12-panel comparison figure (baseline SD3 vs continual SD3)
    to ``results/comparison_analysis.png`` and then write the textual report
    via ``generate_comparison_report``.

    Args:
        base_results / continual_results: per-phase dicts with 'rewards' and
            'performance_metrics' lists, as built by the training loop.
        comparison_data: flat per-episode reward/metric lists for both agents.
        phases: ordered list of phase identifiers.
        episodes_per_phase: episodes trained per phase (marks phase
            boundaries on the reward-curve x-axis).

    Fixes vs the previous revision: removed the dead
    ``cumulative_collection_diff`` accumulator, replaced the manual
    cumulative-sum loop with ``np.cumsum``, renamed subplot-9 locals that
    shadowed the phase-average lists, and guarded the radar-chart
    normalization against a zero score range.
    """
    plt.figure(figsize=(20, 16))

    # 1. Smoothed training-reward curves.
    plt.subplot(3, 4, 1)
    window_size = 50

    base_rewards = comparison_data['base_rewards']
    continual_rewards = comparison_data['continual_rewards']

    base_smoothed = []
    continual_smoothed = []
    for i in range(len(base_rewards)):
        start_idx = max(0, i - window_size + 1)
        base_smoothed.append(np.mean(base_rewards[start_idx:i + 1]))
        continual_smoothed.append(np.mean(continual_rewards[start_idx:i + 1]))

    plt.plot(base_smoothed, label='基础SD3', color='red', alpha=0.8)
    plt.plot(continual_smoothed, label='持续学习SD3', color='blue', alpha=0.8)

    # Mark phase-transition episodes.
    for i, phase in enumerate(phases[1:], 1):
        plt.axvline(x=i * episodes_per_phase, color='gray', linestyle='--', alpha=0.7)
        plt.text(i * episodes_per_phase + 20, max(max(base_smoothed), max(continual_smoothed)) * 0.9,
                 f'阶段{phase}', rotation=90, alpha=0.7, fontsize=8)

    plt.xlabel('Episode')
    plt.ylabel('平滑奖励')
    plt.title('训练奖励曲线对比')
    plt.legend()
    plt.grid(True, alpha=0.3)

    # 2. Per-phase average reward (last 100 episodes of each phase).
    plt.subplot(3, 4, 2)
    phase_labels = [f'阶段{phase}' for phase in phases]
    base_phase_rewards = []
    continual_phase_rewards = []

    for phase in phases:
        base_phase_rewards.append(np.mean(base_results[phase]['rewards'][-100:]))
        continual_phase_rewards.append(np.mean(continual_results[phase]['rewards'][-100:]))

    x = np.arange(len(phases))
    width = 0.35

    plt.bar(x - width / 2, base_phase_rewards, width, label='基础SD3', color='red', alpha=0.7)
    plt.bar(x + width / 2, continual_phase_rewards, width, label='持续学习SD3', color='blue', alpha=0.7)

    plt.xlabel('阶段')
    plt.ylabel('平均奖励')
    plt.title('各阶段平均奖励对比')
    plt.xticks(x, phase_labels)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # 3. Per-phase average collection ratio.
    plt.subplot(3, 4, 3)
    base_collection_rates = []
    continual_collection_rates = []

    for phase in phases:
        base_metrics = base_results[phase]['performance_metrics']
        continual_metrics = continual_results[phase]['performance_metrics']
        base_collection_rates.append(np.mean([m['collected_ratio'] for m in base_metrics[-100:]]))
        continual_collection_rates.append(np.mean([m['collected_ratio'] for m in continual_metrics[-100:]]))

    plt.bar(x - width / 2, base_collection_rates, width, label='基础SD3', color='red', alpha=0.7)
    plt.bar(x + width / 2, continual_collection_rates, width, label='持续学习SD3', color='blue', alpha=0.7)

    plt.xlabel('阶段')
    plt.ylabel('平均收集率')
    plt.title('各阶段收集率对比')
    plt.xticks(x, phase_labels)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # 4. Per-phase average energy consumption.
    plt.subplot(3, 4, 4)
    base_energies = []
    continual_energies = []

    for phase in phases:
        base_metrics = base_results[phase]['performance_metrics']
        continual_metrics = continual_results[phase]['performance_metrics']
        base_energies.append(np.mean([m['energy'] for m in base_metrics[-100:]]))
        continual_energies.append(np.mean([m['energy'] for m in continual_metrics[-100:]]))

    plt.bar(x - width / 2, base_energies, width, label='基础SD3', color='red', alpha=0.7)
    plt.bar(x + width / 2, continual_energies, width, label='持续学习SD3', color='blue', alpha=0.7)

    plt.xlabel('阶段')
    plt.ylabel('平均总能耗')
    plt.title('各阶段能耗对比')
    plt.xticks(x, phase_labels)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # 5. Relative improvement of the continual agent, in percent.
    plt.subplot(3, 4, 5)
    reward_improvements = []
    collection_improvements = []

    for i, phase in enumerate(phases):
        reward_imp = (continual_phase_rewards[i] - base_phase_rewards[i]) / abs(base_phase_rewards[i]) * 100
        collection_imp = (continual_collection_rates[i] - base_collection_rates[i]) / abs(
            base_collection_rates[i]) * 100
        reward_improvements.append(reward_imp)
        collection_improvements.append(collection_imp)

    plt.bar(x - width / 2, reward_improvements, width, label='奖励改进%', color='green', alpha=0.7)
    plt.bar(x + width / 2, collection_improvements, width, label='收集率改进%', color='orange', alpha=0.7)

    plt.xlabel('阶段')
    plt.ylabel('改进百分比 (%)')
    plt.title('持续学习相对于基础算法的改进')
    plt.xticks(x, phase_labels)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.axhline(y=0, color='black', linestyle='-', alpha=0.3)

    # 6. Forgetting at phase transitions: reward drop from the end of one
    # phase to the start of the next, as a percentage.
    plt.subplot(3, 4, 6)
    base_forgetting = []
    continual_forgetting = []

    for i in range(1, len(phases)):
        # Baseline agent's forgetting.
        prev_final_base = np.mean(base_results[phases[i - 1]]['rewards'][-50:])
        curr_initial_base = np.mean(base_results[phases[i]]['rewards'][:50])
        base_forget = (prev_final_base - curr_initial_base) / abs(prev_final_base) * 100
        base_forgetting.append(base_forget)

        # Continual-learning agent's forgetting.
        prev_final_continual = np.mean(continual_results[phases[i - 1]]['rewards'][-50:])
        curr_initial_continual = np.mean(continual_results[phases[i]]['rewards'][:50])
        continual_forget = (prev_final_continual - curr_initial_continual) / abs(prev_final_continual) * 100
        continual_forgetting.append(continual_forget)

    transition_labels = [f'{phases[i]}→{phases[i + 1]}' for i in range(len(phases) - 1)]
    x_trans = np.arange(len(transition_labels))

    plt.bar(x_trans - width / 2, base_forgetting, width, label='基础SD3', color='red', alpha=0.7)
    plt.bar(x_trans + width / 2, continual_forgetting, width, label='持续学习SD3', color='blue', alpha=0.7)

    plt.xlabel('阶段转换')
    plt.ylabel('遗忘程度 (%)')
    plt.title('阶段转换时的遗忘程度对比')
    plt.xticks(x_trans, transition_labels)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # 7. Learning-speed comparison via linear-fit slopes over reward windows.
    plt.subplot(3, 4, 7)

    def calculate_learning_slopes(rewards, window_size=100):
        # Least-squares slope of each non-overlapping reward window.
        slopes = []
        for i in range(window_size, len(rewards), window_size):
            y = rewards[i - window_size:i]
            x = np.arange(len(y))
            if len(y) > 1:
                slope = np.polyfit(x, y, 1)[0]
                slopes.append(slope)
        return slopes

    base_slopes = []
    continual_slopes = []

    for phase in phases:
        base_phase_slopes = calculate_learning_slopes(base_results[phase]['rewards'])
        continual_phase_slopes = calculate_learning_slopes(continual_results[phase]['rewards'])
        base_slopes.extend(base_phase_slopes)
        continual_slopes.extend(continual_phase_slopes)

    plt.plot(base_slopes, label='基础SD3', color='red', marker='o', alpha=0.7)
    plt.plot(continual_slopes, label='持续学习SD3', color='blue', marker='s', alpha=0.7)

    plt.xlabel('学习阶段')
    plt.ylabel('学习斜率')
    plt.title('学习速度对比')
    plt.legend()
    plt.grid(True, alpha=0.3)

    # 8. Cumulative per-episode reward advantage of the continual agent.
    plt.subplot(3, 4, 8)
    cumulative_reward_diff = np.cumsum(
        np.asarray(continual_rewards) - np.asarray(base_rewards))

    plt.plot(cumulative_reward_diff, color='purple', alpha=0.8)
    plt.xlabel('Episode')
    plt.ylabel('累积奖励差异')
    plt.title('持续学习累积优势')
    plt.grid(True, alpha=0.3)

    # 9. Within-phase learning curves (moving average per phase).
    plt.subplot(3, 4, 9)
    colors = ['red', 'blue', 'green', 'orange']

    for i, phase in enumerate(phases):
        # Local names chosen to avoid shadowing the phase-average lists above.
        phase_rewards_base = base_results[phase]['rewards']
        phase_rewards_continual = continual_results[phase]['rewards']

        # Trailing moving average.
        window = 50
        base_ma = [np.mean(phase_rewards_base[max(0, j - window):j + 1]) for j in range(len(phase_rewards_base))]
        continual_ma = [np.mean(phase_rewards_continual[max(0, j - window):j + 1]) for j in
                        range(len(phase_rewards_continual))]

        episodes_range = np.arange(len(base_ma))
        plt.plot(episodes_range, base_ma, '--', color=colors[i], alpha=0.6,
                 label=f'基础SD3-阶段{phase}')
        plt.plot(episodes_range, continual_ma, '-', color=colors[i], alpha=0.8,
                 label=f'持续学习SD3-阶段{phase}')

    plt.xlabel('Episode (阶段内)')
    plt.ylabel('移动平均奖励')
    plt.title('各阶段内学习曲线详情')
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    plt.grid(True, alpha=0.3)

    # 10. Stability: reward standard deviation over each phase's tail.
    plt.subplot(3, 4, 10)
    base_stability = []
    continual_stability = []

    for phase in phases:
        base_rewards_phase = base_results[phase]['rewards'][-200:]  # last 200 episodes
        continual_rewards_phase = continual_results[phase]['rewards'][-200:]

        base_std = np.std(base_rewards_phase)
        continual_std = np.std(continual_rewards_phase)

        base_stability.append(base_std)
        continual_stability.append(continual_std)

    plt.bar(x - width / 2, base_stability, width, label='基础SD3', color='red', alpha=0.7)
    plt.bar(x + width / 2, continual_stability, width, label='持续学习SD3', color='blue', alpha=0.7)

    plt.xlabel('阶段')
    plt.ylabel('性能标准差')
    plt.title('性能稳定性对比')
    plt.xticks(x, phase_labels)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # 11. Adaptation speed: episodes needed to reach stable performance in a
    # new phase (continual agent only; first phase is skipped).
    plt.subplot(3, 4, 11)
    adaptation_episodes = []

    for i, phase in enumerate(phases):
        if i == 0:  # skip the first phase
            continue

        continual_rewards_phase = continual_results[phase]['rewards']

        # First episode reaching 80% of the phase's final performance.
        final_performance = np.mean(continual_rewards_phase[-50:])
        target_performance = final_performance * 0.8

        adaptation_episode = len(continual_rewards_phase)  # default: never reached
        for j, reward in enumerate(continual_rewards_phase):
            if reward >= target_performance:
                adaptation_episode = j
                break

        adaptation_episodes.append(adaptation_episode)

    plt.bar(range(len(adaptation_episodes)), adaptation_episodes, alpha=0.7, color='green')
    plt.xlabel('阶段转换')
    plt.ylabel('适应所需Episodes')
    plt.title('新阶段适应速度')
    plt.xticks(range(len(adaptation_episodes)),
               [f'阶段{phases[i + 1]}' for i in range(len(adaptation_episodes))])
    plt.grid(True, alpha=0.3)

    # 12. Radar chart of normalized aggregate scores.
    plt.subplot(3, 4, 12, projection='polar')

    metrics = ['奖励', '收集率', '能效', '稳定性', '适应性']

    # Reward score
    base_reward_score = np.mean([np.mean(base_results[phase]['rewards'][-100:]) for phase in phases])
    continual_reward_score = np.mean([np.mean(continual_results[phase]['rewards'][-100:]) for phase in phases])

    # Collection-ratio score
    base_collection_score = np.mean(base_collection_rates)
    continual_collection_score = np.mean(continual_collection_rates)

    # Energy-efficiency score (lower consumption -> higher score)
    base_energy_score = 1.0 / (np.mean(base_energies) + 1e-6)
    continual_energy_score = 1.0 / (np.mean(continual_energies) + 1e-6)

    # Stability score (lower std -> higher score)
    base_stability_score = 1.0 / (np.mean(base_stability) + 1e-6)
    continual_stability_score = 1.0 / (np.mean(continual_stability) + 1e-6)

    # Adaptability score (lower forgetting -> higher score)
    base_adaptation_score = 1.0 / (np.mean(np.abs(base_forgetting)) + 1e-6)
    continual_adaptation_score = 1.0 / (np.mean(np.abs(continual_forgetting)) + 1e-6)

    base_scores = [base_reward_score, base_collection_score, base_energy_score,
                   base_stability_score, base_adaptation_score]
    continual_scores = [continual_reward_score, continual_collection_score, continual_energy_score,
                        continual_stability_score, continual_adaptation_score]

    # Min-max normalize to [0, 1]; guard against all scores coinciding,
    # which would otherwise divide by zero.
    all_scores = base_scores + continual_scores
    min_score, max_score = min(all_scores), max(all_scores)
    score_range = max_score - min_score
    if score_range <= 0:
        score_range = 1.0

    base_scores_norm = [(s - min_score) / score_range for s in base_scores]
    continual_scores_norm = [(s - min_score) / score_range for s in continual_scores]

    angles = np.linspace(0, 2 * np.pi, len(metrics), endpoint=False).tolist()
    base_scores_norm += base_scores_norm[:1]  # close the polygon
    continual_scores_norm += continual_scores_norm[:1]
    angles += angles[:1]

    plt.plot(angles, base_scores_norm, 'o-', linewidth=2, label='基础SD3', color='red', alpha=0.7)
    plt.fill(angles, base_scores_norm, alpha=0.25, color='red')
    plt.plot(angles, continual_scores_norm, 'o-', linewidth=2, label='持续学习SD3', color='blue', alpha=0.7)
    plt.fill(angles, continual_scores_norm, alpha=0.25, color='blue')

    plt.xticks(angles[:-1], metrics)
    plt.ylim(0, 1)
    plt.title('综合性能评估', pad=20)
    plt.legend(loc='upper right', bbox_to_anchor=(1.3, 1.0))

    plt.tight_layout()
    plt.savefig('results/comparison_analysis.png', dpi=300, bbox_inches='tight')
    plt.close()

    # Detailed numeric comparison report.
    generate_comparison_report(base_results, continual_results, phases)


def generate_comparison_report(base_results, continual_results, phases):
    """Write a detailed comparison report (base SD3 vs. continual-learning SD3)
    to ``results/comparison_report.txt``.

    Args:
        base_results: dict mapping phase -> {'rewards': list[float],
            'performance_metrics': list[dict]} for the base agent; each metric
            dict must provide 'collected_ratio' and 'energy'.
        continual_results: same structure for the continual-learning agent.
        phases: ordered sequence of phase keys present in both result dicts.

    The report covers, per phase, final-window reward / collection-rate /
    energy comparisons, plus an overall reward improvement figure and a
    phase-to-phase forgetting analysis.
    """

    def _pct_change(new, old):
        # Relative change in percent. The tiny epsilon guards against a zero
        # baseline so the report never contains inf/nan (bug fix).
        return (new - old) / (abs(old) + 1e-12) * 100

    # Bug fix: guarantee the output directory exists — open(..., 'w') raises
    # FileNotFoundError otherwise.
    os.makedirs('results', exist_ok=True)

    with open('results/comparison_report.txt', 'w', encoding='utf-8') as f:
        f.write("=" * 60 + "\n")
        f.write("持续学习算法 vs 基础算法 对比报告\n")
        f.write("=" * 60 + "\n\n")

        for phase in phases:
            f.write(f"阶段 {phase} 详细对比:\n")
            f.write("-" * 40 + "\n")

            # Reward comparison over the final 100 episodes of the phase.
            base_rewards = base_results[phase]['rewards']
            continual_rewards = continual_results[phase]['rewards']

            base_final_avg = np.mean(base_rewards[-100:])
            continual_final_avg = np.mean(continual_rewards[-100:])
            reward_improvement = _pct_change(continual_final_avg, base_final_avg)

            f.write(f"奖励对比:\n")
            f.write(f"  基础算法最终平均奖励: {base_final_avg:.3f}\n")
            f.write(f"  持续学习算法最终平均奖励: {continual_final_avg:.3f}\n")
            f.write(f"  改进幅度: {reward_improvement:+.2f}%\n\n")

            # Collection-rate comparison over the same final window.
            base_metrics = base_results[phase]['performance_metrics']
            continual_metrics = continual_results[phase]['performance_metrics']

            base_collection = np.mean([m['collected_ratio'] for m in base_metrics[-100:]])
            continual_collection = np.mean([m['collected_ratio'] for m in continual_metrics[-100:]])
            collection_improvement = _pct_change(continual_collection, base_collection)

            f.write(f"收集率对比:\n")
            f.write(f"  基础算法平均收集率: {base_collection:.3f}\n")
            f.write(f"  持续学习算法平均收集率: {continual_collection:.3f}\n")
            f.write(f"  改进幅度: {collection_improvement:+.2f}%\n\n")

            # Energy-consumption comparison (lower is better; sign is reported
            # as a raw change, not an "improvement").
            base_energy = np.mean([m['energy'] for m in base_metrics[-100:]])
            continual_energy = np.mean([m['energy'] for m in continual_metrics[-100:]])
            energy_change = _pct_change(continual_energy, base_energy)

            f.write(f"能耗对比:\n")
            f.write(f"  基础算法平均能耗: {base_energy:.1f}\n")
            f.write(f"  持续学习算法平均能耗: {continual_energy:.1f}\n")
            f.write(f"  变化幅度: {energy_change:+.2f}%\n\n")

            f.write("\n")

        # Overall assessment: mean of the per-phase final-window rewards.
        f.write("总体评估:\n")
        f.write("=" * 40 + "\n")

        overall_base_reward = np.mean([np.mean(base_results[phase]['rewards'][-100:]) for phase in phases])
        overall_continual_reward = np.mean([np.mean(continual_results[phase]['rewards'][-100:]) for phase in phases])
        overall_improvement = _pct_change(overall_continual_reward, overall_base_reward)

        f.write(f"整体奖励改进: {overall_improvement:+.2f}%\n")

        # Forgetting analysis: drop from the end of one phase (last 50
        # episodes) to the start of the next (first 50 episodes), relative to
        # the earlier phase's level. Positive means performance was lost.
        f.write("\n遗忘程度分析:\n")
        for i in range(1, len(phases)):
            prev_phase = phases[i - 1]
            curr_phase = phases[i]

            base_prev_final = np.mean(base_results[prev_phase]['rewards'][-50:])
            base_curr_initial = np.mean(base_results[curr_phase]['rewards'][:50])
            base_forgetting = (base_prev_final - base_curr_initial) / (abs(base_prev_final) + 1e-12) * 100

            continual_prev_final = np.mean(continual_results[prev_phase]['rewards'][-50:])
            continual_curr_initial = np.mean(continual_results[curr_phase]['rewards'][:50])
            continual_forgetting = (continual_prev_final - continual_curr_initial) / (abs(continual_prev_final) + 1e-12) * 100

            f.write(f"阶段 {prev_phase}→{curr_phase}:\n")
            f.write(f"  基础算法遗忘程度: {base_forgetting:.2f}%\n")
            f.write(f"  持续学习算法遗忘程度: {continual_forgetting:.2f}%\n")
            f.write(f"  遗忘减少程度: {base_forgetting - continual_forgetting:.2f}%\n\n")


def evaluate_trained_agents():
    """Evaluate the two trained agents (base SD3 vs. continual-learning SD3).

    Loads the final checkpoints from ``models/``, runs 50 noise-free test
    episodes per task phase (phases 1-4), prints per-phase summaries, and
    returns a dict mapping phase -> {'base': [...], 'continual': [...]}
    episode records. Returns None when the checkpoints are missing.
    """
    print("开始评估训练好的智能体...")

    # Build the environment and both agent instances (action_dim=4,
    # max_action=1.0, matching training).
    env = Environment()
    state_dim = env._get_state().shape[0]
    base_agent = BaseSD3Agent(state_dim, 4, 1.0)
    continual_agent = ContinualSD3Agent(state_dim, 4, 1.0)

    # Abort early if the final checkpoints are not available.
    try:
        base_agent.load("models/base_sd3_final.pth")
        continual_agent.load("models/continual_sd3_final.pth")
        print("成功加载训练好的模型")
    except FileNotFoundError:
        print("未找到训练好的模型，请先运行训练")
        return

    def _run_episode(agent):
        # One greedy rollout (noise=0) from a fresh reset; returns the
        # episode summary record built from the final step's info dict.
        env.reset()
        total_reward = 0
        done = False
        steps = 0
        info = {}
        while not done and steps < MAX_STEPS:
            obs = env._get_gru_state()
            act = agent.select_action(obs, noise=0)
            _, r, done, info = env.step(act)
            total_reward += r
            steps += 1
        return {
            'reward': total_reward,
            'collected_ratio': info.get('collected_required', 0) / max(info.get('total_required', 1), 1),
            'energy': info.get('energy', 0),
            'delay': info.get('delay', 0),
        }

    episodes_per_phase = 50
    evaluation_results = {}

    for phase in [1, 2, 3, 4]:
        print(f"\n评估阶段 {phase}...")
        env.update_task_generating_users(phase)

        base_runs = []
        continual_runs = []
        # Interleave base/continual rollouts, matching the original execution
        # order (keeps any shared RNG draw sequence identical).
        for _ in range(episodes_per_phase):
            base_runs.append(_run_episode(base_agent))
            continual_runs.append(_run_episode(continual_agent))

        evaluation_results[phase] = {'base': base_runs, 'continual': continual_runs}

        # Per-phase summary statistics.
        base_avg_reward = np.mean([r['reward'] for r in base_runs])
        continual_avg_reward = np.mean([r['reward'] for r in continual_runs])
        base_avg_collection = np.mean([r['collected_ratio'] for r in base_runs])
        continual_avg_collection = np.mean([r['collected_ratio'] for r in continual_runs])

        print(f"阶段 {phase} 测试结果:")
        print(f"  基础算法: 平均奖励={base_avg_reward:.3f}, 平均收集率={base_avg_collection:.3f}")
        print(f"  持续学习: 平均奖励={continual_avg_reward:.3f}, 平均收集率={continual_avg_collection:.3f}")
        print(
            f"  改进: 奖励+{continual_avg_reward - base_avg_reward:.3f}, 收集率+{continual_avg_collection - base_avg_collection:.3f}")

    return evaluation_results


if __name__ == "__main__":
    # Run the side-by-side comparison training, then evaluate both agents.
    base_agent, continual_agent, comparison_data = train_comparison_agents()

    evaluation_results = evaluate_trained_agents()

    # Summarize where all artifacts were written.
    print("\n对比训练和评估完成！")
    print("结果文件:")
    for artifact in (
        "- results/comparison_analysis.png: 对比分析图表",
        "- results/comparison_report.txt: 详细对比报告",
        "- results/comparison_data.pkl: 完整对比数据",
        "- models/base_sd3_final.pth: 基础算法模型",
        "- models/continual_sd3_final.pth: 持续学习算法模型",
    ):
        print(artifact)
