import copy

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time

# --- Reproducibility: seed every RNG the script touches ---
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# SimHei font so matplotlib can render the Chinese labels used in render();
# unicode_minus=False keeps the minus sign displayable under that font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# --- Scenario geometry ---
AREA_SIZE = 200                 # side length of the square service area (m)
NUM_USERS = 12                  # number of ground users
MAX_STEPS = 300                 # episode length cap
MAX_DISTANCE_COLLECT = 15       # 2-D radius within which the UAV collects a task (m)

# --- UAV parameters ---
UAV_HEIGHT = 30.0               # fixed flight altitude (m)
UAV_COMPUTE_CAPACITY = 1e10     # on-board CPU frequency (cycles/s)
MIN_UAV_SPEED = 2.0             # m/s
MAX_UAV_SPEED = 20.0            # m/s
DEFAULT_UAV_SPEED = 10.0        # m/s, used until the first action is taken

# --- Base station (MEC server) parameters ---
BASE_STATION_POSITION = np.array([0.0, 0.0])
BASE_STATION_COMPUTE_CAPACITY = 2e11    # cycles/s
BASE_STATION_HEIGHT = 10.0              # m

# --- RL hyper-parameters ---
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99                    # discount factor
TAU = 0.005                     # soft target-update rate
BUFFER_SIZE = 20000
BATCH_SIZE = 32
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.1              # global reward scaling applied in calculate_reward

# --- Distributional (quantile) critic ---
NUM_QUANTILES = 51
# Quantile midpoints tau_i = (2i-1)/(2N), i = 1..N.
QUANTILE_TAU = torch.FloatTensor([(2 * i - 1) / (2.0 * NUM_QUANTILES) for i in range(1, NUM_QUANTILES + 1)]).to(device)
KAPPA = 1.0                     # Huber threshold for the quantile loss

# --- A-GEM continual-learning parameters ---
AGEM_MEMORY_SIZE = 1000
AGEM_BATCH_SIZE = 32
AGEM_GAMMA = 0.5                # flexible gradient-fusion coefficient

# --- Recurrent observation window ---
SEQUENCE_LENGTH = 10            # GRU input sequence length
HIDDEN_SIZE = 128               # GRU hidden size

# --- Communication model ---
BANDWIDTH = 1e6                 # Hz
USER_TRANSMIT_POWER = 0.1       # W
CHANNEL_GAIN_REF_DB = 30.0      # reference channel gain at 1 m (dB)
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23   # J/K
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH   # thermal noise (W)
RICE_FACTOR = 5                 # Rician K-factor (LoS/NLoS power ratio)

# --- Task model: [min, max] ranges sampled uniformly per user ---
TASK_SIZE_BITS = [1e6, 2e6]
TASK_CPU_CYCLES = [5e8, 15e8]

# --- Rotary-wing UAV propulsion-power model coefficients ---
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81                  # m/s^2
AIR_DENSITY = 1.225             # kg/m^3
ROTOR_RADIUS = 0.4              # m
NUM_ROTORS = 4
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-27  # chip-dependent CPU energy coefficient

# --- Reward-cost weighting ---
DELAY_WEIGHT = 0.7
ENERGY_WEIGHT = 0.3
DELAY_SCALE = 100.0
ENERGY_SCALE = 0.001

class Environment:
    """UAV-assisted mobile-edge-computing environment.

    A single UAV starts at the origin, flies over a square area of side
    AREA_SIZE, collects computation tasks from ground users that come
    within MAX_DISTANCE_COLLECT metres, and chooses, per collected task,
    an offloading ratio that splits computation between the UAV itself
    and the base station.

    Action layout (each component in [-1, 1]):
        action[0:2] -> movement direction (normalised internally)
        action[2]   -> flight speed, rescaled to [MIN_UAV_SPEED, MAX_UAV_SPEED]
        action[3]   -> offloading ratio, rescaled to [0, 1]

    Observations are stacked over SEQUENCE_LENGTH steps for the GRU nets.
    """

    def __init__(self):
        # Scenario layout: user positions and task demands are drawn once
        # here and reused across episodes (reset() does not resample them).
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        # Per-user bookkeeping, filled in as tasks are collected.
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.total_flight_energy = 0

        self.user_offloading_ratios = np.zeros(NUM_USERS)

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1
        self.phase_1_users = None  # remembered so that phase 4 can replay phase 1

        # Accumulated per-episode reward components (for logging/analysis).
        self.episode_reward_breakdown = {
            'collection_reward': 0.0,
            'proximity_reward': 0.0,
            'completion_reward': 0.0,
            'cost': 0.0,
            'step_penalty': 0.0
        }

    def _calculate_rice_channel_gain(self, distance_2d, height=UAV_HEIGHT):
        """Return the instantaneous channel power gain over a Rician fading
        link at the given 2-D distance and height difference.

        Combines distance-based path loss with a random small-scale fading
        sample, so repeated calls for the same geometry give different gains.
        """
        distance_3d = np.sqrt(distance_2d ** 2 + height ** 2)
        if distance_3d < 1.0: distance_3d = 1.0  # avoid gain blow-up below 1 m
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        K = RICE_FACTOR
        # Rician fading: deterministic LoS component plus complex Gaussian
        # NLoS component, mixed according to the K-factor.
        h_los = 1.0
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """Delay (s) to upload the user's whole task to the UAV over a
        Shannon-capacity link at the given 2-D distance."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return self.task_sizes[user_index] / data_rate

    def _calculate_uav_to_bs_delay(self, task_size):
        """Delay (s) to relay task_size bits from the UAV to the base
        station, using the UAV/BS height difference for the 3-D distance."""
        bs_distance_2d = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        height_diff = abs(UAV_HEIGHT - BASE_STATION_HEIGHT)
        channel_gain = self._calculate_rice_channel_gain(bs_distance_2d, height_diff)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return task_size / data_rate

    def _calculate_completion_delay(self, user_index, offloading_ratio):
        """End-to-end completion delay (s) for one task.

        offloading_ratio == 0: compute fully on the UAV.
        offloading_ratio == 1: relay fully to the BS and compute there.
        Otherwise the task is split; UAV and BS branches run in parallel,
        so the completion delay is the max of the two branch delays.
        """
        total_task_size = self.task_sizes[user_index]
        total_cpu_cycles = self.task_cpu_cycles[user_index]

        # Uplink from user to UAV is always paid, regardless of the split.
        user_to_uav_delay = self._calculate_offloading_delay(user_index,
                                                             np.linalg.norm(
                                                                 self.uav_position - self.user_positions[user_index]))

        if offloading_ratio == 0:
            local_computation_delay = total_cpu_cycles / UAV_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + local_computation_delay

        elif offloading_ratio == 1:
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(total_task_size)
            bs_computation_delay = total_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

        else:
            local_task_size = total_task_size * (1 - offloading_ratio)
            bs_task_size = total_task_size * offloading_ratio

            local_cpu_cycles = total_cpu_cycles * (1 - offloading_ratio)
            bs_cpu_cycles = total_cpu_cycles * offloading_ratio

            local_computation_delay = local_cpu_cycles / UAV_COMPUTE_CAPACITY
            local_total_delay = user_to_uav_delay + local_computation_delay

            uav_to_bs_delay = self._calculate_uav_to_bs_delay(bs_task_size)
            bs_computation_delay = bs_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            bs_total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

            # Parallel branches: the slower one determines completion.
            total_delay = max(local_total_delay, bs_total_delay)

        return total_delay

    def _calculate_flight_energy(self, distance_moved, actual_speed=None, time_delta=1.0):
        """Propulsion energy (J) for one time step of rotary-wing flight.

        Uses the standard induced + blade-profile + parasite power model;
        at speed 0 this reduces to hovering power, so hovering also costs
        energy. If actual_speed is None it is inferred from the distance.
        """
        if actual_speed is None:
            speed = distance_moved / time_delta
        else:
            speed = actual_speed

        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index, offloading_ratio):
        """Energy (J) the UAV spends computing its local share of a task,
        using the effective-switched-capacitance CPU energy model."""
        local_cpu_cycles = self.task_cpu_cycles[user_index] * (1 - offloading_ratio)
        working_frequency = UAV_COMPUTE_CAPACITY
        energy = EFFECTIVE_SWITCHED_CAPACITANCE * (working_frequency ** 2) * local_cpu_cycles
        return energy

    def update_task_generating_users(self, phase):
        """Select which users generate tasks for the given curriculum phase.

        Phase 1: a random subset of 10 users (remembered for phase 4).
        Phase 2: all users. Phase 3: a random subset of 8 users.
        Phase 4: replays the exact phase-1 user set (continual-learning
        evaluation); falls back to a fresh random set if phase 1 never ran.
        """
        self.current_phase = phase
        self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)

        if phase == 1:
            indices = np.random.choice(NUM_USERS, 10, replace=False)
            self.task_generating_users[indices] = True
            self.phase_1_users = self.task_generating_users.copy()
        elif phase == 2:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 3:
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users[indices] = True
        elif phase == 4:
            if self.phase_1_users is None:
                print("错误: 未找到第一阶段的用户数据! 将重新为第四阶段生成10个用户。")
                indices = np.random.choice(NUM_USERS, 10, replace=False)
                self.task_generating_users[indices] = True
            else:
                self.task_generating_users = self.phase_1_users.copy()

        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Reset episode state (UAV pose, counters, per-user stats) and
        return the initial stacked observation for the GRU networks.

        User positions, task demands and the task-generating set are NOT
        resampled here; call update_task_generating_users() for that.
        """
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        self.user_completion_delays.fill(0)
        self.user_offloading_delays.fill(0)
        self.user_computation_delays.fill(0)
        self.user_computation_energies.fill(0)
        self.user_offloading_ratios.fill(0)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()

        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] = 0.0

        # Pre-fill the history window with copies of the initial state so
        # the very first GRU input already has SEQUENCE_LENGTH frames.
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one time step.

        Decodes the action, moves the UAV (clipped to the area), collects
        any tasks now in range, and computes reward and episode metrics.
        Returns (stacked_observation, reward, done, info).
        """
        direction_raw = action[:2]

        # Normalise the direction; a near-zero vector means "hover".
        direction_norm = np.linalg.norm(direction_raw)
        if direction_norm > 1e-6:
            movement_direction = direction_raw / direction_norm
        else:
            movement_direction = np.array([0.0, 0.0])

        # Rescale action components from [-1, 1] to physical ranges.
        speed = (action[2] + 1) / 2 * (MAX_UAV_SPEED - MIN_UAV_SPEED) + MIN_UAV_SPEED
        offloading_ratio = (action[3] + 1) / 2
        movement = movement_direction * speed

        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())

        actual_movement = self.uav_position - prev_position
        distance_moved = np.linalg.norm(actual_movement)

        # Flight energy is charged every step; hovering still costs power.
        if distance_moved > 0:
            actual_speed = speed if direction_norm > 1e-6 else 0
            flight_energy_step = self._calculate_flight_energy(distance_moved, actual_speed=actual_speed)
        else:
            flight_energy_step = self._calculate_flight_energy(0, actual_speed=0)

        self.total_flight_energy += flight_energy_step
        self.current_speed = speed if direction_norm > 1e-6 else 0

        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect every pending task that is now within range; the current
        # offloading ratio is applied to all tasks collected this step.
        newly_collected = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1

                    self.user_offloading_ratios[i] = offloading_ratio
                    self.user_offloading_delays[i] = self._calculate_offloading_delay(i, new_distances[i])
                    self.user_completion_delays[i] = self._calculate_completion_delay(i, offloading_ratio)
                    self.user_computation_delays[i] = self.user_completion_delays[i] - self.user_offloading_delays[i]
                    self.user_computation_energies[i] = self._calculate_computation_energy(i, offloading_ratio)

        self.step_count += 1

        # Aggregate delay/energy statistics over all tasks collected so far.
        completed_indices = np.where(self.collected_tasks & self.task_generating_users)[0]

        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
            avg_offloading_delay = np.mean(self.user_offloading_delays[completed_indices])
            avg_computation_delay = np.mean(self.user_computation_delays[completed_indices])
        else:
            total_delay = 0.0
            total_comp_energy = 0.0
            avg_total_delay, avg_offloading_delay, avg_computation_delay = 0.0, 0.0, 0.0

        total_energy = self.total_flight_energy + total_comp_energy

        reward_info = self.calculate_reward(newly_collected, total_energy, total_delay, new_distances,
                                                      self.last_distances)
        reward = reward_info['total_reward']
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] += reward_info[key]

        self.last_distances = new_distances

        # Episode ends on the step cap or once every required task is in.
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        self.observation_history.append(self._get_state())

        return self._get_gru_state(), reward, done, {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "flight_energy": self.total_flight_energy,
            "comp_energy": total_comp_energy,
            "delay_breakdown": {
                "avg_offloading_delay": avg_offloading_delay,
                "avg_computation_delay": avg_computation_delay,
                "avg_total_delay": avg_total_delay,
                "total_delay": total_delay,
            },
            "episode_reward_breakdown": self.episode_reward_breakdown
        }

    def _get_state(self):
        """Build one flat observation vector, all features scaled to ~[0, 1]:
        UAV position, then per-user (distance, collected flag, generating
        flag, normalised CPU demand), then step progress and BS distance."""
        state = np.zeros(2 + NUM_USERS * 4 + 1 + 1)
        state[0:2] = self.uav_position / AREA_SIZE

        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 4
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # / area diagonal
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
            state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]

        state[-2] = self.step_count / MAX_STEPS

        bs_distance = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        state[-1] = bs_distance / np.sqrt(2 * AREA_SIZE ** 2)

        return state

    def _get_gru_state(self):
        """Return the last SEQUENCE_LENGTH observations stacked into a
        (SEQUENCE_LENGTH, state_dim) array, padding with the current state
        if the history window is not yet full."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def calculate_reward(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Compute the shaped step reward and its components.

        Components: a dense proximity term for closing on the nearest
        uncollected user, a time-decaying bonus per newly collected task,
        a growing per-step penalty, and — only on episode end — a
        completion bonus and a delay/energy cost. All terms are scaled by
        REWARD_SCALE; in the returned dict penalties carry a negative sign
        while 'total_reward' already has them subtracted.
        """
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users) if sum(self.task_generating_users) > 0 else 1
        completion_rate = collected_required / total_required

        # Dense shaping: reward progress towards the closest pending user,
        # doubled once within 50 m of it.
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            uncollected_distances_old = old_distances[uncollected_indices]
            uncollected_distances_new = new_distances[uncollected_indices]
            closest_idx = np.argmin(uncollected_distances_new)
            dist_diff = uncollected_distances_old[closest_idx] - uncollected_distances_new[closest_idx]

            if uncollected_distances_new[closest_idx] < 50:
                proximity_reward = dist_diff * 0.24
            else:
                proximity_reward = dist_diff * 0.12

        # Early collections earn up to 50% extra.
        time_factor = max(0, 1 - self.step_count / MAX_STEPS)
        collection_reward = newly_collected * 30.0 * (1 + time_factor * 0.5)

        # Step penalty grows over the episode to discourage dawdling.
        progress = self.step_count / MAX_STEPS
        step_penalty = 0.25 + 0.5 * progress

        completion_reward = 0
        done = (self.step_count >= MAX_STEPS) or (collected_required == total_required)
        if done:
            step_efficiency = max(0.1, 1 - self.step_count / MAX_STEPS)
            base_completion = completion_rate * 400
            bonus = 1000 if completion_rate == 1.0 else 0
            completion_reward = (base_completion + bonus) * step_efficiency

        # Terminal cost: average weighted delay+energy per collected task.
        cost = 0
        if done and collected_required > 0:
            delay_penalty = total_delay * DELAY_WEIGHT * 1200
            energy_penalty = total_energy * ENERGY_WEIGHT * 0.015
            cost = (delay_penalty + energy_penalty) / collected_required

        total_reward = (collection_reward +
                        proximity_reward +
                        completion_reward -
                        step_penalty -
                        cost)
        return {
            'total_reward': total_reward * REWARD_SCALE,
            'proximity_reward': proximity_reward * REWARD_SCALE,
            'collection_reward': collection_reward * REWARD_SCALE,
            'completion_reward': completion_reward * REWARD_SCALE,
            'cost': -cost * REWARD_SCALE,
            'step_penalty': -step_penalty * REWARD_SCALE
        }

    def render(self, episode=0, clear_output=True):
        """Save a snapshot of the scenario to results/step_<ep>_<step>.png.

        Shows the BS, users (green=collected, red=pending, gray=inactive),
        the UAV with its collection radius, and the flight trajectory.
        ``clear_output`` is currently unused; kept for API compatibility.
        """
        plt.figure(figsize=(10, 10))

        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        if hasattr(self, 'current_speed'):
            title += f", 当前速度: {self.current_speed:.1f} m/s"
        plt.title(title)

        plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                    s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
        plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                     textcoords="offset points", xytext=(0, -25),
                     ha='center', fontsize=10, fontweight='bold')

        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)

            if self.task_generating_users[i] and self.collected_tasks[i]:
                offload_ratio = self.user_offloading_ratios[i]
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy\nOffload:{offload_ratio:.2f}"
            else:
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy"

            plt.annotate(task_info, (pos[0], pos[1]), fontsize=8, ha='center', va='bottom')

        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]), MAX_DISTANCE_COLLECT, color='blue',
                            fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.plot([self.uav_position[0], BASE_STATION_POSITION[0]],
                 [self.uav_position[1], BASE_STATION_POSITION[1]],
                 'orange', linestyle='--', alpha=0.5, linewidth=1)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        # BUG FIX: a second, poorer title used to be recomputed here,
        # overwriting the one above and dropping the speed info.
        plt.grid(True)
        plt.legend()
        # BUG FIX: make sure the output directory exists before saving,
        # otherwise savefig raises FileNotFoundError on a fresh checkout.
        os.makedirs("results", exist_ok=True)
        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()

class GRUActor(nn.Module):
    """Recurrent deterministic policy network.

    A GRU encodes the observation sequence; an MLP head with LayerNorm
    maps the final hidden output to an action bounded by ``max_action``
    via tanh. The GRU hidden state is cached between calls and rebuilt
    whenever the batch size changes or a reset is requested.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        # Sequence encoder followed by a two-layer MLP head.
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)
        self.max_action = max_action
        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)
        self.hidden = None  # lazily (re)created in forward()
        self._init_weights()

    def _init_weights(self):
        """Xavier-initialise every linear layer and zero its bias."""
        for layer in (m for m in self.modules() if isinstance(m, nn.Linear)):
            nn.init.xavier_uniform_(layer.weight)
            nn.init.constant_(layer.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        """Map a (batch, seq, state_dim) tensor to a scaled tanh action."""
        n_samples = state.size(0)
        # Rebuild the recurrent state on request or on batch-size mismatch.
        stale = self.hidden is None or self.hidden.size(1) != n_samples
        if reset_hidden or stale:
            self.reset_hidden(n_samples)

        seq_out, self.hidden = self.gru(state, self.hidden)
        feat = seq_out[:, -1]  # keep only the last time step
        feat = self.ln1(torch.relu(self.layer1(feat)))
        feat = self.ln2(torch.relu(self.layer2(feat)))
        return self.max_action * torch.tanh(self.layer3(feat))

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for the given batch size."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size, device=device)


class SD3DistributionalCritic(nn.Module):
    """Twin distributional critic for SD3.

    Two independent streams (Q1, Q2), each a GRU sequence encoder whose
    last output is concatenated with the action and fed through a
    LayerNorm MLP that predicts NUM_QUANTILES return quantiles. Each
    stream caches its own GRU hidden state between calls, rebuilt when
    the batch size changes or a reset is requested.
    """

    def __init__(self, state_dim, action_dim):
        super(SD3DistributionalCritic, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.num_quantiles = NUM_QUANTILES

        # Q1 stream: GRU encoder + MLP quantile head.
        self.q1_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, self.num_quantiles)
        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Q2 stream: identical architecture, independent parameters.
        self.q2_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, self.num_quantiles)
        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        # Cached GRU hidden states, one per stream.
        self.q1_hidden = None
        self.q2_hidden = None
        self._init_weights()

    def _init_weights(self):
        # Xavier weights with a small positive bias on all linear layers.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)

    def forward(self, state, action, reset_hidden=False):
        """Return (q1_quantiles, q2_quantiles), each (batch, NUM_QUANTILES),
        for a (batch, seq, state_dim) state and (batch, action_dim) action."""
        batch_size = state.size(0)
        # Rebuild each stream's hidden state on request or size mismatch.
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch_size:
            self.reset_q1_hidden(batch_size)
        if reset_hidden or self.q2_hidden is None or self.q2_hidden.size(1) != batch_size:
            self.reset_q2_hidden(batch_size)

        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden.to(state.device))
        q2_gru_out, self.q2_hidden = self.q2_gru(state, self.q2_hidden.to(state.device))

        # Use only the final time step of each encoded sequence.
        q1_state = q1_gru_out[:, -1]
        q2_state = q2_gru_out[:, -1]

        q1_x = torch.cat([q1_state, action], dim=1)
        q2_x = torch.cat([q2_state, action], dim=1)

        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1_quantiles = self.q1_output(q1)

        q2 = self.q2_ln1(torch.relu(self.q2_layer1(q2_x)))
        q2 = self.q2_ln2(torch.relu(self.q2_layer2(q2)))
        q2_quantiles = self.q2_output(q2)

        return q1_quantiles, q2_quantiles

    def Q1(self, state, action, reset_hidden=False):
        """Evaluate only the Q1 stream (used for the actor loss)."""
        batch_size = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch_size:
            self.reset_q1_hidden(batch_size)

        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden.to(state.device))
        q1_state = q1_gru_out[:, -1]
        q1_x = torch.cat([q1_state, action], dim=1)
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1_quantiles = self.q1_output(q1)
        return q1_quantiles

    def reset_q1_hidden(self, batch_size=1):
        # Zero Q1's hidden state for the given batch size.
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)

    def reset_q2_hidden(self, batch_size=1):
        # Zero Q2's hidden state for the given batch size.
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


def quantile_huber_loss(quantiles, target, tau, kappa=KAPPA):
    """Quantile-weighted Huber loss between predicted and target quantiles.

    ``quantiles`` and ``target`` are (batch, n_quantiles) tensors compared
    elementwise (quantile i against target i). ``tau`` holds the quantile
    midpoint levels; ``kappa`` is the Huber threshold. Returns the loss
    summed over quantiles and averaged over the batch.
    """
    residual = target - quantiles
    abs_residual = residual.abs()
    # Huber part: quadratic inside |r| <= kappa, linear outside.
    elementwise = torch.where(
        abs_residual > kappa,
        kappa * abs_residual - 0.5 * kappa ** 2,
        0.5 * residual ** 2,
    )

    levels = tau.view(1, quantiles.shape[1])
    # Asymmetric weight |tau - 1{r < 0}| tilts the penalty per quantile level.
    below_target = (residual.detach() < 0).float()
    weighted = torch.abs(levels - below_target) * elementwise / kappa
    return weighted.sum(dim=1).mean()


class ReplayBuffer:
    """Fixed-capacity FIFO store of transitions for off-policy training."""

    def __init__(self, capacity):
        # deque evicts the oldest transition once capacity is exceeded.
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample ``batch_size`` transitions without replacement
        and return them as five stacked numpy arrays."""
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        return (np.array(states), np.array(actions), np.array(rewards),
                np.array(next_states), np.array(dones))

    def __len__(self):
        return len(self.buffer)


# Modified A-GEM algorithm implementing flexible gradient fusion
class FlexibleAGEM:
    """A-GEM variant with "flexible" gradient fusion for continual learning.

    Keeps a bounded memory of past-task transitions, computes reference
    gradients on them, and blends those with the current-task gradients:
    a damped projection when the two conflict, a weighted average when
    they are compatible.
    """

    def __init__(self, memory_size, batch_size, gamma=AGEM_GAMMA):
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.gamma = gamma  # flexible-fusion coefficient (projection strength)
        self.memory = deque(maxlen=memory_size)  # FIFO episodic memory

    def store_experience(self, state, action, reward, next_state, done):
        """Store one transition in the A-GEM episodic memory."""
        self.memory.append((state, action, reward, next_state, done))

    def compute_reference_gradients(self, actor, critic, actor_optimizer, critic_optimizer):
        """Compute flattened reference gradients for actor and critic from
        a memory batch; returns (None, None) if memory is too small.

        NOTE: leaves the reference gradients in the models' .grad fields —
        the caller is expected to zero/overwrite them before its own step.
        """
        if len(self.memory) < self.batch_size:
            return None, None

        # Sample a batch from the episodic memory.
        batch = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states)).to(device)
        actions = torch.FloatTensor(np.array(actions)).to(device)
        rewards = torch.FloatTensor(np.array(rewards)).to(device)
        next_states = torch.FloatTensor(np.array(next_states)).to(device)
        dones = torch.FloatTensor(np.array(dones)).to(device)

        # Reset recurrent hidden states to match the memory batch size.
        actor.reset_hidden(states.size(0))
        critic.reset_q1_hidden(states.size(0))
        critic.reset_q2_hidden(states.size(0))

        # Actor reference gradient: maximise mean Q1 over the memory batch.
        actor_optimizer.zero_grad()
        pred_actions = actor(states, reset_hidden=True)
        q1_quantiles = critic.Q1(states, pred_actions, reset_hidden=True)
        actor_loss = -q1_quantiles.mean(dim=1).mean()
        actor_loss.backward()

        actor_ref_grads = []
        for param in actor.parameters():
            if param.grad is not None:
                actor_ref_grads.append(param.grad.clone().flatten())
        actor_ref_grad = torch.cat(actor_ref_grads) if actor_ref_grads else None

        # Critic reference gradient: quantile-Huber TD loss on both streams.
        critic_optimizer.zero_grad()
        with torch.no_grad():
            next_actions = actor(next_states, reset_hidden=True)
            q1_next, q2_next = critic(next_states, next_actions, reset_hidden=True)
            target_q = torch.min(q1_next, q2_next)  # clipped double-Q target
            target_quantiles = rewards.unsqueeze(1) + (1 - dones.unsqueeze(1)) * GAMMA * target_q

        current_q1, current_q2 = critic(states, actions, reset_hidden=True)
        critic_loss = (quantile_huber_loss(current_q1, target_quantiles.detach(), QUANTILE_TAU) +
                       quantile_huber_loss(current_q2, target_quantiles.detach(), QUANTILE_TAU))
        critic_loss.backward()

        critic_ref_grads = []
        for param in critic.parameters():
            if param.grad is not None:
                critic_ref_grads.append(param.grad.clone().flatten())
        critic_ref_grad = torch.cat(critic_ref_grads) if critic_ref_grads else None

        return actor_ref_grad, critic_ref_grad

    def apply_flexible_gradient_fusion(self, model, current_grad, reference_grad, model_type):
        """Blend the current flattened gradient with the reference gradient
        and write the result back into the model's .grad fields in place.

        ``model_type`` is unused here; kept for caller compatibility.
        """
        if reference_grad is None or current_grad is None:
            return

        # Measure agreement between current and reference gradients.
        cosine_sim = torch.cosine_similarity(current_grad, reference_grad, dim=0)

        # Choose the fusion strategy based on the similarity.
        if cosine_sim < -0.1:  # severe gradient conflict
            # Project away (part of) the conflicting component, but keep
            # some of the current gradient (damped by self.gamma).
            dot_product = torch.dot(current_grad, reference_grad)
            ref_norm_sq = torch.dot(reference_grad, reference_grad)

            if ref_norm_sq > 1e-8:
                projection = (dot_product / ref_norm_sq) * reference_grad
                corrected_grad = current_grad - self.gamma * projection
            else:
                corrected_grad = current_grad
        else:
            # Gradients are compatible: weighted average, with the reference
            # weight clamped to [0.1, 0.3] based on the similarity.
            fusion_weight = min(0.3, max(0.1, (cosine_sim.item() + 1) / 2 * 0.3))
            corrected_grad = (1 - fusion_weight) * current_grad + fusion_weight * reference_grad

        # Scatter the fused flat gradient back into per-parameter grads.
        idx = 0
        for param in model.parameters():
            if param.grad is not None:
                param_size = param.grad.numel()
                param.grad.data = corrected_grad[idx:idx + param_size].view_as(param.grad)
                idx += param_size


# Modified SD3 agent with continual-learning control parameters
class SD3Agent:
    def __init__(self, state_dim, action_dim, max_action,
                 continual_learning_rate=0.8, adaptation_threshold=0.3, memory_retention=0.7):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.max_action = max_action

        # 持续学习控制参数
        self.continual_learning_rate = continual_learning_rate  # 持续学习速率
        self.adaptation_threshold = adaptation_threshold  # 适应阈值
        self.memory_retention = memory_retention  # 记忆保留率

        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.critic = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target = SD3DistributionalCritic(state_dim, action_dim).to(device)

        self.actor_target.load_state_dict(self.actor.state_dict())
        self.critic_target.load_state_dict(self.critic.state_dict())

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.replay_buffer = ReplayBuffer(BUFFER_SIZE)
        self.agem = FlexibleAGEM(AGEM_MEMORY_SIZE, AGEM_BATCH_SIZE)

        self.total_it = 0
        self.policy_delay = 2

        # 性能跟踪
        self.recent_rewards = deque(maxlen=100)
        self.phase_performance = {}

    def update_continual_params(self, phase_performance_drop):
        """根据性能变化动态调整持续学习参数"""
        if phase_performance_drop > self.adaptation_threshold:
            # 性能下降较大，提高学习率，降低记忆保留
            self.continual_learning_rate = min(1.0, self.continual_learning_rate * 1.2)
            self.memory_retention = max(0.3, self.memory_retention * 0.9)
        else:
            # 性能稳定，逐渐恢复参数
            self.continual_learning_rate = max(0.5, self.continual_learning_rate * 0.95)
            self.memory_retention = min(0.8, self.memory_retention * 1.05)

    def select_action(self, state, noise=0.1):
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        self.actor.reset_hidden(1)
        action = self.actor(state, reset_hidden=True).cpu().data.numpy().flatten()

        if noise != 0:
            noise_vec = np.random.normal(0, noise, size=action.shape)
            action = (action + noise_vec).clip(-self.max_action, self.max_action)

        return action

    def store_transition(self, state, action, reward, next_state, done):
        self.replay_buffer.push(state, action, reward, next_state, done)

        # 根据记忆保留率决定是否存储到A-GEM记忆库
        if np.random.random() < self.memory_retention:
            self.agem.store_experience(state, action, reward, next_state, done)

    def train(self):
        if len(self.replay_buffer) < BATCH_SIZE:
            return {}

        self.total_it += 1

        # 获取参考梯度
        actor_ref_grad, critic_ref_grad = self.agem.compute_reference_gradients(
            self.actor, self.critic, self.actor_optimizer, self.critic_optimizer
        )

        state, action, reward, next_state, done = self.replay_buffer.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done).to(device)

        # 训练Critic
        with torch.no_grad():
            self.actor_target.reset_hidden(BATCH_SIZE)
            next_action = self.actor_target(next_state, reset_hidden=True)

            self.critic_target.reset_q1_hidden(BATCH_SIZE)
            self.critic_target.reset_q2_hidden(BATCH_SIZE)
            target_Q1, target_Q2 = self.critic_target(next_state, next_action, reset_hidden=True)
            target_Q = torch.min(target_Q1, target_Q2)
            target_quantiles = reward.unsqueeze(1) + (1 - done.unsqueeze(1)) * GAMMA * target_Q

        self.critic.reset_q1_hidden(BATCH_SIZE)
        self.critic.reset_q2_hidden(BATCH_SIZE)
        current_Q1, current_Q2 = self.critic(state, action, reset_hidden=True)

        critic_loss = (quantile_huber_loss(current_Q1, target_quantiles, QUANTILE_TAU) +
                       quantile_huber_loss(current_Q2, target_quantiles, QUANTILE_TAU))

        self.critic_optimizer.zero_grad()
        critic_loss.backward()

        # 获取当前Critic梯度
        critic_current_grads = []
        for param in self.critic.parameters():
            if param.grad is not None:
                critic_current_grads.append(param.grad.clone().flatten())
        critic_current_grad = torch.cat(critic_current_grads) if critic_current_grads else None

        # 应用柔性梯度融合
        self.agem.apply_flexible_gradient_fusion(
            self.critic, critic_current_grad, critic_ref_grad, 'critic'
        )

        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss_value = 0
        if self.total_it % self.policy_delay == 0:
            # 训练Actor
            self.actor.reset_hidden(BATCH_SIZE)
            pred_action = self.actor(state, reset_hidden=True)

            self.critic.reset_q1_hidden(BATCH_SIZE)
            actor_loss = -self.critic.Q1(state, pred_action, reset_hidden=True).mean(dim=1).mean()
            actor_loss_value = actor_loss.item()

            self.actor_optimizer.zero_grad()
            actor_loss.backward()

            # 获取当前Actor梯度
            actor_current_grads = []
            for param in self.actor.parameters():
                if param.grad is not None:
                    actor_current_grads.append(param.grad.clone().flatten())
            actor_current_grad = torch.cat(actor_current_grads) if actor_current_grads else None

            # 应用柔性梯度融合
            self.agem.apply_flexible_gradient_fusion(
                self.actor, actor_current_grad, actor_ref_grad, 'actor'
            )

            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # 软更新目标网络
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            'critic_loss': critic_loss.item(),
            'actor_loss': actor_loss_value,
            'q_values': current_Q1.mean().item()
        }

    def save(self, filename):
        torch.save({
            'actor': self.actor.state_dict(),
            'critic': self.critic.state_dict(),
            'actor_target': self.actor_target.state_dict(),
            'critic_target': self.critic_target.state_dict(),
            'actor_optimizer': self.actor_optimizer.state_dict(),
            'critic_optimizer': self.critic_optimizer.state_dict(),
            'continual_params': {
                'learning_rate': self.continual_learning_rate,
                'adaptation_threshold': self.adaptation_threshold,
                'memory_retention': self.memory_retention
            }
        }, filename)

    def load(self, filename):
        checkpoint = torch.load(filename, map_location=device)
        self.actor.load_state_dict(checkpoint['actor'])
        self.critic.load_state_dict(checkpoint['critic'])
        self.actor_target.load_state_dict(checkpoint['actor_target'])
        self.critic_target.load_state_dict(checkpoint['critic_target'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer'])

        # 加载持续学习参数
        if 'continual_params' in checkpoint:
            params = checkpoint['continual_params']
            self.continual_learning_rate = params.get('learning_rate', 0.8)
            self.adaptation_threshold = params.get('adaptation_threshold', 0.3)
            self.memory_retention = params.get('memory_retention', 0.7)


def train_continual_learning():
    """Run the four-phase continual-learning experiment end to end.

    Trains an SD3Agent across four environment phases (the set of
    task-generating users changes per phase), adapting the agent's
    continual-learning knobs from the measured performance drop at each
    phase boundary. Produces training plots, a pickled data dump and
    per-phase / final model checkpoints.

    Returns:
        tuple: (trained agent, training_data dict with per-phase rewards,
        losses, metrics and the hyper-parameter configuration).
    """
    # Fix: outputs below are written under "results/" (savefig, pickle dump),
    # so create that directory — the original created "../results" instead,
    # making the save calls fail with FileNotFoundError.
    os.makedirs("results", exist_ok=True)
    os.makedirs("models", exist_ok=True)

    env = Environment()
    state_dim = env._get_state().shape[0]
    action_dim = 4
    max_action = 1.0

    agent = SD3Agent(state_dim, action_dim, max_action)

    # Training schedule
    phases = [1, 2, 3, 4]  # four phases
    episodes_per_phase = 800
    total_episodes = 0

    # Training history
    all_rewards = []
    all_losses = []
    phase_results = {}

    print("开始持续学习训练...")

    for phase_idx, phase in enumerate(phases):
        print(f"\n=== 开始阶段 {phase} ===")
        phase_rewards = []
        phase_losses = []
        phase_performance_metrics = []

        # Switch the environment's task-generating users for this phase.
        env.update_task_generating_users(phase)

        # From phase 2 on: estimate the performance drop and adapt the knobs.
        if phase_idx > 0:
            prev_avg_reward = np.mean(phase_results[phases[phase_idx - 1]]['rewards'][-100:])
            # Probe current performance with a few evaluation episodes.
            initial_rewards = []
            for _ in range(10):  # 10 probe episodes
                state = env.reset()
                episode_reward = 0
                done = False
                step = 0

                while not done and step < MAX_STEPS:
                    action = agent.select_action(state, noise=0.1)
                    next_state, reward, done, info = env.step(action)
                    episode_reward += reward
                    state = next_state
                    step += 1

                initial_rewards.append(episode_reward)

            current_avg_reward = np.mean(initial_rewards)
            # Relative drop; guard against division by zero.
            performance_drop = (prev_avg_reward - current_avg_reward) / abs(
                prev_avg_reward) if prev_avg_reward != 0 else 0

            print(f"阶段性能变化: {prev_avg_reward:.3f} -> {current_avg_reward:.3f} (下降: {performance_drop:.3f})")
            agent.update_continual_params(performance_drop)
            print(f"更新参数: 学习率={agent.continual_learning_rate:.3f}, 记忆保留={agent.memory_retention:.3f}")

        # Linear exploration-noise decay over the phase.
        start_noise = EXPLORATION_NOISE_START
        end_noise = EXPLORATION_NOISE_END

        for episode in range(episodes_per_phase):
            total_episodes += 1
            state = env.reset()
            episode_reward = 0
            episode_losses = []
            done = False
            step = 0
            info = {}  # robustness: `info` is read after the loop below

            # Noise annealed linearly from start_noise to end_noise.
            progress = episode / episodes_per_phase
            current_noise = start_noise * (1 - progress) + end_noise * progress

            while not done and step < MAX_STEPS:
                action = agent.select_action(state, noise=current_noise)
                next_state, reward, done, info = env.step(action)

                agent.store_transition(state, action, reward, next_state, done)
                episode_reward += reward

                if len(agent.replay_buffer) > BATCH_SIZE:
                    loss_info = agent.train()
                    if loss_info:
                        episode_losses.append(loss_info)

                state = next_state
                step += 1

            phase_rewards.append(episode_reward)
            if episode_losses:
                # Average each loss key over the episode's training steps.
                avg_losses = {}
                for key in episode_losses[0].keys():
                    avg_losses[key] = np.mean([loss[key] for loss in episode_losses])
                phase_losses.append(avg_losses)

            # End-of-episode performance metrics (from the last `info`).
            performance_metrics = {
                'collected_ratio': info.get('collected_required', 0) / max(info.get('total_required', 1), 1),
                'energy': info.get('energy', 0),
                'delay': info.get('delay', 0),
                'flight_energy': info.get('flight_energy', 0),
                'comp_energy': info.get('comp_energy', 0)
            }
            phase_performance_metrics.append(performance_metrics)

            # Rolling reward window used for progress reporting.
            agent.recent_rewards.append(episode_reward)

            if episode % 100 == 0:
                recent_avg = np.mean(list(agent.recent_rewards))
                collected_ratio = performance_metrics['collected_ratio']
                print(f"阶段 {phase}, Episode {episode}: "
                      f"奖励={episode_reward:.3f}, "
                      f"最近100平均={recent_avg:.3f}, "
                      f"收集率={collected_ratio:.3f}, "
                      f"步数={step}")

                # Render a trajectory snapshot every 200 episodes.
                if episode % 200 == 0:
                    env.render(episode=total_episodes)

        # Archive this phase's history.
        phase_results[phase] = {
            'rewards': phase_rewards,
            'losses': phase_losses,
            'performance_metrics': phase_performance_metrics
        }

        all_rewards.extend(phase_rewards)
        all_losses.extend(phase_losses)

        # Phase summary over the final 100 episodes.
        avg_reward = np.mean(phase_rewards[-100:])
        avg_collection = np.mean([m['collected_ratio'] for m in phase_performance_metrics[-100:]])
        avg_energy = np.mean([m['energy'] for m in phase_performance_metrics[-100:]])

        print(f"\n阶段 {phase} 完成:")
        print(f"  平均奖励: {avg_reward:.3f}")
        print(f"  平均收集率: {avg_collection:.3f}")
        print(f"  平均能耗: {avg_energy:.3f}")
        print(f"  当前持续学习参数: 学习率={agent.continual_learning_rate:.3f}, 记忆保留={agent.memory_retention:.3f}")

        # Per-phase checkpoint.
        agent.save(f"models/sd3_phase_{phase}_episode_{total_episodes}.pth")
        print(f"已保存阶段 {phase} 模型")

    # ---- Post-training analysis and visualisation ----
    print("\n=== 持续学习训练完成 ===")

    plt.figure(figsize=(15, 12))

    # (1) Smoothed reward curve with phase boundaries.
    plt.subplot(2, 3, 1)
    window_size = 50
    smoothed_rewards = []
    for i in range(len(all_rewards)):
        start_idx = max(0, i - window_size + 1)
        smoothed_rewards.append(np.mean(all_rewards[start_idx:i + 1]))

    plt.plot(smoothed_rewards)
    for i, phase in enumerate(phases[1:], 1):
        plt.axvline(x=i * episodes_per_phase, color='red', linestyle='--', alpha=0.7,
                    label=f'阶段 {phase} 开始' if i == 1 else '')
    plt.xlabel('Episode')
    plt.ylabel('平滑奖励')
    plt.title('训练奖励曲线')
    plt.legend()
    plt.grid(True)

    # (2) Per-phase average reward (last 100 episodes of each phase).
    plt.subplot(2, 3, 2)
    phase_avg_rewards = []
    phase_labels = []
    for phase in phases:
        rewards = phase_results[phase]['rewards']
        phase_avg_rewards.append(np.mean(rewards[-100:]))
        phase_labels.append(f'阶段 {phase}')

    bars = plt.bar(phase_labels, phase_avg_rewards, alpha=0.7)
    for i, (bar, reward) in enumerate(zip(bars, phase_avg_rewards)):
        plt.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.01,
                 f'{reward:.3f}', ha='center', va='bottom')
    plt.ylabel('平均奖励')
    plt.title('各阶段平均奖励对比')
    plt.xticks(rotation=45)

    # (3) Per-phase collection-rate comparison.
    plt.subplot(2, 3, 3)
    phase_collection_rates = []
    for phase in phases:
        metrics = phase_results[phase]['performance_metrics']
        phase_collection_rates.append(np.mean([m['collected_ratio'] for m in metrics[-100:]]))

    bars = plt.bar(phase_labels, phase_collection_rates, alpha=0.7, color='green')
    for i, (bar, rate) in enumerate(zip(bars, phase_collection_rates)):
        plt.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + 0.01,
                 f'{rate:.3f}', ha='center', va='bottom')
    plt.ylabel('平均收集率')
    plt.title('各阶段收集率对比')
    plt.xticks(rotation=45)

    # (4) Per-phase energy comparison.
    plt.subplot(2, 3, 4)
    phase_energies = []
    for phase in phases:
        metrics = phase_results[phase]['performance_metrics']
        phase_energies.append(np.mean([m['energy'] for m in metrics[-100:]]))

    bars = plt.bar(phase_labels, phase_energies, alpha=0.7, color='orange')
    for i, (bar, energy) in enumerate(zip(bars, phase_energies)):
        plt.text(bar.get_x() + bar.get_width() / 2, bar.get_height() + energy * 0.01,
                 f'{energy:.0f}', ha='center', va='bottom')
    plt.ylabel('平均总能耗')
    plt.title('各阶段能耗对比')
    plt.xticks(rotation=45)

    # (5) Smoothed critic-loss curve.
    plt.subplot(2, 3, 5)
    if all_losses and len(all_losses) > 0:
        critic_losses = [loss['critic_loss'] for loss in all_losses if 'critic_loss' in loss]
        if critic_losses:
            # Moving-average smoothing sized to the series length.
            window = min(50, len(critic_losses) // 10)
            if window > 1:
                smoothed_critic = []
                for i in range(len(critic_losses)):
                    start_idx = max(0, i - window + 1)
                    smoothed_critic.append(np.mean(critic_losses[start_idx:i + 1]))
                plt.plot(smoothed_critic, label='Critic Loss')
            else:
                plt.plot(critic_losses, label='Critic Loss')

    plt.xlabel('训练步骤')
    plt.ylabel('损失')
    plt.title('训练损失曲线')
    plt.legend()
    plt.grid(True)

    # (6) Continual-learning effect: adaptation vs. forgetting per transition.
    plt.subplot(2, 3, 6)
    forgetting_scores = []
    adaptation_scores = []

    for i, phase in enumerate(phases[1:], 1):
        # Adaptation: final vs. initial performance within the new phase.
        phase_rewards = phase_results[phases[i]]['rewards']
        initial_perf = np.mean(phase_rewards[:50])  # first 50 episodes
        final_perf = np.mean(phase_rewards[-50:])  # last 50 episodes
        adaptation = (final_perf - initial_perf) / abs(initial_perf) if initial_perf != 0 else 0
        adaptation_scores.append(adaptation)

        # Forgetting: new phase's initial vs. previous phase's final performance.
        prev_final = np.mean(phase_results[phases[i - 1]]['rewards'][-50:])
        forgetting = (prev_final - initial_perf) / abs(prev_final) if prev_final != 0 else 0
        forgetting_scores.append(forgetting)

    x = np.arange(len(phases[1:]))
    width = 0.35

    plt.bar(x - width / 2, adaptation_scores, width, label='适应性得分', alpha=0.7, color='blue')
    plt.bar(x + width / 2, forgetting_scores, width, label='遗忘得分', alpha=0.7, color='red')

    plt.xlabel('阶段转换')
    plt.ylabel('得分')
    plt.title('持续学习效果分析')
    plt.xticks(x, [f'{phases[i]}→{phases[i + 1]}' for i in range(len(phases[1:]))])
    plt.legend()
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig('results/continual_learning_analysis.png', dpi=300, bbox_inches='tight')
    plt.close()

    # Persist the full training history and configuration.
    training_data = {
        'all_rewards': all_rewards,
        'phase_results': phase_results,
        'training_config': {
            'phases': phases,
            'episodes_per_phase': episodes_per_phase,
            'total_episodes': total_episodes,
            'hyperparameters': {
                'ACTOR_LR': ACTOR_LR,
                'CRITIC_LR': CRITIC_LR,
                'GAMMA': GAMMA,
                'TAU': TAU,
                'BATCH_SIZE': BATCH_SIZE,
                'AGEM_MEMORY_SIZE': AGEM_MEMORY_SIZE,
                'AGEM_GAMMA': AGEM_GAMMA
            }
        }
    }

    import pickle
    with open('results/training_data.pkl', 'wb') as f:
        pickle.dump(training_data, f)

    print("训练数据已保存到 results/training_data.pkl")
    print("分析图表已保存到 results/continual_learning_analysis.png")

    # Final checkpoint.
    agent.save("models/sd3_final_continual.pth")
    print("最终模型已保存到 models/sd3_final_continual.pth")

    return agent, training_data

# Script entry point: run the full four-phase continual-learning experiment.
if __name__ == "__main__":
    train_continual_learning()

