import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time

SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei so CJK labels render in matplotlib
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs legible with CJK fonts

EPISODES_PER_TASK = 200

# Environment parameters
AREA_SIZE = 100  # side length of the square service area
NUM_USERS = 15  # number of ground users
MAX_STEPS = 200  # maximum environment steps per episode
MAX_DISTANCE_COLLECT = 15  # UAV must be within this distance to collect a task

# UAV parameters
UAV_SPEED = 20.0  # per-step displacement scale applied to the unit action
UAV_ENERGY_PER_METER = 0.1  # energy cost per unit distance traveled
UAV_HOVER_ENERGY = 0.5  # extra energy charged per task collection

# Task parameters
TASK_SIZE = [10, 50]  # [min, max] task data size, sampled uniformly per user

# TD3 hyper-parameters
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99  # discount factor
TAU = 0.005  # soft target-update coefficient
BUFFER_SIZE = 200000
BATCH_SIZE = 256
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.1  # global scaling applied to the shaped reward

# GEM (Gradient Episodic Memory) parameters
N_MEMORIES = 256  # per-task episodic memory size used for gradient references

# Progressive Network parameters
COLUMN_WIDTH = 64  # hidden size of each GRU column
SEQUENCE_LENGTH = 10  # number of stacked observations fed to the GRUs


class Environment:
    """UAV data-collection environment.

    A single UAV starts at the center of a square area and must fly
    within MAX_DISTANCE_COLLECT of every task-generating user to collect
    its task. The set of task-generating users changes across three
    phases, which form the task sequence for continual learning.
    Observations are stacks of the last SEQUENCE_LENGTH state vectors.
    """

    def __init__(self):
        # User layout and task sizes are drawn once and kept fixed for the run.
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_sizes = np.random.uniform(TASK_SIZE[0], TASK_SIZE[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.total_delay = 0
        self.total_energy = 0
        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1

    def update_task_generating_users(self, phase):
        """Choose which users generate tasks in the given phase (1, 2 or 3).

        Phase 1: all users. Phase 2: a random subset of 11 users.
        Phase 3: the users inactive in phase 2 plus 3 users drawn from
        the phase-2 active set.
        NOTE(review): the phase-3 branch reads attributes created by the
        phase-2 branch, so phases must be visited in order 1 -> 2 -> 3.
        """
        self.current_phase = phase
        if phase == 1:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 2:
            indices = np.random.choice(NUM_USERS, 11, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
            self.phase2_generating_users = set(indices)
            self.phase2_non_generating_users = set(range(NUM_USERS)) - self.phase2_generating_users
        else:
            phase3_users = set(self.phase2_non_generating_users)
            additional_users = np.random.choice(list(self.phase2_generating_users), 3, replace=False)
            phase3_users.update(additional_users)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            for user_id in phase3_users:
                self.task_generating_users[user_id] = True

    def reset(self):
        """Reset the episode state and return the initial stacked observation."""
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.total_delay = 0
        self.total_energy = 0
        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        state = self._get_state()
        # Seed the history with copies of the initial state so the GRU
        # always sees a full SEQUENCE_LENGTH window.
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one timestep.

        Args:
            action: 2-vector in [-1, 1]; scaled by UAV_SPEED to a displacement.

        Returns:
            (stacked observation, reward, done flag, info dict).
        """
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())

        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        energy_consumed = distance_moved * UAV_ENERGY_PER_METER
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect every uncollected task whose user is now within range.
        newly_collected = 0
        collected_indices = []
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    collected_indices.append(i)
                    # Collection delay grows with distance and task size.
                    delay = new_distances[i] * self.task_sizes[i] / 10
                    self.total_delay += delay
                    energy_consumed += UAV_HOVER_ENERGY

        self.total_energy += energy_consumed
        self.step_count += 1
        # Reward shaping uses the pre-move distances; update them afterwards.
        reward = self._calculate_reward(newly_collected, energy_consumed, collected_indices, new_distances,
                                        self.last_distances)
        self.last_distances = new_distances

        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        state = self._get_state()
        self.observation_history.append(state)

        return self._get_gru_state(), reward, done, {
            "collected": sum(self.collected_tasks),
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": self.total_energy,
            "delay": self.total_delay,
            "newly_collected": newly_collected,
            "total_users": NUM_USERS
        }

    def _get_state(self):
        """Build the flat state vector: UAV position, per-user features, progress."""
        state = np.zeros(2 + NUM_USERS * 3 + 1)
        state[0:2] = self.uav_position / AREA_SIZE
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 3
            # Distance normalized by the area diagonal, then two boolean flags.
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
        state[-1] = self.step_count / MAX_STEPS
        return state

    def _get_gru_state(self):
        """Return the last SEQUENCE_LENGTH states as a (T, state_dim) array."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def _calculate_reward(self, newly_collected, energy_consumed, collected_indices, new_distances, old_distances):
        """Shaped reward: collection + progress + proximity + completion
        bonuses, minus an energy penalty and a constant per-step penalty."""
        time_penalty = 0.1
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users)

        collection_reward = newly_collected * 20
        if newly_collected > 0 and total_required > 0:
            progress_bonus = (collected_required / total_required) * 15
            collection_reward += progress_bonus

        energy_penalty = energy_consumed * 0.8
        # Dense shaping: reward moving toward remaining uncollected users,
        # weighted more strongly the closer the UAV already is.
        proximity_reward = 0
        uncollected_tasks_count = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                uncollected_tasks_count += 1
                dist_diff = old_distances[i] - new_distances[i]
                proximity_factor = max(0, 1 - (new_distances[i] / AREA_SIZE) ** 2)
                proximity_reward += dist_diff * 0.5 * proximity_factor

        if uncollected_tasks_count == 0:
            proximity_reward = 0

        # Finishing earlier earns a larger completion bonus.
        completion_reward = 0
        if total_required > 0 and collected_required == total_required:
            completion_reward = 200 - self.step_count * 0.2

        reward = collection_reward + proximity_reward + completion_reward - energy_penalty - time_penalty
        return reward * REWARD_SCALE


# Improved Progressive Networks - fixed parameter structure across tasks
class ProgressiveGRUActor(nn.Module):
    """Progressive-network actor with one GRU column per task.

    A shared base GRU column is always active; each task adds its own
    GRU column plus a lateral connection from earlier columns. The
    decision head is sized for the maximum number of columns and unused
    column slots are zero-padded, so the parameter structure is fixed
    regardless of the current task.
    """

    def __init__(self, state_dim, action_dim, max_action, n_tasks=3):
        super(ProgressiveGRUActor, self).__init__()

        self.state_dim = state_dim
        self.n_tasks = n_tasks
        self.max_action = max_action
        self.current_task = 0

        # Base column: always-active shared feature extractor.
        self.base_gru = nn.GRU(state_dim, COLUMN_WIDTH, batch_first=True)

        # One independent GRU column per task (identical parameter structure).
        self.task_grus = nn.ModuleList([
            nn.GRU(state_dim, COLUMN_WIDTH, batch_first=True)
            for _ in range(n_tasks)
        ])

        # Lateral connections: let the current task access earlier tasks' knowledge.
        self.lateral_connections = nn.ModuleList([
            nn.Linear(COLUMN_WIDTH, COLUMN_WIDTH)
            for _ in range(n_tasks - 1)
        ])

        # Fixed decision head (always sized for the maximum possible input width).
        max_input_dim = COLUMN_WIDTH * (n_tasks + 1)  # +1 for base column
        self.decision_layers = nn.Sequential(
            nn.Linear(max_input_dim, 256),
            nn.ReLU(),
            nn.LayerNorm(256),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.LayerNorm(128),
            nn.Linear(128, action_dim),
            nn.Tanh()
        )

    def forward(self, state):
        """Map a (batch, seq, state_dim) observation stack to actions in
        [-max_action, max_action]."""
        batch_size = state.size(0)
        outputs = []

        # Base column.
        hidden_base = torch.zeros(1, batch_size, COLUMN_WIDTH).to(state.device)
        base_out, _ = self.base_gru(state, hidden_base)
        base_feature = base_out[:, -1]  # last timestep only
        outputs.append(base_feature)

        # Task-specific columns (only up to the current task).
        prev_features = []
        for t in range(min(self.current_task + 1, len(self.task_grus))):
            hidden = torch.zeros(1, batch_size, COLUMN_WIDTH).to(state.device)
            task_out, _ = self.task_grus[t](state, hidden)
            task_feature = task_out[:, -1]

            # Lateral connection: inject knowledge from earlier task columns.
            if t > 0 and t - 1 < len(self.lateral_connections):
                lateral_input = sum(prev_features)  # aggregate earlier tasks' features
                task_feature = task_feature + self.lateral_connections[t - 1](lateral_input)

            outputs.append(task_feature)
            prev_features.append(task_feature)

        # Zero-pad unused column slots so the head input width stays constant.
        while len(outputs) < self.n_tasks + 1:
            outputs.append(torch.zeros_like(base_feature))

        # Concatenate all column features and decide.
        combined = torch.cat(outputs, dim=1)
        action = self.decision_layers(combined)

        return self.max_action * action

    def set_task(self, task_id):
        """Activate columns up to and including task_id."""
        self.current_task = task_id

    def get_trainable_params_for_task(self, task_id):
        """Return the parameters that should be trained for the given task."""
        params = []

        # Base-column parameters (shared across all tasks).
        params.extend(list(self.base_gru.parameters()))

        # The task's own GRU column.
        if task_id < len(self.task_grus):
            params.extend(list(self.task_grus[task_id].parameters()))

        # Its incoming lateral connection, if any.
        if task_id > 0 and task_id - 1 < len(self.lateral_connections):
            params.extend(list(self.lateral_connections[task_id - 1].parameters()))

        # Decision-head parameters.
        params.extend(list(self.decision_layers.parameters()))

        return params


class ProgressiveGRUCritic(nn.Module):
    """Progressive-network twin critic (TD3) mirroring the actor's columns.

    Q1 and Q2 each have their own base GRU column, per-task columns and
    lateral connections; column features are zero-padded to a fixed
    width before the Q heads, so the parameter structure never changes
    across tasks.
    """

    def __init__(self, state_dim, action_dim, n_tasks=3):
        super(ProgressiveGRUCritic, self).__init__()

        self.state_dim = state_dim
        self.n_tasks = n_tasks
        self.current_task = 0

        # Q1 network - progressive column structure.
        self.q1_base_gru = nn.GRU(state_dim, COLUMN_WIDTH, batch_first=True)
        self.q1_task_grus = nn.ModuleList([
            nn.GRU(state_dim, COLUMN_WIDTH, batch_first=True)
            for _ in range(n_tasks)
        ])
        self.q1_lateral_connections = nn.ModuleList([
            nn.Linear(COLUMN_WIDTH, COLUMN_WIDTH)
            for _ in range(n_tasks - 1)
        ])

        max_state_dim = COLUMN_WIDTH * (n_tasks + 1)
        self.q1_layers = nn.Sequential(
            nn.Linear(max_state_dim + action_dim, 256),
            nn.ReLU(),
            nn.LayerNorm(256),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.LayerNorm(128),
            nn.Linear(128, 1)
        )

        # Q2 network - progressive column structure.
        self.q2_base_gru = nn.GRU(state_dim, COLUMN_WIDTH, batch_first=True)
        self.q2_task_grus = nn.ModuleList([
            nn.GRU(state_dim, COLUMN_WIDTH, batch_first=True)
            for _ in range(n_tasks)
        ])
        self.q2_lateral_connections = nn.ModuleList([
            nn.Linear(COLUMN_WIDTH, COLUMN_WIDTH)
            for _ in range(n_tasks - 1)
        ])

        self.q2_layers = nn.Sequential(
            nn.Linear(max_state_dim + action_dim, 256),
            nn.ReLU(),
            nn.LayerNorm(256),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.LayerNorm(128),
            nn.Linear(128, 1)
        )

    def _extract_features(self, state, network_type='q1'):
        """Run one Q network's progressive columns over the state sequence
        and return the fixed-width concatenated features."""
        batch_size = state.size(0)
        outputs = []

        # Select the components of the requested Q network.
        if network_type == 'q1':
            base_gru = self.q1_base_gru
            task_grus = self.q1_task_grus
            lateral_connections = self.q1_lateral_connections
        else:
            base_gru = self.q2_base_gru
            task_grus = self.q2_task_grus
            lateral_connections = self.q2_lateral_connections

        # Base column.
        hidden_base = torch.zeros(1, batch_size, COLUMN_WIDTH).to(state.device)
        base_out, _ = base_gru(state, hidden_base)
        base_feature = base_out[:, -1]
        outputs.append(base_feature)

        # Task-specific columns (only up to the current task).
        prev_features = []
        for t in range(min(self.current_task + 1, len(task_grus))):
            hidden = torch.zeros(1, batch_size, COLUMN_WIDTH).to(state.device)
            task_out, _ = task_grus[t](state, hidden)
            task_feature = task_out[:, -1]

            # Lateral connection from earlier task columns.
            if t > 0 and t - 1 < len(lateral_connections):
                lateral_input = sum(prev_features)
                task_feature = task_feature + lateral_connections[t - 1](lateral_input)

            outputs.append(task_feature)
            prev_features.append(task_feature)

        # Zero-pad to the fixed feature width.
        while len(outputs) < self.n_tasks + 1:
            outputs.append(torch.zeros_like(base_feature))

        return torch.cat(outputs, dim=1)

    def forward(self, state, action):
        """Return (Q1, Q2) estimates for the given state sequence and action."""
        q1_state_features = self._extract_features(state, 'q1')
        q1_input = torch.cat([q1_state_features, action], dim=1)
        q1 = self.q1_layers(q1_input)

        q2_state_features = self._extract_features(state, 'q2')
        q2_input = torch.cat([q2_state_features, action], dim=1)
        q2 = self.q2_layers(q2_input)

        return q1, q2

    def Q1(self, state, action):
        """Return only the Q1 estimate (used for the actor update)."""
        q1_state_features = self._extract_features(state, 'q1')
        q1_input = torch.cat([q1_state_features, action], dim=1)
        return self.q1_layers(q1_input)

    def set_task(self, task_id):
        """Activate columns up to and including task_id."""
        self.current_task = task_id


class ReservoirBuffer:
    """Experience replay buffer with reservoir sampling.

    Maintains two stores:
      * ``buffer`` -- a global replay buffer of at most ``max_size``
        transitions, maintained by reservoir sampling (Algorithm R) so it
        approximates a uniform sample over everything ever added.
      * ``task_buffers`` -- one episodic memory of up to ``N_MEMORIES``
        transitions per task id, used by GEM for gradient references.

    Fix vs. the original: once a task buffer was full, every new
    transition unconditionally replaced a random slot, which biases the
    memory heavily toward recent experience (and is not reservoir
    sampling, contrary to this class's stated purpose). Per-task add
    counters now drive a proper reservoir update.
    """

    def __init__(self, max_size=BUFFER_SIZE):
        self.max_size = max_size
        self.buffer = []
        self.position = 0  # total number of transitions ever added
        self.task_buffers = {}
        self.task_counts = {}  # per-task add counters for reservoir sampling

    def add(self, state, action, reward, next_state, done, task_id=0):
        """Insert one transition into the global and per-task reservoirs."""
        experience = (state, action, reward, next_state, done, task_id)

        # Global buffer: standard reservoir sampling. ``self.position`` is
        # the number of items seen before this one, so randint(0, position)
        # keeps the new item with probability max_size / (position + 1).
        if len(self.buffer) < self.max_size:
            self.buffer.append(experience)
        else:
            idx = random.randint(0, self.position)
            if idx < self.max_size:
                self.buffer[idx] = experience

        self.position += 1

        # Per-task episodic memory: same reservoir scheme, so stored
        # memories stay an approximately uniform sample of the task's data.
        task_buffer = self.task_buffers.setdefault(task_id, [])
        seen = self.task_counts.get(task_id, 0)
        if len(task_buffer) < N_MEMORIES:
            task_buffer.append(experience)
        else:
            idx = random.randint(0, seen)
            if idx < N_MEMORIES:
                task_buffer[idx] = experience
        self.task_counts[task_id] = seen + 1

    def sample(self, batch_size):
        """Uniformly sample up to ``batch_size`` transitions from the global buffer.

        Returns stacked arrays: (states, actions, rewards, next_states,
        dones, task_ids).
        """
        batch = random.sample(self.buffer, min(len(self.buffer), batch_size))
        state, action, reward, next_state, done, task_ids = map(list, zip(*batch))
        return (np.stack(state), np.stack(action), np.array(reward),
                np.stack(next_state), np.array(done), np.array(task_ids))

    def sample_task_memories(self, task_id, batch_size=None):
        """Sample memories belonging to one task; None if the task has none.

        With ``batch_size=None`` the whole task memory is returned
        (in random order).
        """
        if task_id not in self.task_buffers or len(self.task_buffers[task_id]) == 0:
            return None

        memories = self.task_buffers[task_id]
        if batch_size is None:
            batch_size = len(memories)

        batch = random.sample(memories, min(len(memories), batch_size))
        # Unpack into ``task_ids`` so the ``task_id`` argument is not shadowed.
        state, action, reward, next_state, done, task_ids = map(list, zip(*batch))
        return (np.stack(state), np.stack(action), np.array(reward),
                np.stack(next_state), np.array(done), np.array(task_ids))

    def __len__(self):
        return len(self.buffer)


class GEM_TD3:
    """GEM-TD3: TD3 with Progressive columns plus GEM gradient projection.

    Continual-learning agent: Progressive columns isolate task-specific
    capacity (frozen once their task is done), while GEM projects the
    gradients of the remaining trainable parameters away from directions
    that conflict with stored reference gradients of earlier tasks.
    """

    def __init__(self, state_dim, action_dim, max_action, n_tasks=3):
        self.n_tasks = n_tasks
        self.current_task = 0
        self.max_action = max_action

        self.actor = ProgressiveGRUActor(state_dim, action_dim, max_action, n_tasks).to(device)
        self.actor_target = ProgressiveGRUActor(state_dim, action_dim, max_action, n_tasks).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())

        self.critic = ProgressiveGRUCritic(state_dim, action_dim, n_tasks).to(device)
        self.critic_target = ProgressiveGRUCritic(state_dim, action_dim, n_tasks).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.memory = ReservoirBuffer()

        # TD3 parameters.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2
        self.total_it = 0

        # GEM: per-task reference gradients, filled on task switches.
        self.gradient_memory = {}

    def select_action(self, state, noise_scale=0.1):
        """Return a clipped action for one stacked observation, with
        optional Gaussian exploration noise."""
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)  # add a batch dimension

        state = torch.FloatTensor(state).to(device)
        action = self.actor(state).cpu().data.numpy().flatten()

        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise

        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Switch the agent (networks and targets) to a new task."""
        print(f"\nSwitching to task {task_id}")

        # Snapshot gradient references for the task we are leaving.
        if self.current_task < task_id and len(self.memory) > 0:
            self._store_gradient_references()

        # Activate the new task's columns everywhere.
        self.current_task = task_id
        self.actor.set_task(task_id)
        self.critic.set_task(task_id)
        self.actor_target.set_task(task_id)
        self.critic_target.set_task(task_id)

        # Freeze earlier tasks' columns (the shared base column stays trainable).
        self._freeze_previous_task_parameters()

    def _freeze_previous_task_parameters(self):
        """Freeze the task-specific GRU columns of all earlier tasks."""
        for t in range(self.current_task):
            if t < len(self.actor.task_grus):
                for param in self.actor.task_grus[t].parameters():
                    param.requires_grad = False
                for param in self.critic.q1_task_grus[t].parameters():
                    param.requires_grad = False
                for param in self.critic.q2_task_grus[t].parameters():
                    param.requires_grad = False

    def _store_gradient_references(self):
        """Compute and store flattened reference gradients for the current
        task from its episodic memories (used later by GEM projection)."""
        memories = self.memory.sample_task_memories(self.current_task, N_MEMORIES)
        if memories is None:
            print(f"No memories found for task {self.current_task}")
            return

        state, action, reward, next_state, done, _ = memories

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Gradients are taken only over this task's trainable parameters.
        trainable_params = self.actor.get_trainable_params_for_task(self.current_task)

        # Actor reference gradient (deterministic policy gradient loss).
        self.actor.zero_grad()
        actor_action = self.actor(state)
        actor_loss = -self.critic.Q1(state, actor_action).mean()
        actor_loss.backward(retain_graph=True)

        actor_grads = []
        for param in trainable_params:
            if param.grad is not None:
                actor_grads.append(param.grad.clone().flatten())

        # Critic reference gradient (TD3 twin-critic TD error).
        self.critic.zero_grad()
        current_q1, current_q2 = self.critic(state, action)

        with torch.no_grad():
            noise = torch.randn_like(action) * self.policy_noise
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q = reward + (1 - done) * GAMMA * target_q

        critic_loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)
        critic_loss.backward()

        critic_grads = []
        for param in self.critic.parameters():
            if param.grad is not None and param.requires_grad:
                critic_grads.append(param.grad.clone().flatten())

        # Store the flattened reference gradients.
        self.gradient_memory[self.current_task] = {
            'actor_grads': torch.cat(actor_grads) if actor_grads else torch.tensor([]).to(device),
            'critic_grads': torch.cat(critic_grads) if critic_grads else torch.tensor([]).to(device)
        }

        print(f"Stored gradient references for task {self.current_task}")

    def _project_gradients(self, model_type='actor'):
        """GEM projection over the current trainable parameters' gradients.

        If the current gradient has a negative inner product with a stored
        task's reference gradient, project it onto that constraint so the
        update does not (to first order) increase the old task's loss.
        NOTE(review): length mismatches between stored and current
        gradients are handled by truncation/zero-padding, which only
        approximates the GEM constraint when the trainable set changes.
        """
        if len(self.gradient_memory) == 0:
            return

        # Collect the gradients of the currently trainable parameters.
        current_grads = []
        if model_type == 'actor':
            trainable_params = self.actor.get_trainable_params_for_task(self.current_task)
            for param in trainable_params:
                if param.grad is not None and param.requires_grad:
                    current_grads.append(param.grad.flatten())
        else:
            for param in self.critic.parameters():
                if param.grad is not None and param.requires_grad:
                    current_grads.append(param.grad.flatten())

        if not current_grads:
            return

        current_grad = torch.cat(current_grads)

        # Project against each previous task's reference gradient.
        for task_id in range(self.current_task):
            if task_id in self.gradient_memory:
                grad_key = f'{model_type}_grads'
                if grad_key in self.gradient_memory[task_id]:
                    old_grad = self.gradient_memory[task_id][grad_key]
                    if len(old_grad) > 0:
                        # Match the old gradient's length to the current one if needed.
                        if len(old_grad) != len(current_grad):
                            # Simple truncate-or-pad strategy.
                            if len(old_grad) > len(current_grad):
                                old_grad = old_grad[:len(current_grad)]
                            else:
                                padding = torch.zeros(len(current_grad) - len(old_grad)).to(device)
                                old_grad = torch.cat([old_grad, padding])

                        # Project only when the gradients conflict (negative dot product).
                        dot_product = torch.dot(current_grad, old_grad)
                        if dot_product < 0:
                            norm_sq = torch.dot(old_grad, old_grad)
                            if norm_sq > 1e-8:
                                current_grad = current_grad - dot_product * old_grad / norm_sq

        # Write the projected gradient back into the parameters.
        idx = 0
        if model_type == 'actor':
            trainable_params = self.actor.get_trainable_params_for_task(self.current_task)
            for param in trainable_params:
                if param.grad is not None and param.requires_grad:
                    param_size = param.grad.numel()
                    param.grad.data = current_grad[idx:idx + param_size].view(param.grad.shape)
                    idx += param_size
        else:
            for param in self.critic.parameters():
                if param.grad is not None and param.requires_grad:
                    param_size = param.grad.numel()
                    param.grad.data = current_grad[idx:idx + param_size].view(param.grad.shape)
                    idx += param_size

    def train(self):
        """One TD3 update (critic every call; actor and targets delayed).

        Returns a dict with the latest critic and actor losses.
        """
        self.total_it += 1

        if len(self.memory) < BATCH_SIZE:
            return {"critic_loss": 0, "actor_loss": 0}

        # Sample a batch of transitions.
        state, action, reward, next_state, done, task_ids = self.memory.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Target Q with clipped target-policy smoothing noise.
        with torch.no_grad():
            noise = torch.randn_like(action) * self.policy_noise
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q = reward + (1 - done) * GAMMA * target_q

        # Critic update (with GEM projection on its gradients).
        current_q1, current_q2 = self.critic(state, action)
        critic_loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self._project_gradients('critic')
        torch.nn.utils.clip_grad_norm_([p for p in self.critic.parameters() if p.requires_grad], 1.0)
        self.critic_optimizer.step()

        # Delayed actor update (with GEM projection on its gradients).
        actor_loss = 0
        if self.total_it % self.policy_freq == 0:
            actor_action = self.actor(state)
            actor_loss = -self.critic.Q1(state, actor_action).mean()

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self._project_gradients('actor')
            torch.nn.utils.clip_grad_norm_([p for p in self.actor.parameters() if p.requires_grad], 1.0)
            self.actor_optimizer.step()

            # Soft-update target networks (only where the source is trainable).
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                if param.requires_grad:
                    target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                if param.requires_grad:
                    target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss.item() if isinstance(actor_loss, torch.Tensor) else actor_loss
        }


# Baseline TD3 kept as a simplified version for comparison
class BasicTD3:
    """Plain TD3 baseline for comparison (no GEM, no column progression)."""

    def __init__(self, state_dim, action_dim, max_action):
        self.max_action = max_action

        # Same network architecture, but configured for a single task.
        self.actor = ProgressiveGRUActor(state_dim, action_dim, max_action, n_tasks=1).to(device)
        self.actor_target = ProgressiveGRUActor(state_dim, action_dim, max_action, n_tasks=1).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())

        self.critic = ProgressiveGRUCritic(state_dim, action_dim, n_tasks=1).to(device)
        self.critic_target = ProgressiveGRUCritic(state_dim, action_dim, n_tasks=1).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.memory = ReservoirBuffer()

        # TD3 parameters.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2
        self.total_it = 0

    def select_action(self, state, noise_scale=0.1):
        """Return a clipped action for one stacked observation, with
        optional Gaussian exploration noise."""
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)  # add a batch dimension

        state = torch.FloatTensor(state).to(device)
        action = self.actor(state).cpu().data.numpy().flatten()

        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise

        return np.clip(action, -self.max_action, self.max_action)

    def train(self):
        """One standard TD3 update (critic every call; actor and targets delayed).

        Returns a dict with the latest critic and actor losses.
        """
        self.total_it += 1

        if len(self.memory) < BATCH_SIZE:
            return {"critic_loss": 0, "actor_loss": 0}

        # Sample a batch of transitions.
        state, action, reward, next_state, done, task_ids = self.memory.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Target Q with clipped target-policy smoothing noise.
        with torch.no_grad():
            noise = torch.randn_like(action) * self.policy_noise
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q = reward + (1 - done) * GAMMA * target_q

        # Critic update.
        current_q1, current_q2 = self.critic(state, action)
        critic_loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        # Delayed actor update.
        actor_loss = 0
        if self.total_it % self.policy_freq == 0:
            actor_action = self.actor(state)
            actor_loss = -self.critic.Q1(state, actor_action).mean()

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Soft-update target networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss.item() if isinstance(actor_loss, torch.Tensor) else actor_loss
        }


def _run_episode(agent, env, noise_scale, task_id):
    """Run one training episode for *agent* on *env*.

    Acts with exploration noise, stores each transition tagged with
    ``task_id`` in the agent's replay memory, and calls ``agent.train()``
    after every step.

    Returns:
        float: cumulative (undiscounted) episode reward.
    """
    state = env.reset()
    episode_reward = 0
    for _ in range(MAX_STEPS):
        action = agent.select_action(state, noise_scale=noise_scale)
        next_state, reward, done, info = env.step(action)
        agent.memory.add(state, action, reward, next_state, done, task_id)
        agent.train()
        state = next_state
        episode_reward += reward
        if done:
            break
    return episode_reward


def _greedy_rollout(agent, env):
    """Roll the agent out deterministically (noise_scale=0) for evaluation.

    Returns:
        np.ndarray of shape (steps+1, 2): the UAV positions visited,
        starting from the reset position.
    """
    state = env.reset()
    trajectory = [env.uav_position.copy()]
    for _ in range(MAX_STEPS):
        action = agent.select_action(state, noise_scale=0)
        next_state, reward, done, info = env.step(action)
        trajectory.append(env.uav_position.copy())
        state = next_state
        if done:
            break
    return np.array(trajectory)


def _plot_progress(env, trajectory, rewards_gem, rewards_basic,
                   smoothed_gem, smoothed_basic, episodes_per_task,
                   global_episode):
    """Save a two-panel figure: reward curves (GEM vs. baseline) and the
    greedy UAV trajectory, to results/gem_comparison_episode_<N>.png."""
    plt.figure(figsize=(15, 5))

    # Left panel: raw + smoothed reward curves for both agents, with
    # phase-boundary markers.
    plt.subplot(1, 2, 1)
    plt.plot(rewards_gem, alpha=0.3, color='blue', label='GEM Raw')
    plt.plot(smoothed_gem, color='blue', linewidth=2, label='GEM Smoothed')
    plt.plot(rewards_basic, alpha=0.3, color='red', label='Basic Raw')
    plt.plot(smoothed_basic, color='red', linewidth=2, label='Basic Smoothed')
    plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
    plt.title("奖励对比 (GEM vs Basic)")
    plt.xlabel("Episode")
    plt.ylabel("Reward")
    plt.legend()
    plt.grid(True)

    # Right panel: user layout and the evaluated UAV path. Green = task
    # collected, red = pending, gray = user not generating tasks this phase.
    plt.subplot(1, 2, 2)
    for i, pos in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            color = 'green' if env.collected_tasks[i] else 'red'
        else:
            color = 'gray'
        plt.scatter(pos[0], pos[1], s=100, c=color)
        plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=10)

    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.7, label='UAV轨迹')
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    plt.title(f"UAV轨迹 (Episode {global_episode})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.grid(True)
    plt.legend()

    plt.tight_layout()
    plt.savefig(f"results/gem_comparison_episode_{global_episode}.png")
    plt.close()


def train():
    """Train a GEM-augmented TD3 agent against a plain TD3 baseline over
    three sequential task phases.

    Each phase changes which users generate tasks. The GEM agent switches
    its task index (keeping cross-task memories); the baseline simply
    clears its replay buffer at each phase boundary. Best actor weights
    and comparison plots are written under ``results/``.

    Returns:
        tuple: (agent_gem, agent_basic, env) after all phases complete.
    """
    os.makedirs("results", exist_ok=True)

    env = Environment()
    # State layout: UAV (x, y) + 3 features per user + 1 extra scalar.
    # NOTE(review): exact per-user features are defined by Environment's
    # observation builder (not visible here) — confirm against it.
    state_dim = 2 + NUM_USERS * 3 + 1
    action_dim = 2
    max_action = 1

    # One GEM agent (3 task columns/memories) and one vanilla baseline.
    agent_gem = GEM_TD3(state_dim, action_dim, max_action, n_tasks=3)
    agent_basic = BasicTD3(state_dim, action_dim, max_action)

    # Derive schedule from the module constant instead of re-hardcoding 200/600.
    episodes_per_task = EPISODES_PER_TASK
    total_episodes = 3 * episodes_per_task
    eval_freq = 50

    rewards_history_gem = []
    rewards_history_basic = []
    smoothed_rewards_gem = []
    smoothed_rewards_basic = []
    best_reward_gem = -float('inf')
    best_reward_basic = -float('inf')

    start_time = time.time()

    for phase in range(1, 4):
        env.update_task_generating_users(phase)
        agent_gem.switch_task(phase - 1)  # GEM tasks are 0-indexed

        # The baseline has no continual-learning machinery: it restarts
        # its replay buffer on every new task.
        if phase > 1:
            agent_basic.memory.buffer.clear()

        # Linearly decaying exploration noise; the start level itself
        # decays 10% per phase.
        phase_noise = np.linspace(
            EXPLORATION_NOISE_START * (0.9 ** (phase - 1)),
            EXPLORATION_NOISE_END,
            episodes_per_task
        )

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode
            current_noise = phase_noise[episode - 1]

            # GEM transitions are tagged with the current task; the
            # baseline always stores under task 0.
            episode_reward_gem = _run_episode(agent_gem, env, current_noise, phase - 1)
            episode_reward_basic = _run_episode(agent_basic, env, current_noise, 0)

            rewards_history_gem.append(episode_reward_gem)
            rewards_history_basic.append(episode_reward_basic)

            # 10-episode moving average (raw value until enough history).
            if len(rewards_history_gem) >= 10:
                smoothed_rewards_gem.append(np.mean(rewards_history_gem[-10:]))
                smoothed_rewards_basic.append(np.mean(rewards_history_basic[-10:]))
            else:
                smoothed_rewards_gem.append(episode_reward_gem)
                smoothed_rewards_basic.append(episode_reward_basic)

            # Checkpoint whichever agent beats its all-time best reward.
            if episode_reward_gem > best_reward_gem:
                best_reward_gem = episode_reward_gem
                torch.save(agent_gem.actor.state_dict(), f"results/best_actor_gem_phase_{phase}.pth")

            if episode_reward_basic > best_reward_basic:
                best_reward_basic = episode_reward_basic
                torch.save(agent_basic.actor.state_dict(), f"results/best_actor_basic_phase_{phase}.pth")

            elapsed_time = time.time() - start_time
            print(f"Phase: {phase} | Episode: {episode}/{episodes_per_task} | "
                  f"Global Episode: {global_episode}/{total_episodes} | "
                  f"Reward GEM: {episode_reward_gem:.2f} | "
                  f"Reward Basic: {episode_reward_basic:.2f} | "
                  f"Noise: {current_noise:.3f} | "
                  f"Time: {elapsed_time:.2f}s")

            # Periodic evaluation: greedy rollout of the GEM agent + plots.
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                trajectory = _greedy_rollout(agent_gem, env)
                _plot_progress(env, trajectory,
                               rewards_history_gem, rewards_history_basic,
                               smoothed_rewards_gem, smoothed_rewards_basic,
                               episodes_per_task, global_episode)

        # Fix: the original printed the "training complete" summary here,
        # inside the phase loop, falsely announcing completion after every
        # phase; the single summary below (after all phases) is kept.

    print(f"训练完成!")
    print(f"GEM最佳结果: {best_reward_gem:.2f}")
    print(f"基线最佳结果: {best_reward_basic:.2f}")
    print(f"GEM优势: {best_reward_gem - best_reward_basic:.2f}")

    return agent_gem, agent_basic, env


if __name__ == "__main__":
    # Script entry point: run the full three-phase curriculum, then keep
    # module-level handles to the trained agents and the environment so
    # they remain inspectable after training finishes.
    result = train()
    agent_gem, agent_basic, env = result