import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time
import matplotlib
'''Objective function without the delay/energy terms; compares EWC-based continual RL
against reinforcement learning without re-learning protection.'''
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

# Check GPU availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font so plot titles render
plt.rcParams['axes.unicode_minus'] = False

EPISODES_PER_TASK = 200

# Environment parameters
AREA_SIZE = 100  # area size: 100m x 100m
NUM_USERS = 15  # number of ground users
MAX_STEPS = 200  # maximum steps per episode
MAX_DISTANCE_COLLECT = 15  # maximum distance at which the UAV can collect a task

# UAV parameters
UAV_SPEED = 20.0  # UAV speed (m/s)
UAV_ENERGY_PER_METER = 0.1  # energy consumed per meter flown
UAV_HOVER_ENERGY = 0.5  # hover energy per task collection

# Task parameters
TASK_SIZE = [10, 50]  # task size range (MB)

# TD3 parameters
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99
TAU = 0.005
BUFFER_SIZE = 200000
BATCH_SIZE = 256
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.1

# EWC parameters
EWC_LAMBDA = 2  # EWC regularization strength
FISHER_SAMPLE_SIZE = 2000  # number of samples used to estimate the Fisher information

# GRU parameters
SEQUENCE_LENGTH = 10  # GRU input sequence length
HIDDEN_SIZE = 128  # GRU hidden layer size


class Environment:
    """UAV task-collection environment.

    A single UAV flies over an AREA_SIZE x AREA_SIZE region and must approach
    task-generating users (within MAX_DISTANCE_COLLECT meters) to collect their
    tasks. Observations are stacked into a SEQUENCE_LENGTH sliding window for
    the recurrent (GRU) policy. The set of task-generating users changes across
    three curriculum phases (see update_task_generating_users).
    """

    def __init__(self):
        # Fixed user positions (sampled once at construction, identical across episodes)
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))

        # Fixed task sizes in MB (sampled once)
        self.task_sizes = np.random.uniform(TASK_SIZE[0], TASK_SIZE[1], size=NUM_USERS)

        # Task-generation mask - initially every user generates a task
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)

        # UAV starts at the area center
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Per-user collected flags
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # Step counter within the current episode
        self.step_count = 0

        # Accumulated delay and energy for the episode
        self.total_delay = 0
        self.total_energy = 0

        # UAV trajectory (list of positions) for rendering
        self.trajectory = [self.uav_position.copy()]

        # Previous UAV-to-user distances, used by the reward shaping
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Sliding window of observations fed to the GRU
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        # Current curriculum phase (1, 2 or 3)
        self.current_phase = 1

    def update_task_generating_users(self, phase):
        """Update which users generate tasks according to the training phase."""
        self.current_phase = phase

        if phase == 1:
            # Phase 1: all 15 users generate tasks
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
            print(f"Phase {phase}: All {NUM_USERS} users are generating tasks")

        elif phase == 2:
            # Phase 2: a random subset of 11 users generates tasks
            indices = np.random.choice(NUM_USERS, 11, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True

            # Remember the phase-2 split; phase 3 is built from it
            self.phase2_generating_users = set(indices)
            self.phase2_non_generating_users = set(range(NUM_USERS)) - self.phase2_generating_users

            print(f"Phase {phase}: {len(self.phase2_generating_users)} users are generating tasks")
            print(f"Task generating users: {sorted(list(self.phase2_generating_users))}")
            print(f"Non-generating users: {sorted(list(self.phase2_non_generating_users))}")

        else:  # phase == 3
            # Phase 3: 7 users generate tasks.
            # Rule: the 4 users that did NOT generate tasks in phase 2 all
            # generate tasks in phase 3, plus 3 users chosen at random from
            # the 11 phase-2 generators.
            # NOTE(review): phase 3 requires phase 2 to have run first,
            # otherwise self.phase2_* attributes do not exist.

            # All phase-2 non-generators participate in phase 3
            phase3_users = set(self.phase2_non_generating_users)  # 4 users

            # Randomly pick 3 of the phase-2 generators
            additional_users = np.random.choice(list(self.phase2_generating_users), 3, replace=False)
            phase3_users.update(additional_users)

            # Install the phase-3 mask
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            for user_id in phase3_users:
                self.task_generating_users[user_id] = True

            print(f"Phase {phase}: {len(phase3_users)} users are generating tasks")
            print(f"Task generating users: {sorted(list(phase3_users))}")
            print(f"From phase 2 non-generating: {sorted(list(self.phase2_non_generating_users))}")
            print(f"From phase 2 generating: {sorted(list(additional_users))}")

        print(f"Final task generating users for phase {phase}: {np.where(self.task_generating_users)[0]}")
        print("-" * 50)

    def reset(self):
        """Reset the episode state and return the initial GRU observation sequence."""
        # Move the UAV back to the area center
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Clear per-user collected flags
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # Reset step counter
        self.step_count = 0

        # Reset accumulated delay and energy
        self.total_delay = 0
        self.total_energy = 0

        # Reset trajectory
        self.trajectory = [self.uav_position.copy()]

        # Reset previous distances
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Reset the observation window
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        # Initial state
        state = self._get_state()

        # Pre-fill the window by repeating the initial observation
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(state)

        return self._get_gru_state()

    def step(self, action):
        """Apply one UAV movement action and return (next_obs, reward, done, info)."""
        # Update the UAV position (action is a relative move in [-1, 1] per axis)
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)

        # Record trajectory
        self.trajectory.append(self.uav_position.copy())

        # Movement distance and corresponding energy cost
        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        energy_consumed = distance_moved * UAV_ENERGY_PER_METER

        # New distances to every user
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect tasks in range
        newly_collected = 0
        collected_indices = []

        for i in range(NUM_USERS):
            # Only task-generating, not-yet-collected users count
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    collected_indices.append(i)

                    # Task delay model: distance * size / 10
                    delay = new_distances[i] * self.task_sizes[i] / 10
                    self.total_delay += delay

                    # Hover energy for the collection
                    energy_consumed += UAV_HOVER_ENERGY

        # Accumulate total energy
        self.total_energy += energy_consumed

        # Advance step counter
        self.step_count += 1

        # Compute reward
        reward = self._calculate_reward(newly_collected, energy_consumed, collected_indices, new_distances,
                                        self.last_distances)

        # Update previous distances
        self.last_distances = new_distances

        # Episode ends when all required tasks are collected or MAX_STEPS is reached
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        # Current state
        state = self._get_state()

        # Push into the observation window
        self.observation_history.append(state)

        return self._get_gru_state(), reward, done, {
            "collected": sum(self.collected_tasks),
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": self.total_energy,
            "delay": self.total_delay,
            "newly_collected": newly_collected,
            "total_users": NUM_USERS
        }

    def _get_state(self):
        """Build the flat observation vector.

        Layout: UAV position (2), then per user [distance, collected flag,
        generating flag] (3 * NUM_USERS), then normalized step count (1).
        """
        state = np.zeros(2 + NUM_USERS * 3 + 1)

        # UAV position (normalized by area size)
        state[0:2] = self.uav_position / AREA_SIZE

        # Per-user distance, collected flag and generating flag
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 3
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # normalize by area diagonal
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])

        # Normalized step count
        state[-1] = self.step_count / MAX_STEPS

        return state

    def _get_gru_state(self):
        """Return the stacked observation window for the GRU, shape (SEQUENCE_LENGTH, state_dim)."""
        # Pad with the current observation until the window is full
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())

        return np.array(list(self.observation_history))

    def _calculate_reward(self, newly_collected, energy_consumed, collected_indices, new_distances, old_distances):
        """Shaped reward: collection + progress + proximity + completion - energy - time."""
        # 1. Fixed per-step time penalty to encourage efficiency
        time_penalty = 0.1  # small penalty for every step taken

        # 2. Progress is folded into the collection reward:
        # a larger bonus is paid when new tasks are collected
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users)

        # Base collection reward
        collection_reward = newly_collected * 20

        # Extra progress bonus on collection: later tasks are worth more
        # (e.g. collecting the 9th of 10 tasks adds 0.9 * 15)
        if newly_collected > 0 and total_required > 0:
            progress_bonus = (collected_required / total_required) * 15
            collection_reward += progress_bonus

        # Energy penalty - slightly increased so long flights cost more
        energy_penalty = energy_consumed * 0.8  # raised from 0.5 to 0.8

        # Reward for closing the distance to uncollected tasks
        proximity_reward = 0
        uncollected_tasks_count = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                uncollected_tasks_count += 1
                dist_diff = old_distances[i] - new_distances[i]
                # Simplified proximity shaping: focus purely on "getting closer";
                # the old progress_factor was replaced by the one-off progress bonus above
                proximity_factor = max(0, 1 - (new_distances[i] / (AREA_SIZE)) ** 2)  # normalized by area size
                proximity_reward += dist_diff * 0.5 * proximity_factor  # low weight so it never dominates

        # No proximity reward when every required task is already collected
        if uncollected_tasks_count == 0:
            proximity_reward = 0

        # 3. Completion reward - must be large; it is the agent's ultimate goal.
        # Fewer steps to finish => larger final bonus.
        completion_reward = 0
        if total_required > 0 and collected_required == total_required:
            completion_reward = 200 - self.step_count * 0.2

        # (The earlier progress_reward / efficiency_penalty terms were removed;
        # they were replaced by the mechanisms above.)

        reward = collection_reward + proximity_reward + completion_reward - energy_penalty - time_penalty

        # Reward scaling
        return reward * REWARD_SCALE

    def render(self, episode=0, clear_output=True):
        """Plot the current environment state and save it under the results directory."""
        plt.figure(figsize=(10, 10))

        # Users, colored by task status
        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                # Task-generating user
                if self.collected_tasks[i]:
                    color = 'green'  # collected
                else:
                    color = 'red'  # not yet collected
            else:
                # User that does not generate a task
                color = 'gray'

            plt.scatter(pos[0], pos[1], s=100, c=color)
            plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=12)

        # UAV position and trajectory
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')

        # Collection radius
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]),
                            MAX_DISTANCE_COLLECT, color='blue', fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)

        # Task-status title
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        plt.title(title)
        plt.grid(True)

        # NOTE(review): the directory name starts with a space (" results") -
        # it matches os.makedirs(" results") in train(), but looks unintended.
        plt.savefig(f" results/step_{episode}_{self.step_count}.png")
        plt.close()


class GRUActor(nn.Module):
    """Recurrent deterministic policy.

    Encodes an observation sequence of shape [batch, SEQUENCE_LENGTH, state_dim]
    with a single-layer GRU and maps the final timestep's feature through an
    MLP to a tanh-bounded continuous action scaled by max_action.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()

        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # Sequence encoder (batch-first so inputs are [batch, seq, feature])
        self.gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        # Policy head
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)

        self.max_action = max_action

        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)

        # Persisted GRU hidden state; callers reset it explicitly between
        # rollouts/batches via reset_hidden()
        self.hidden = None

        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights and zero biases for every linear layer
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.constant_(module.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        """Map a state sequence [batch, seq_len, state_dim] to an action [batch, action_dim]."""
        # Lazily (re)initialize the hidden state when requested or missing
        if reset_hidden or self.hidden is None:
            self.reset_hidden(state.size(0))

        # Encode the whole sequence, carrying the hidden state forward
        seq_features, self.hidden = self.gru(state, self.hidden)

        # Only the last timestep's output feeds the policy head
        features = seq_features[:, -1]

        features = self.ln1(torch.relu(self.layer1(features)))
        features = self.ln2(torch.relu(self.layer2(features)))

        # tanh bounds the action to [-1, 1]; scale to the action range
        return self.max_action * torch.tanh(self.layer3(features))

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for the given batch size."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class GRUCritic(nn.Module):
    """Twin recurrent Q-networks for TD3.

    Two independent GRU encoders (one per Q-head) process the state sequence;
    the final timestep's feature is concatenated with the action and scored
    by separate MLP heads (clipped double-Q learning).
    """

    def __init__(self, state_dim, action_dim):
        super(GRUCritic, self).__init__()

        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # Two independent GRUs encode the state sequence
        self.q1_gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        self.q2_gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        # Q1 head
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, 1)

        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Q2 head
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, 1)

        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        # Persisted GRU hidden states; callers reset them between batches
        self.q1_hidden = None
        self.q2_hidden = None

        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights; note biases start at 0.01 (unlike the actor's 0.0)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)

    def forward(self, state, action, reset_hidden=False):
        """Return (q1, q2) for a state sequence [batch, seq, state_dim] and action [batch, action_dim]."""
        # Lazily (re)initialize both hidden states when requested or missing
        if reset_hidden or self.q1_hidden is None or self.q2_hidden is None:
            self.reset_hidden(state.size(0))

        # Encode the sequence with each head's GRU
        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden)
        q2_gru_out, self.q2_hidden = self.q2_gru(state, self.q2_hidden)

        # Use only the last timestep's output
        q1_state = q1_gru_out[:, -1]
        q2_state = q2_gru_out[:, -1]

        # Concatenate the state representation with the action
        q1_x = torch.cat([q1_state, action], dim=1)
        q2_x = torch.cat([q2_state, action], dim=1)

        # Q1 head
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1 = self.q1_output(q1)

        # Q2 head
        q2 = self.q2_ln1(torch.relu(self.q2_layer1(q2_x)))
        q2 = self.q2_ln2(torch.relu(self.q2_layer2(q2)))
        q2 = self.q2_output(q2)

        return q1, q2

    def Q1(self, state, action, reset_hidden=False):
        """Q1 only - used for the actor update (resets just the Q1 hidden state)."""
        if reset_hidden or self.q1_hidden is None:
            self.reset_q1_hidden(state.size(0))

        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden)
        q1_state = q1_gru_out[:, -1]
        q1_x = torch.cat([q1_state, action], dim=1)

        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1 = self.q1_output(q1)

        return q1

    def reset_hidden(self, batch_size=1):
        """Zero both GRU hidden states for the given batch size."""
        self.reset_q1_hidden(batch_size)
        self.reset_q2_hidden(batch_size)

    def reset_q1_hidden(self, batch_size=1):
        # Zero the Q1 encoder's hidden state
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)

    def reset_q2_hidden(self, batch_size=1):
        # Zero the Q2 encoder's hidden state
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class ReplayBuffer:
    """Fixed-capacity FIFO experience buffer for off-policy training."""

    def __init__(self, max_size=BUFFER_SIZE):
        # deque evicts the oldest transition automatically when full
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample up to batch_size transitions, stacked per field."""
        count = min(len(self.buffer), batch_size)
        batch = random.sample(self.buffer, count)
        states, actions, rewards, next_states, dones = map(np.stack, zip(*batch))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


class EWC:
    """Elastic Weight Consolidation helper for one network.

    Stores, per finished task, a snapshot of the network parameters together
    with a diagonal Fisher-information estimate, and exposes a quadratic
    penalty that discourages drift away from those snapshots.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        self.importance = {}
        self.old_params = {}
        self.task_importances = {}  # task_id -> (param snapshot, Fisher estimate)

    def _calculate_fisher_info(self, replay_buffer):
        """Estimate the diagonal Fisher information from replay samples."""
        fisher = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                fisher[name] = torch.zeros_like(param).to(device)

        if len(replay_buffer) == 0:
            return fisher

        # Use more samples for diversity (capped by the buffer size)
        samples_count = min(self.fisher_sample_size * 2, len(replay_buffer))

        self.model.train()

        for _ in range(samples_count):
            # One random transition per gradient evaluation
            states, actions, rewards, next_states, dones = replay_buffer.sample(1)
            states = torch.FloatTensor(states).to(device)
            actions = torch.FloatTensor(actions).to(device)

            self.model.zero_grad()

            if isinstance(self.model, GRUActor):
                self.model.reset_hidden(1)
                outputs = self.model(states)
                # Gaussian log-likelihood surrogate: negative squared error
                # between the policy output and the stored action
                log_likelihood = -((outputs - actions) ** 2).sum()
            else:
                self.model.reset_hidden(1)
                outputs, _ = self.model(states, actions)
                # NOTE(review): summing the Q-value is not a true
                # log-likelihood; this is a heuristic sensitivity measure
                log_likelihood = outputs.sum()

            # Backprop to get per-parameter gradients
            log_likelihood.backward()

            # Accumulate squared gradients (diagonal Fisher estimate)
            for name, param in self.model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    fisher[name] += param.grad.pow(2) / samples_count

        return fisher

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Quadratic EWC penalty summed over all previously stored tasks."""
        loss = 0

        # Penalize deviation from each stored task's parameters,
        # weighted by that task's Fisher importance
        for task_id, (old_params, importance) in self.task_importances.items():
            task_loss = 0
            for name, param in self.model.named_parameters():
                if name in old_params and name in importance and param.requires_grad:
                    # Small epsilon keeps the weight strictly positive
                    importance_weight = importance[name] + 1e-8
                    param_diff = (param - old_params[name]).pow(2)
                    task_loss += torch.sum(importance_weight * param_diff)
            loss += task_loss

        return lam * loss

    def store_task_parameters(self, task_id, replay_buffer):
        """Snapshot current parameters and their Fisher importance for task_id."""
        print(f"Storing parameters for task {task_id}")

        # Snapshot current parameters
        old_params = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                old_params[name] = param.data.clone()

        # Estimate the Fisher information from the task's replay data
        importance = self._calculate_fisher_info(replay_buffer)

        # Register the snapshot under this task id
        self.task_importances[task_id] = (old_params, importance)

        print(f"Stored parameters for {len(old_params)} layers")


class TD3:
    """Twin Delayed DDPG (TD3) combined with Elastic Weight Consolidation (EWC).

    Actor and critic are GRU-based (they consume observation sequences). When
    use_ewc is True, a quadratic EWC penalty anchors the networks to parameters
    that were important for previously trained tasks, mitigating catastrophic
    forgetting across the three curriculum phases.
    """

    def __init__(self, state_dim, action_dim, max_action, use_ewc=True):
        # Actor and its Polyak-averaged target
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin critic and its target
        self.critic = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()
        self.use_ewc = use_ewc
        self.adaptive_ewc_lambda = EWC_LAMBDA

        # TD3-specific hyperparameters
        self.policy_noise = 0.2 * max_action  # target-policy smoothing noise std
        self.noise_clip = 0.5 * max_action    # clip range for the smoothing noise
        self.policy_freq = 2                  # delayed policy-update interval
        self.total_it = 0

        # EWC bookkeeping: one helper per network
        if self.use_ewc:
            self.ewc_actor = EWC(self.actor)
            self.ewc_critic = EWC(self.critic)
        self.current_task = 1  # id of the task currently being trained

        # Per-task exploration-noise schedules.
        # NOTE(review): train() builds its own phase_noise schedule, so this
        # table is currently unused; kept for interface stability.
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            2: np.linspace(EXPLORATION_NOISE_START * 0.8, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            3: np.linspace(EXPLORATION_NOISE_START * 0.7, EXPLORATION_NOISE_END, EPISODES_PER_TASK)
        }

        # Reward-driven learning-rate decay (mode='max': decay when the
        # episode reward plateaus)
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Select an action for one observation sequence, optionally with Gaussian exploration noise.

        state: array of shape [seq_len, state_dim] or [1, seq_len, state_dim].
        Returns a clipped action array of shape [action_dim].
        """
        # Ensure the GRU batch dimension is present: [batch, seq_len, feature]
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)

        state = torch.FloatTensor(state).to(device)

        # Fresh hidden state for a single-sample forward pass
        self.actor.reset_hidden(1)

        action = self.actor(state).cpu().data.numpy().flatten()

        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise

        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Finalize the current task (store EWC snapshots) and start task_id."""
        print(f"\nSwitching to task {task_id} (EWC: {self.use_ewc})")

        # Snapshot parameters + Fisher info for the task just finished
        if self.use_ewc and self.current_task > 0 and len(self.memory) > 0:
            self.ewc_actor.store_task_parameters(self.current_task, self.memory)
            self.ewc_critic.store_task_parameters(self.current_task, self.memory)

        # Clear the replay buffer so the new task is not polluted by old data
        print(f"Clearing replay buffer for new task.")
        self.memory.buffer.clear()

        # Switch the active task id
        self.current_task = task_id

        # Reset recurrent states for the new task
        self.actor.reset_hidden()
        self.critic.reset_hidden()

        print(f"Reset GRU states for new task {task_id}")

    def train(self):
        """Run one TD3 optimization step.

        Returns a dict with 'critic_loss' and 'actor_loss' (actor_loss is 0 on
        iterations without a policy update), or None while the buffer holds
        fewer than BATCH_SIZE transitions.
        """
        self.total_it += 1

        if len(self.memory) < BATCH_SIZE:
            return

        # Sample a batch of transitions
        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Fresh GRU hidden states sized for the batch
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        with torch.no_grad():
            # Targets also get fresh hidden states
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target-policy smoothing: clipped Gaussian noise on the target action
            noise = torch.FloatTensor(action.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)

            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q target
            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q = reward + (1 - done) * GAMMA * target_q

        # Current Q estimates
        current_q1, current_q2 = self.critic(state, action)

        # Critic loss: sum of both TD errors
        critic_loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)

        # EWC penalty anchors the critic to previous-task parameters
        if self.use_ewc and self.current_task > 1:
            critic_loss += self.ewc_critic.calculate_ewc_loss()

        # Optimize the critic
        self.critic_optimizer.zero_grad()
        # retain_graph: the GRU hidden states stored on the modules can keep
        # the critic graph linked to the actor pass below
        critic_loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        # Delayed policy update
        actor_loss = 0
        if self.total_it % self.policy_freq == 0:
            # Fresh hidden states for the actor pass and the Q1 evaluation
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)

            # Deterministic policy gradient: maximize Q1 of the actor's action
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            if self.use_ewc and self.current_task > 1:
                # Scale EWC strength with the number of tasks seen so far
                task_factor = min(self.current_task * 0.5, 2.0)
                adaptive_lambda = self.adaptive_ewc_lambda * task_factor

                # BUGFIX: the critic EWC loss used to be added to critic_loss
                # here as well - AFTER critic backward/step - which had no
                # gradient effect and only double-counted EWC in the reported
                # critic loss. Only the actor penalty belongs here.
                actor_loss += self.ewc_actor.calculate_ewc_loss(adaptive_lambda)

            # Optimize the actor
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Polyak-average the target networks
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss if isinstance(actor_loss, (int, float)) else actor_loss.item()
        }

    def update_lr_schedulers(self, reward):
        """Step both plateau schedulers with the latest episode reward."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


def train():
    """训练TD3智能体"""

    # 创建保存结果的目录
    os.makedirs(" results", exist_ok=True)

    # 初始化环境
    env = Environment()

    # 计算状态和动作维度
    state_dim = 2 + NUM_USERS * 3 + 1  # UAV位置(2) + 用户信息(3*NUM_USERS) + 步数(1)
    action_dim = 2
    max_action = 1

    # 初始化两个TD3智能体：有EWC和无EWC
    agent_with_ewc = TD3(state_dim, action_dim, max_action, use_ewc=True)
    agent_without_ewc = TD3(state_dim, action_dim, max_action, use_ewc=False)

    # 训练参数
    total_episodes = 600  # 总共600轮
    episodes_per_task = 200  # 每个任务200轮
    eval_freq = 50  # 每50轮次生成一次图表

    # 记录训练过程 - 分别记录两个智能体
    rewards_history_ewc = []
    rewards_history_no_ewc = []
    smoothed_rewards_ewc = []
    smoothed_rewards_no_ewc = []
    best_reward_ewc = -float('inf')
    best_reward_no_ewc = -float('inf')

    start_time = time.time()

    for phase in range(1, 4):
        # 更新任务生成状态
        env.update_task_generating_users(phase)

        # 切换任务
        agent_with_ewc.switch_task(phase)
        agent_without_ewc.switch_task(phase)

        # 阶段性提高探索噪声，促进适应新环境
        phase_noise_base = EXPLORATION_NOISE_START * (0.9 ** (phase - 1))
        phase_noise = np.linspace(phase_noise_base, EXPLORATION_NOISE_END, episodes_per_task)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode

            # 训练有EWC的智能体
            state = env.reset()
            agent_with_ewc.actor.reset_hidden()
            agent_with_ewc.critic.reset_hidden()

            episode_reward_ewc = 0
            current_noise = phase_noise[episode - 1]

            for step in range(1, MAX_STEPS + 1):
                action = agent_with_ewc.select_action(state, noise_scale=current_noise)
                next_state, reward, done, info = env.step(action)
                agent_with_ewc.memory.add(state, action, reward, next_state, done)
                agent_with_ewc.train()
                state = next_state
                episode_reward_ewc += reward
                if done:
                    break

            # 训练无EWC的智能体
            state = env.reset()
            agent_without_ewc.actor.reset_hidden()
            agent_without_ewc.critic.reset_hidden()

            episode_reward_no_ewc = 0

            for step in range(1, MAX_STEPS + 1):
                action = agent_without_ewc.select_action(state, noise_scale=current_noise)
                next_state, reward, done, info = env.step(action)
                agent_without_ewc.memory.add(state, action, reward, next_state, done)
                agent_without_ewc.train()
                state = next_state
                episode_reward_no_ewc += reward
                if done:
                    break

            # 记录历史数据
            rewards_history_ewc.append(episode_reward_ewc)
            rewards_history_no_ewc.append(episode_reward_no_ewc)

            # 平滑奖励曲线
            if len(rewards_history_ewc) >= 10:
                smoothed_rewards_ewc.append(np.mean(rewards_history_ewc[-10:]))
                smoothed_rewards_no_ewc.append(np.mean(rewards_history_no_ewc[-10:]))
            else:
                smoothed_rewards_ewc.append(episode_reward_ewc)
                smoothed_rewards_no_ewc.append(episode_reward_no_ewc)

            # 更新最佳结果
            if episode_reward_ewc > best_reward_ewc:
                best_reward_ewc = episode_reward_ewc
                torch.save(agent_with_ewc.actor.state_dict(), f" results/best_actor_ewc_phase_{phase}.pth")

            if episode_reward_no_ewc > best_reward_no_ewc:
                best_reward_no_ewc = episode_reward_no_ewc
                torch.save(agent_without_ewc.actor.state_dict(), f" results/best_actor_no_ewc_phase_{phase}.pth")

            # 更新学习率调度器
            agent_with_ewc.update_lr_schedulers(episode_reward_ewc)
            agent_without_ewc.update_lr_schedulers(episode_reward_no_ewc)

            # 打印训练信息
            elapsed_time = time.time() - start_time
            print(f"Phase: {phase} | Episode: {episode}/{episodes_per_task} | "
                  f"Global Episode: {global_episode}/{total_episodes} | "
                  f"Reward EWC: {episode_reward_ewc:.2f} | "
                  f"Reward No-EWC: {episode_reward_no_ewc:.2f} | "
                  f"Noise: {current_noise:.3f} | "
                  f"Time: {elapsed_time:.2f}s")

            # 每隔一定轮次生成图表
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                plt.figure(figsize=(15, 5))

                # 奖励曲线对比
                plt.subplot(1, 2, 1)
                plt.plot(rewards_history_ewc, alpha=0.3, color='blue', label='EWC Raw')
                plt.plot(smoothed_rewards_ewc, color='blue', linewidth=2, label='EWC Smoothed')
                plt.plot(rewards_history_no_ewc, alpha=0.3, color='red', label='No-EWC Raw')
                plt.plot(smoothed_rewards_no_ewc, color='red', linewidth=2, label='No-EWC Smoothed')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
                plt.title("奖励对比 (EWC vs No-EWC)")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                # 生成轨迹图（使用有EWC的智能体）
                plt.subplot(1, 2, 2)

                # 测试有EWC的智能体并生成轨迹
                state = env.reset()
                agent_with_ewc.actor.reset_hidden()
                trajectory = [env.uav_position.copy()]

                for step in range(1, MAX_STEPS + 1):
                    action = agent_with_ewc.select_action(state, noise_scale=0)
                    next_state, reward, done, info = env.step(action)
                    trajectory.append(env.uav_position.copy())
                    state = next_state
                    if done:
                        break

                trajectory = np.array(trajectory)

                # 绘制用户位置
                for i, pos in enumerate(env.user_positions):
                    if env.task_generating_users[i]:
                        color = 'green' if env.collected_tasks[i] else 'red'
                    else:
                        color = 'gray'
                    plt.scatter(pos[0], pos[1], s=100, c=color)
                    plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=10)

                # 绘制UAV轨迹
                plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.7, label='UAV轨迹')
                plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
                plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

                plt.title(f"UAV轨迹 (Episode {global_episode})")
                plt.xlabel("X坐标 (m)")
                plt.ylabel("Y坐标 (m)")
                plt.xlim(0, AREA_SIZE)
                plt.ylim(0, AREA_SIZE)
                plt.grid(True)
                plt.legend()

                plt.tight_layout()
                plt.savefig(f" results/comparison_episode_{global_episode}.png")
                plt.close()

                # 保存检查点
                torch.save({
                    'actor_ewc_state_dict': agent_with_ewc.actor.state_dict(),
                    'actor_no_ewc_state_dict': agent_without_ewc.actor.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history_ewc': rewards_history_ewc,
                    'rewards_history_no_ewc': rewards_history_no_ewc,
                    'best_reward_ewc': best_reward_ewc,
                    'best_reward_no_ewc': best_reward_no_ewc
                }, f" results/checkpoint_episode_{global_episode}.pt")

        # 每个任务阶段结束后保存模型
        torch.save(agent_with_ewc.actor.state_dict(), f" results/actor_ewc_phase_{phase}.pth")
        torch.save(agent_without_ewc.actor.state_dict(), f" results/actor_no_ewc_phase_{phase}.pth")

    print(f"Training completed!")
    print(f"Best result with EWC: {best_reward_ewc:.2f}")
    print(f"Best result without EWC: {best_reward_no_ewc:.2f}")

    return agent_with_ewc, agent_without_ewc, env


def test_and_visualize(agent_ewc, agent_no_ewc, env, phase=3):
    """Evaluate the best saved models for one task phase and plot a comparison.

    Loads the best actor checkpoints saved during training for ``phase``,
    runs one greedy (noise-free) episode per agent, saves a side-by-side
    trajectory figure, and prints a reward summary.

    Args:
        agent_ewc: agent trained with the EWC regularizer.
        agent_no_ewc: agent trained without EWC.
        env: the shared Environment instance used during training.
        phase: task phase (1-3) whose best checkpoints are evaluated.
    """
    # Load the best checkpoints. map_location makes the load work even when
    # the weights were saved on GPU and we are now on a CPU-only machine.
    # NOTE: the leading space in " results/..." is intentional here — it must
    # match the (space-prefixed) paths used when saving during training.
    agent_ewc.actor.load_state_dict(
        torch.load(f" results/best_actor_ewc_phase_{phase}.pth", map_location=device))
    agent_no_ewc.actor.load_state_dict(
        torch.load(f" results/best_actor_no_ewc_phase_{phase}.pth", map_location=device))
    agent_ewc.actor.eval()
    agent_no_ewc.actor.eval()

    # Activate the task-generation pattern for the requested phase.
    env.update_task_generating_users(phase)

    def _run_greedy_episode(agent):
        """Run one deterministic episode; return (total_reward, trajectory, last info)."""
        state = env.reset()
        agent.actor.reset_hidden()  # clear the GRU hidden state between episodes

        total_reward = 0
        trajectory = [env.uav_position.copy()]
        info = {}

        for _ in range(MAX_STEPS):
            action = agent.select_action(state, noise_scale=0)
            next_state, reward, done, info = env.step(action)
            # Record the position AFTER stepping so the path includes the
            # final UAV position (the original appended before stepping,
            # duplicating the start point and dropping the endpoint).
            trajectory.append(env.uav_position.copy())
            total_reward += reward
            state = next_state
            if done:
                break

        return total_reward, np.array(trajectory), info

    def _plot_trajectory(trajectory, reward, line_style, title_prefix):
        """Draw user markers and one UAV trajectory on the current subplot axes."""
        for i, pos in enumerate(env.user_positions):
            # green = collected, red = pending, gray = not generating tasks
            if env.task_generating_users[i]:
                color = 'green' if env.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)
            plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=10)

        plt.plot(trajectory[:, 0], trajectory[:, 1], line_style, alpha=0.7)
        plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^')
        plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*')
        collected = sum(env.collected_tasks & env.task_generating_users)
        active = sum(env.task_generating_users)
        plt.title(f"{title_prefix}\n收集: {collected}/{active}, 奖励: {reward:.2f}")
        plt.xlabel("X坐标 (m)")
        plt.ylabel("Y坐标 (m)")
        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        plt.grid(True)

    plt.figure(figsize=(15, 6))

    # EWC agent. (The original evaluated each agent twice and discarded the
    # first run's results; each agent is now evaluated exactly once.)
    plt.subplot(1, 2, 1)
    reward_ewc, trajectory_ewc, info_ewc = _run_greedy_episode(agent_ewc)
    _plot_trajectory(trajectory_ewc, reward_ewc, 'b-', "有EWC智能体轨迹")

    # No-EWC agent.
    plt.subplot(1, 2, 2)
    reward_no_ewc, trajectory_no_ewc, info_no_ewc = _run_greedy_episode(agent_no_ewc)
    _plot_trajectory(trajectory_no_ewc, reward_no_ewc, 'r-', "无EWC智能体轨迹")

    plt.tight_layout()
    plt.savefig(f" results/final_comparison_phase_{phase}.png")
    plt.close()

    # Print the head-to-head test results.
    print(f"\n测试结果对比 (阶段 {phase}):")
    print(f"有EWC智能体 - 奖励: {reward_ewc:.2f}")
    print(f"无EWC智能体 - 奖励: {reward_no_ewc:.2f}")
    print(f"EWC优势: {reward_ewc - reward_no_ewc:.2f}")


if __name__ == "__main__":
    # Train the EWC and non-EWC agents across every task phase.
    trained_ewc, trained_no_ewc, shared_env = train()

    # Evaluate the best checkpoint from each phase and emit comparison plots.
    for task_phase in (1, 2, 3):
        print(f"\n测试阶段 {task_phase} 的模型性能对比:")
        test_and_visualize(trained_ewc, trained_no_ewc, shared_env, phase=task_phase)