import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time
import matplotlib

# Set random seeds for reproducibility
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

# Check GPU availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei so CJK labels render in plots
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with a CJK font

EPISODES_PER_TASK = 1500  # episodes trained per continual-learning task/phase

# Environment parameters
AREA_SIZE = 100  # area size: 100m x 100m
NUM_USERS = 10  # number of ground users
MAX_STEPS = 200  # maximum steps per episode
MAX_DISTANCE_COLLECT = 15  # maximum distance at which the UAV can collect a task

# UAV parameters
UAV_SPEED = 5.0  # UAV speed (m/s)
UAV_ENERGY_PER_METER = 0.1  # energy cost per meter flown
UAV_HOVER_ENERGY = 0.5  # hovering energy cost per task collection

# Task parameters
TASK_SIZE = [10, 50]  # task size range (MB)
TASK_DELAY_WEIGHT = 0.6  # delay weight -- NOTE(review): not referenced in the visible code
ENERGY_WEIGHT = 0.4  # energy weight -- NOTE(review): not referenced in the visible code

# TD3 hyperparameters
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99  # discount factor
TAU = 0.005  # soft target-update rate
BUFFER_SIZE = 200000
BATCH_SIZE = 256
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.1  # global reward scaling factor

# EWC parameters
EWC_LAMBDA = 100  # EWC regularization strength
FISHER_SAMPLE_SIZE = 1000  # number of replay samples used to estimate the Fisher matrix

# LSTM parameters
SEQUENCE_LENGTH = 10  # LSTM sequence (observation window) length
HIDDEN_SIZE = 128  # LSTM hidden size


class Environment:
    """UAV task-collection environment on an AREA_SIZE x AREA_SIZE square.

    NUM_USERS ground users sit at fixed positions; depending on the training
    phase, a subset of them generates tasks.  The UAV moves with continuous
    2-D actions (scaled by UAV_SPEED) and collects a task when it comes
    within MAX_DISTANCE_COLLECT of a generating user.  The last
    SEQUENCE_LENGTH observations are stacked into a [seq_len, state_dim]
    window for the agent's LSTM.
    """

    def __init__(self):
        # Fixed user positions, sampled once for the whole experiment
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))

        # Fixed task sizes in MB
        self.task_sizes = np.random.uniform(TASK_SIZE[0], TASK_SIZE[1], size=NUM_USERS)

        # Task-generation mask -- initially every user generates a task
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)

        # UAV starts at the center of the area
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Per-user "task already collected" flags
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # Step counter within the current episode
        self.step_count = 0

        # Accumulated delay and energy for the episode
        self.total_delay = 0
        self.total_energy = 0

        # UAV trajectory for rendering
        self.trajectory = [self.uav_position.copy()]

        # Previous UAV-to-user distances, used by the proximity reward
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Rolling observation window fed to the LSTM
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        # Current curriculum phase (1, 2 or 3)
        self.current_phase = 1

    def update_task_generating_users(self, phase):
        """Select which users generate tasks for the given training phase."""
        self.current_phase = phase

        if phase == 1:  # phase 1: all users generate tasks
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 2:  # phase 2: a random subset of 9 users
            indices = np.random.choice(NUM_USERS, 9, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
        else:  # phase 3 (or later): a random subset of 8 users
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True

        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Start a new episode and return the initial observation window."""
        # UAV back to the center
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Clear collection flags
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # Reset counters and accumulators
        self.step_count = 0
        self.total_delay = 0
        self.total_energy = 0

        # Fresh trajectory
        self.trajectory = [self.uav_position.copy()]

        # Recompute baseline distances
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Fresh observation window
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        # Pad the window with copies of the initial state
        state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(state)

        return self._get_lstm_state()

    def step(self, action):
        """Apply one movement action; return (obs_window, reward, done, info)."""
        # Action is a relative move in [-1, 1]^2, scaled by UAV_SPEED
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)

        # Record the trajectory
        self.trajectory.append(self.uav_position.copy())

        # Movement energy is proportional to the distance actually flown
        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        energy_consumed = distance_moved * UAV_ENERGY_PER_METER

        # New distances to every user
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect any in-range tasks
        newly_collected = 0
        collected_indices = []

        for i in range(NUM_USERS):
            # Only generating users whose task has not been collected yet
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    collected_indices.append(i)

                    # Delay grows with collection distance and task size
                    delay = new_distances[i] * self.task_sizes[i] / 10
                    self.total_delay += delay

                    # Hover energy charged per collection
                    energy_consumed += UAV_HOVER_ENERGY

        # Accumulate energy
        self.total_energy += energy_consumed

        # Advance the step counter
        self.step_count += 1

        # Shaped reward
        reward = self._calculate_reward(newly_collected, energy_consumed, collected_indices, new_distances,
                                        self.last_distances)

        # Current distances become the baseline for the next step
        self.last_distances = new_distances

        # Episode ends when every required task is collected or time runs out
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        # Append the new state to the observation window
        state = self._get_state()
        self.observation_history.append(state)

        return self._get_lstm_state(), reward, done, {
            "collected": sum(self.collected_tasks),
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": self.total_energy,
            "delay": self.total_delay,
            "newly_collected": newly_collected,
            "total_users": NUM_USERS
        }

    def _get_state(self):
        """Flat state: UAV position, per-user (distance, collected, generating), step."""
        state = np.zeros(2 + NUM_USERS * 3 + 1)

        # Normalized UAV position
        state[0:2] = self.uav_position / AREA_SIZE

        # Per-user distance, collected flag and generating flag
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 3
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # distance normalized by the area diagonal
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])

        # Normalized step count
        state[-1] = self.step_count / MAX_STEPS

        return state

    def _get_lstm_state(self):
        """Return the stacked [SEQUENCE_LENGTH, state_dim] observation window."""
        # Pad with the current state if the window is not full yet
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())

        return np.array(list(self.observation_history))

    def _calculate_reward(self, newly_collected, energy_consumed, collected_indices, new_distances, old_distances):
        """Shaped reward: collection + progress + proximity + completion - energy - inefficiency."""
        # Base collection reward (generating users only)
        collection_reward = newly_collected * 20

        # Mild energy penalty so necessary movement stays worthwhile
        energy_penalty = energy_consumed * 0.5

        # Progress toward collecting every required task
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users)
        progress_reward = collected_required / total_required * 10 if total_required > 0 else 0

        # Extra incentive for finishing the last task
        if collected_required == total_required and total_required > 0:
            progress_reward += 20

        # Boost collection reward as fewer tasks remain
        if total_required > 0 and collected_required > 0:
            remaining_ratio = (total_required - collected_required) / total_required
            # Larger bonus once more than half the tasks are done
            if remaining_ratio < 0.5:
                collection_reward *= (1.0 + (0.5 - remaining_ratio) * 2)

        # Reward for approaching uncollected generating users
        proximity_reward = 0
        uncollected_tasks = 0

        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                uncollected_tasks += 1
                # Positive when the UAV got closer this step
                dist_diff = old_distances[i] - new_distances[i]

                # Weight approach more heavily as progress increases
                if collected_required > 0 and total_required > 0:
                    progress_factor = 1.0 + collected_required / total_required
                else:
                    progress_factor = 1.0

                # Quadratic falloff emphasizes being near a task
                proximity_factor = max(0, 1 - (new_distances[i] / (MAX_DISTANCE_COLLECT * 3)) ** 2)
                proximity_reward += dist_diff * 2 * proximity_factor * progress_factor

        # No proximity shaping once everything is collected
        if uncollected_tasks == 0:
            proximity_reward = 0

        # Full-completion bonus
        completion_reward = 100 if collected_required == total_required and total_required > 0 else 0

        # Inefficiency penalty after an adaptation grace period.
        # BUGFIX: check total_required > 0 BEFORE dividing; the original
        # evaluated collected_required / total_required first and would raise
        # ZeroDivisionError whenever no user generates tasks.
        efficiency_penalty = 0
        adaptation_period = 30  # grace period for adapting to a new task set

        if total_required > 0 and self.step_count > adaptation_period \
                and collected_required / total_required < 0.3:
            # Ramp the penalty up gently after the grace period
            efficiency_penalty = (self.step_count - adaptation_period) * 0.03

        # Combine all terms
        reward = collection_reward + progress_reward + proximity_reward + completion_reward - energy_penalty - efficiency_penalty

        # Global scaling
        reward = reward * REWARD_SCALE

        return reward

    def render(self, episode=0, clear_output=True):
        """Save a PNG snapshot of users, UAV, trajectory and collection range."""
        plt.figure(figsize=(10, 10))

        # Users, colored by task status
        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                if self.collected_tasks[i]:
                    color = 'green'  # task collected
                else:
                    color = 'red'  # task not collected yet
            else:
                color = 'gray'  # user not generating a task

            plt.scatter(pos[0], pos[1], s=100, c=color)
            plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=12)

        # UAV position and path so far
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')

        # Collection radius
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]),
                            MAX_DISTANCE_COLLECT, color='blue', fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)

        # Status line in the title (runtime string, kept verbatim)
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        plt.title(title)
        plt.grid(True)

        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


class LSTMActor(nn.Module):
    """Recurrent deterministic policy for TD3.

    An LSTM encodes the [batch, seq_len, state_dim] observation window; the
    final timestep's output feeds a layer-normalized MLP head whose tanh
    output is scaled to the action range.  The (h, c) state is cached on the
    module between calls; callers reset it per batch/episode.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(LSTMActor, self).__init__()

        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # Recurrent encoder over the observation sequence
        self.lstm = nn.LSTM(input_size=state_dim,
                            hidden_size=self.hidden_size,
                            num_layers=1,
                            batch_first=True)

        # MLP head mapping the final LSTM output to an action
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)

        self.max_action = max_action

        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)

        # Cached (h, c) LSTM state; reset by callers as needed
        self.hidden = None

        self._init_weights()

    def _init_weights(self):
        """Xavier-initialize every linear layer and zero its bias."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.constant_(module.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        """Map a [batch, seq_len, state_dim] window to a bounded action."""
        # Lazily (re)initialize the cached hidden state when required
        if reset_hidden or self.hidden is None:
            self.reset_hidden(state.size(0))

        lstm_out, self.hidden = self.lstm(state, self.hidden)

        # Only the final timestep drives the action head
        features = lstm_out[:, -1]

        h1 = self.ln1(torch.relu(self.layer1(features)))
        h2 = self.ln2(torch.relu(self.layer2(h1)))
        return self.max_action * torch.tanh(self.layer3(h2))

    def reset_hidden(self, batch_size=1):
        """Replace the cached (h, c) state with zeros for the given batch size."""
        h0 = torch.zeros(1, batch_size, self.hidden_size).to(device)
        c0 = torch.zeros(1, batch_size, self.hidden_size).to(device)
        self.hidden = (h0, c0)


class LSTMCritic(nn.Module):
    """Twin-Q recurrent critic for TD3.

    Two independent LSTM encoders (one per Q head) process the observation
    window; each head concatenates its final LSTM output with the action and
    scores it through a layer-normalized MLP.

    NOTE(review): the hidden states (q1_hidden / q2_hidden) are cached on the
    module across forward calls, so behavior depends on callers invoking
    reset_hidden()/reset_q1_hidden() with the correct batch size before each
    batched pass -- confirm this call order before refactoring.
    """

    def __init__(self, state_dim, action_dim):
        super(LSTMCritic, self).__init__()

        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # Two independent LSTMs over the state sequence, one per Q head
        self.q1_lstm = nn.LSTM(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        self.q2_lstm = nn.LSTM(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        # Q1 head
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, 1)

        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Q2 head
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, 1)

        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        # Cached LSTM hidden states for each head
        self.q1_hidden = None
        self.q2_hidden = None

        # Initialize network weights
        self._init_weights()

    def _init_weights(self):
        # Xavier weights; small positive bias (0.01, unlike the actor's 0.0)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)

    def forward(self, state, action, reset_hidden=False):
        """Return (Q1, Q2) estimates.

        state: [batch_size, seq_len, state_dim]; action: [batch_size, action_dim].
        """
        # Reset the cached hidden states when asked, or when none exist yet
        if reset_hidden or self.q1_hidden is None or self.q2_hidden is None:
            self.reset_hidden(state.size(0))

        # Encode the sequence with each head's LSTM (hidden states persist)
        q1_lstm_out, self.q1_hidden = self.q1_lstm(state, self.q1_hidden)
        q2_lstm_out, self.q2_hidden = self.q2_lstm(state, self.q2_hidden)

        # Keep only the final timestep of each sequence
        q1_state = q1_lstm_out[:, -1]
        q2_state = q2_lstm_out[:, -1]

        # Concatenate the state representation with the action
        q1_x = torch.cat([q1_state, action], dim=1)
        q2_x = torch.cat([q2_state, action], dim=1)

        # Q1 head
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1 = self.q1_output(q1)

        # Q2 head
        q2 = self.q2_ln1(torch.relu(self.q2_layer1(q2_x)))
        q2 = self.q2_ln2(torch.relu(self.q2_layer2(q2)))
        q2 = self.q2_output(q2)

        return q1, q2

    def Q1(self, state, action, reset_hidden=False):
        """Q1 estimate only -- used for the actor (policy-gradient) update."""
        if reset_hidden or self.q1_hidden is None:
            self.reset_q1_hidden(state.size(0))

        q1_lstm_out, self.q1_hidden = self.q1_lstm(state, self.q1_hidden)
        q1_state = q1_lstm_out[:, -1]
        q1_x = torch.cat([q1_state, action], dim=1)

        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1 = self.q1_output(q1)

        return q1

    def reset_hidden(self, batch_size=1):
        """Reset the hidden states of both LSTM heads."""
        self.reset_q1_hidden(batch_size)
        self.reset_q2_hidden(batch_size)

    def reset_q1_hidden(self, batch_size=1):
        # Fresh zero (h, c) pair for the Q1 LSTM
        self.q1_hidden = (
            torch.zeros(1, batch_size, self.hidden_size).to(device),
            torch.zeros(1, batch_size, self.hidden_size).to(device)
        )

    def reset_q2_hidden(self, batch_size=1):
        # Fresh zero (h, c) pair for the Q2 LSTM
        self.q2_hidden = (
            torch.zeros(1, batch_size, self.hidden_size).to(device),
            torch.zeros(1, batch_size, self.hidden_size).to(device)
        )


class ReplayBuffer:
    """Fixed-capacity FIFO experience replay for (s, a, r, s', done) tuples."""

    def __init__(self, max_size=BUFFER_SIZE):
        # deque drops the oldest transition automatically once full
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Append one transition."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample up to batch_size transitions, stacked per field."""
        count = min(len(self.buffer), batch_size)
        transitions = random.sample(self.buffer, count)
        # Transpose the list of tuples into per-field stacked arrays
        states, actions, rewards, next_states, dones = (
            np.stack(field) for field in zip(*transitions)
        )
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


class EWC:
    """Elastic Weight Consolidation (EWC) regularizer.

    After finishing a task, store_task_parameters() snapshots the model's
    parameters and estimates a diagonal Fisher information matrix from replay
    samples; calculate_ewc_loss() then penalizes movement of important
    parameters while training on later tasks.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        self.importance = {}  # diagonal Fisher information per parameter name
        self.old_params = {}  # parameter snapshot from the previous task
        self.fisher_diagonal = {}  # not referenced in the visible code

    def _calculate_fisher_info(self, replay_buffer):
        """Estimate the diagonal Fisher information from single-sample gradients."""
        # Start every Fisher entry at zero
        fisher = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                fisher[name] = torch.zeros_like(param).to(device)

        # Sample transitions and accumulate squared gradients
        self.model.train()  # train mode (not eval) so the graph matches training
        samples_count = min(self.fisher_sample_size, len(replay_buffer))
        if samples_count <= 0:
            return fisher

        for _ in range(samples_count):
            # Draw a single random transition
            states, actions, _, _, _ = replay_buffer.sample(1)
            states = torch.FloatTensor(states).to(device)
            actions = torch.FloatTensor(actions).to(device)

            # Forward pass
            self.model.zero_grad()

            # The surrogate loss depends on the model type
            if isinstance(self.model, LSTMActor):
                # Reset the actor's hidden state for batch size 1
                self.model.reset_hidden(1)
                # Actor surrogate: MSE between the current policy output and
                # the replayed (behavior) action
                outputs = self.model(states)
                loss = ((outputs - actions) ** 2).mean()
            else:
                # Reset the critic's hidden states for batch size 1
                self.model.reset_hidden(1)
                # Critic surrogate: the mean Q1 estimate as a scalar score.
                # NOTE(review): neither surrogate is a true log-likelihood, so
                # this is only an approximation of the Fisher information.
                outputs, _ = self.model(states, actions)
                loss = outputs.mean()

            # Backward pass to obtain gradients
            loss.backward()

            # Accumulate squared gradients, averaged over samples
            for name, param in self.model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    fisher[name] += param.grad.pow(2) / samples_count

        return fisher

    def store_task_parameters(self, task_id, replay_buffer):
        """Snapshot current parameters and compute the Fisher matrix for a task."""
        print(f"Storing parameters for task {task_id} and computing Fisher information matrix")

        # Snapshot the current parameters
        self.old_params = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.old_params[name] = param.data.clone()

        # Estimate the Fisher information matrix
        self.importance = self._calculate_fisher_info(replay_buffer)

        print(f"Stored {len(self.old_params)} parameters and computed Fisher matrices")

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Return lam * sum_i F_i * (theta_i - theta*_i)^2 (0 before any snapshot)."""
        loss = 0

        # Nothing to regularize before the first task snapshot
        if not self.old_params or not self.importance:
            return loss

        for name, param in self.model.named_parameters():
            if name in self.old_params and name in self.importance and param.requires_grad:
                # Quadratic drift penalty, weighted by Fisher importance
                loss += torch.sum(self.importance[name] * (param - self.old_params[name]).pow(2))

        return lam * loss


class TD3:
    """Twin Delayed DDPG (TD3) combined with Elastic Weight Consolidation (EWC)."""

    def __init__(self, state_dim, action_dim, max_action):
        # Actor, target actor and optimizer
        self.actor = LSTMActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = LSTMActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin-Q critic, target critic and optimizer
        self.critic = LSTMCritic(state_dim, action_dim).to(device)
        self.critic_target = LSTMCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()

        # TD3-specific parameters
        self.policy_noise = 0.2 * max_action  # target-policy smoothing noise std
        self.noise_clip = 0.5 * max_action  # clip range for the smoothing noise
        self.policy_freq = 2  # delayed policy-update period
        self.total_it = 0

        # EWC components, one per network
        self.ewc_actor = EWC(self.actor)
        self.ewc_critic = EWC(self.critic)
        self.current_task = 1  # initial task id

        # Per-task exploration-noise schedules (later tasks start lower)
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            2: np.linspace(EXPLORATION_NOISE_START * 0.8, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            3: np.linspace(EXPLORATION_NOISE_START * 0.7, EXPLORATION_NOISE_END, EPISODES_PER_TASK)
        }

        # LR schedulers stepped with episode reward ('max' mode).
        # NOTE(review): `verbose` is deprecated in recent torch releases.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Pick an action for one observation window, with optional Gaussian noise."""
        # Ensure the LSTM sequence shape [batch, seq_len, feature]
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)  # add the batch dimension

        state = torch.FloatTensor(state).to(device)

        # Explicitly reset the actor's hidden state for batch size 1
        self.actor.reset_hidden(1)

        action = self.actor(state).cpu().data.numpy().flatten()

        # Additive exploration noise, clipped back to the action range
        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise

        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Consolidate the previous task via EWC and switch to a new task id."""
        print(f"\nSwitching to task {task_id}")

        # Snapshot old-task parameters and compute Fisher matrices
        if self.current_task > 0 and len(self.memory) > 0:
            self.ewc_actor.store_task_parameters(self.current_task, self.memory)
            self.ewc_critic.store_task_parameters(self.current_task, self.memory)

        # Record the new task
        self.current_task = task_id

        # Reset actor and critic LSTM states
        self.actor.reset_hidden()
        self.critic.reset_hidden()

        print(f"Reset LSTM states for new task {task_id}")

    def train(self):
        """One gradient step: critic every call, actor every policy_freq calls.

        Returns a loss dict, or None when the buffer is still too small.
        """
        self.total_it += 1

        # Wait until enough transitions are buffered
        if len(self.memory) < BATCH_SIZE:
            return

        # Sample a batch from replay
        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Reset LSTM hidden states for this batch size
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        with torch.no_grad():
            # Reset the target networks' hidden states as well
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target-policy smoothing: clipped Gaussian noise on the target action
            noise = torch.FloatTensor(action.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)

            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q target
            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q = reward + (1 - done) * GAMMA * target_q

        # Current Q estimates
        current_q1, current_q2 = self.critic(state, action)

        # Critic loss: both heads regress to the same target (plus EWC below)
        critic_loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)

        # EWC regularization kicks in after the first task
        if self.current_task > 1:
            critic_ewc_loss = self.ewc_critic.calculate_ewc_loss()
            critic_loss += critic_ewc_loss

        # Optimize the critic.
        # NOTE(review): retain_graph=True was added, presumably because the
        # critic's cached LSTM hidden states are reused by the actor update
        # below -- confirm whether it is still required before removing.
        self.critic_optimizer.zero_grad()
        critic_loss.backward(retain_graph=True)  # keep the graph for the later Q1 pass
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        # Delayed policy update (actor_loss stays the int 0 on skipped iterations)
        actor_loss = 0
        if self.total_it % self.policy_freq == 0:
            # Reset the actor's LSTM state
            self.actor.reset_hidden(BATCH_SIZE)
            # Reset the critic's Q1 LSTM state for the policy-gradient pass
            self.critic.reset_q1_hidden(BATCH_SIZE)

            # Deterministic policy gradient: maximize Q1 of the actor's action
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            # EWC regularization kicks in after the first task
            if self.current_task > 1:
                actor_ewc_loss = self.ewc_actor.calculate_ewc_loss()
                actor_loss += actor_ewc_loss

            # Optimize the actor
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Soft-update both target networks toward the online networks
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss if isinstance(actor_loss, (int, float)) else actor_loss.item()
        }

    def update_lr_schedulers(self, reward):
        """Step both plateau schedulers with the latest episode reward."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


def _plot_training_curves(global_episode, episodes_per_task, rewards_history,
                          smoothed_rewards, collection_history, energy_history, losses):
    """Save the 4-panel training-curve figure for the current global episode."""
    plt.figure(figsize=(20, 5))

    # Reward curve (raw + smoothed) with phase-boundary markers
    plt.subplot(1, 4, 1)
    plt.plot(rewards_history, alpha=0.3, color='blue', label='Raw')
    plt.plot(smoothed_rewards, color='red', label='Smoothed')
    plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
    plt.title("Reward")
    plt.xlabel("Episode")
    plt.ylabel("Reward")
    plt.legend()
    plt.grid(True)

    # Collected-task count per episode
    plt.subplot(1, 4, 2)
    plt.plot(collection_history)
    plt.axvline(x=episodes_per_task, color='green', linestyle='--')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
    plt.title("Collected Tasks")
    plt.xlabel("Episode")
    plt.ylabel("Number of Tasks")
    plt.grid(True)

    # Total energy per episode
    plt.subplot(1, 4, 3)
    plt.plot(energy_history)
    plt.axvline(x=episodes_per_task, color='green', linestyle='--')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
    plt.title("Total Energy")
    plt.xlabel("Episode")
    plt.ylabel("Energy")
    plt.grid(True)

    # Mean critic/actor losses per episode
    plt.subplot(1, 4, 4)
    if losses["critic"]:
        plt.plot(losses["critic"], label='Critic Loss')
    if losses["actor"]:
        plt.plot(losses["actor"], label='Actor Loss')
    plt.axvline(x=episodes_per_task, color='green', linestyle='--')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
    plt.title("Training Loss")
    plt.xlabel("Episode")
    plt.ylabel("Loss")
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(f"results/training_curves_episode_{global_episode}.png")
    plt.close()


def train():
    """Train the TD3+EWC agent over three sequential task phases.

    Each phase changes which users generate tasks (continual-learning
    setting); EWC consolidation happens on every phase switch.  Returns the
    trained agent and the environment.
    """

    # Directory for plots, checkpoints and saved models
    os.makedirs("results", exist_ok=True)

    # Environment
    env = Environment()

    # State and action dimensions
    state_dim = 2 + NUM_USERS * 3 + 1  # UAV position (2) + per-user info (3*NUM_USERS) + step (1)
    action_dim = 2
    max_action = 1

    # TD3 agent
    agent = TD3(state_dim, action_dim, max_action)

    # Training schedule -- derived from the module constant so the values
    # cannot drift apart (the old hard-coded copies disagreed with their
    # comments).  Also removed an unused `noise_schedule` array.
    episodes_per_task = EPISODES_PER_TASK  # episodes per task phase
    total_episodes = 3 * EPISODES_PER_TASK  # three phases in total
    eval_freq = 500  # plot + checkpoint every eval_freq global episodes

    # Training history
    rewards_history = []
    smoothed_rewards = []
    collection_history = []
    energy_history = []
    best_reward = -float('inf')
    best_collection = 0
    losses = {"critic": [], "actor": []}

    start_time = time.time()

    for phase in range(1, 4):
        # Choose which users generate tasks this phase
        env.update_task_generating_users(phase)

        # Consolidate the previous task (EWC) and switch
        agent.switch_task(phase)

        # Slightly lower starting noise for later phases, decayed per episode
        phase_noise_base = EXPLORATION_NOISE_START * (0.9 ** (phase - 1))
        phase_noise = np.linspace(phase_noise_base, EXPLORATION_NOISE_END, episodes_per_task)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode

            # Reset the environment and both recurrent states
            state = env.reset()
            agent.actor.reset_hidden()
            agent.critic.reset_hidden()

            episode_reward = 0
            last_collection = 0
            episode_losses = {"critic": [], "actor": []}

            # Exploration noise for this episode
            current_noise = phase_noise[episode - 1]

            for step in range(1, MAX_STEPS + 1):
                # Act
                action = agent.select_action(state, noise_scale=current_noise)

                # Step the environment
                next_state, reward, done, info = env.step(action)

                # Store the transition
                agent.memory.add(state, action, reward, next_state, done)

                # One gradient step (None until the buffer holds a batch)
                loss_info = agent.train()
                if loss_info:
                    episode_losses["critic"].append(loss_info["critic_loss"])
                    episode_losses["actor"].append(loss_info["actor_loss"])

                # Bookkeeping
                state = next_state
                episode_reward += reward
                last_collection = info["collected_required"]

                # Occasionally render the environment state
                if global_episode % eval_freq == 0 and step % 20 == 0:
                    env.render(global_episode)

                if done:
                    break

            # Per-episode history
            rewards_history.append(episode_reward)
            collection_history.append(last_collection)
            energy_history.append(info["energy"])

            # 10-episode moving average of the reward
            if len(rewards_history) >= 10:
                smoothed_rewards.append(np.mean(rewards_history[-10:]))
            else:
                smoothed_rewards.append(episode_reward)

            # Mean losses for the episode
            if episode_losses["critic"]:
                losses["critic"].append(np.mean(episode_losses["critic"]))
            if episode_losses["actor"]:
                losses["actor"].append(np.mean(episode_losses["actor"]))

            # LR schedulers track the episode reward
            agent.update_lr_schedulers(episode_reward)

            # Best-model tracking -- ratio-based so phases with different task
            # counts are comparable.  (best_reward is overwritten whenever the
            # ratio improves, even if the reward itself is lower.)
            current_required = info["total_required"]
            collection_ratio = last_collection / current_required if current_required > 0 else 0

            if collection_ratio > best_collection or (
                    collection_ratio == best_collection and episode_reward > best_reward):
                best_reward = episode_reward
                best_collection = collection_ratio

                # Save the best actor for this phase
                torch.save(agent.actor.state_dict(), f"results/best_actor_phase_{phase}.pth")

            # Progress log
            elapsed_time = time.time() - start_time
            print(f"Phase: {phase} | Episode: {episode}/{episodes_per_task} | "
                  f"Global Episode: {global_episode}/{total_episodes} | "
                  f"Tasks: {last_collection}/{info['total_required']} | "
                  f"Reward: {episode_reward:.2f} | "
                  f"Energy: {info['energy']:.2f} | "
                  f"Steps: {env.step_count} | "
                  f"Noise: {current_noise:.3f} | "
                  f"Time: {elapsed_time:.2f}s")

            # Periodic training-curve plots and a full checkpoint
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                _plot_training_curves(global_episode, episodes_per_task, rewards_history,
                                      smoothed_rewards, collection_history, energy_history, losses)

                torch.save({
                    'actor_state_dict': agent.actor.state_dict(),
                    'critic_state_dict': agent.critic.state_dict(),
                    'actor_optimizer': agent.actor_optimizer.state_dict(),
                    'critic_optimizer': agent.critic_optimizer.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history': rewards_history,
                    'collection_history': collection_history,
                    'best_reward': best_reward,
                    'best_collection': best_collection
                }, f"results/checkpoint_episode_{global_episode}.pt")

        # Save both networks at the end of each task phase
        torch.save(agent.actor.state_dict(), f"results/actor_phase_{phase}.pth")
        torch.save(agent.critic.state_dict(), f"results/critic_phase_{phase}.pth")

    print(f"Training completed! Best result: {best_collection * 100:.1f}% tasks, Reward: {best_reward:.2f}")
    return agent, env


def test_and_visualize(agent, env, model_path="results/actor_phase_3.pth", phase=3):
    """Evaluate a trained actor and generate visualisation artefacts.

    Loads the actor weights from ``model_path``, runs one greedy
    (noise-free) episode in ``env`` under task-generation ``phase``,
    saves a trajectory plot and reward curves under ``results/`` and
    prints a text summary of the run.

    Args:
        agent: TD3 agent exposing ``actor`` (with ``reset_hidden``) and
            ``select_action(state, noise_scale)``.
        env: Environment instance (see the Environment class).
        model_path: Path to a saved actor ``state_dict``.
        phase: Task-generation phase forwarded to
            ``env.update_task_generating_users``.
    """
    # map_location lets a checkpoint saved on GPU load on a CPU-only
    # host (and vice versa); without it torch.load raises on mismatch.
    agent.actor.load_state_dict(torch.load(model_path, map_location=device))
    agent.actor.eval()

    # Configure which users generate tasks for this phase.
    env.update_task_generating_users(phase)

    # Fresh episode and fresh LSTM hidden state.
    state = env.reset()
    agent.actor.reset_hidden()

    trajectory, step_rewards, total_reward, collection_times, info = \
        _run_test_episode(agent, env, state)

    _plot_trajectory(env, trajectory, collection_times, phase)
    _plot_reward_curves(step_rewards, phase)
    _print_test_summary(env, total_reward, info, collection_times)


def _run_test_episode(agent, env, state):
    """Roll out one deterministic episode; return trajectory and stats.

    Returns:
        trajectory: (steps+1, 2) array; ``trajectory[s]`` is the UAV
            position *after* step ``s`` (index 0 is the start position),
            so ``collection_times`` indexes directly into it.
        step_rewards: per-step reward list.
        total_reward: sum of step rewards.
        collection_times: per-user step at which its task was collected
            (0 if never collected).
        info: the ``info`` dict from the last environment step.
    """
    total_reward = 0
    step_rewards = []
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS)
    info = {}

    for step in range(1, MAX_STEPS + 1):
        # Greedy action: no exploration noise during evaluation.
        action = agent.select_action(state, noise_scale=0)

        # Snapshot collection flags to detect tasks collected this step.
        collected_before = env.collected_tasks.copy()

        next_state, reward, done, info = env.step(action)

        # Record the post-move position. (Appending before the step would
        # duplicate the start point, drop the final position, and shift
        # the collection-link indices by one.)
        trajectory.append(env.uav_position.copy())

        for i in range(NUM_USERS):
            if env.task_generating_users[i] and env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step

        total_reward += reward
        step_rewards.append(reward)
        state = next_state

        # Periodic rendering of the environment state.
        if step % 5 == 0 or done:
            env.render(step)

        if done:
            break

    return np.array(trajectory), step_rewards, total_reward, collection_times, info


def _plot_trajectory(env, trajectory, collection_times, phase):
    """Draw user positions, the UAV path and task-collection links."""
    plt.figure(figsize=(12, 10))

    # Users: green = collected, red = pending, gray = not generating.
    for i, (x, y) in enumerate(env.user_positions):
        if not env.task_generating_users[i]:
            plt.scatter(x, y, s=100, c='gray', marker='o')
            plt.annotate(f"用户 {i + 1}\n(不产生任务)",
                         (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)
        elif env.collected_tasks[i]:
            plt.scatter(x, y, s=150, c='green', marker='o')
            plt.annotate(f"用户 {i + 1}\n(步数 {int(collection_times[i])})",
                         (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)
        else:
            plt.scatter(x, y, s=150, c='red', marker='o')
            plt.annotate(f"用户 {i + 1}\n(未收集)",
                         (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)

    # UAV path with start/end markers.
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    # Step-number labels every 10 positions along the path.
    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]),
                     fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Link each collected task to the UAV position at collection time
    # (trajectory[step] is the post-move position of that step).
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and env.collected_tasks[i]:
            step = int(collection_times[i])
            if step < len(trajectory):
                uav_pos = trajectory[step]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]],
                         'g--', alpha=0.5)

    plt.title(
        f"UAV任务收集轨迹 (阶段{phase}: 收集 {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)} 任务, 步数: {env.step_count})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.legend()
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig(f"results/final_uav_trajectory_phase_{phase}.png")
    plt.close()


def _plot_reward_curves(step_rewards, phase):
    """Save per-step and cumulative reward curves for the test episode."""
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 2, 1)
    plt.plot(step_rewards)
    plt.title("步奖励")
    plt.xlabel("步数")
    plt.ylabel("奖励")
    plt.grid(True)

    plt.subplot(1, 2, 2)
    plt.plot(np.cumsum(step_rewards))
    plt.title("累计奖励")
    plt.xlabel("步数")
    plt.ylabel("累计奖励")
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(f"results/test_rewards_phase_{phase}.png")
    plt.close()


def _print_test_summary(env, total_reward, info, collection_times):
    """Print collection rate, reward/energy/delay totals and per-user detail."""
    collected = sum(env.collected_tasks & env.task_generating_users)
    num_tasks = sum(env.task_generating_users)
    # Guard against a phase with no task-generating users.
    rate = (collected / num_tasks * 100) if num_tasks else 0.0

    print(f"\n测试结果 (阶段 {phase}):" if False else "\n测试结果:")
    print(f"收集任务: {collected}/{num_tasks} ({rate:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

    # Per-task collection detail, ordered by collection step.
    print("\n任务收集详情:")
    collection_indices = [(i, int(collection_times[i])) for i in range(NUM_USERS)
                          if env.task_generating_users[i] and env.collected_tasks[i]]
    collection_indices.sort(key=lambda x: x[1])

    for i, step in collection_indices:
        print(f"用户 {i + 1}: 在步数 {step} 收集")

    for i in range(NUM_USERS):
        if env.task_generating_users[i] and not env.collected_tasks[i]:
            print(f"用户 {i + 1}: 未收集")


if __name__ == "__main__":
    # Run the full curriculum of training phases first.
    agent, env = train()

    # Then evaluate the actor checkpoint saved at the end of each phase.
    for phase in (1, 2, 3):
        print(f"\n测试阶段 {phase} 的模型性能:")
        checkpoint = f"results/actor_phase_{phase}.pth"
        test_and_visualize(agent, env, model_path=checkpoint, phase=phase)
