import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import copy
import math
import os
import time
from torch.utils.tensorboard import SummaryWriter
# NOTE(original author): enough tasks were collected (test performance still poor),
# but the reward did not converge — note appears truncated.
# Configure a font that can display Chinese characters in plots
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Check GPU availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Create output directories (relative to the parent of the working directory)
os.makedirs("../trajectory_images", exist_ok=True)
os.makedirs("../models", exist_ok=True)
os.makedirs("../logs", exist_ok=True)

# Seed all RNGs for reproducibility
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)

# Transition tuple stored in the replay buffer
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])


# 使用SumTree实现高效优先级采样
# Binary sum tree for O(log n) proportional priority sampling
class SumTree:
    """Fixed-capacity sum tree.

    Leaves hold per-sample priorities; every internal node stores the sum of
    its children, so the root is the total priority mass and prefix-sum
    lookups run in O(log capacity).
    """

    def __init__(self, capacity):
        self.capacity = capacity
        # Internal nodes occupy indices [0, capacity-2]; leaves start at capacity-1.
        self.tree = np.zeros(2 * capacity - 1)
        self.data = np.zeros(capacity, dtype=object)
        self.write = 0       # next data slot to overwrite (ring buffer)
        self.n_entries = 0   # number of slots filled so far

    def _propagate(self, idx, change):
        # Push a priority delta from a leaf up to the root, iteratively.
        while idx != 0:
            idx = (idx - 1) // 2
            self.tree[idx] += change

    def _retrieve(self, idx, s):
        # Descend from `idx` following the prefix sum `s` until a leaf is hit.
        while True:
            left = 2 * idx + 1
            if left >= len(self.tree):
                return idx
            if s <= self.tree[left]:
                idx = left
            else:
                s -= self.tree[left]
                idx = left + 1

    def total(self):
        """Total priority mass (the value stored at the root)."""
        return self.tree[0]

    def add(self, priority, data):
        """Insert `data` with `priority`, overwriting the oldest entry when full."""
        leaf = self.write + self.capacity - 1
        self.data[self.write] = data
        self.update(leaf, priority)
        self.write = (self.write + 1) % self.capacity
        self.n_entries = min(self.n_entries + 1, self.capacity)

    def update(self, idx, priority):
        """Set the priority of leaf `idx` and refresh all ancestor sums."""
        delta = priority - self.tree[idx]
        self.tree[idx] = priority
        self._propagate(idx, delta)

    def get(self, s):
        """Return (leaf index, priority, data) for the prefix sum `s`."""
        leaf = self._retrieve(0, s)
        return leaf, self.tree[leaf], self.data[leaf - self.capacity + 1]


class PrioritizedReplayBuffer:
    """Proportional prioritized experience replay backed by a SumTree.

    Sampling is stratified over `batch_size` equal segments of the total
    priority mass; importance-sampling correction is annealed via beta -> 1.
    """

    def __init__(self, capacity, alpha=0.6, beta=0.4, beta_increment=0.001):
        self.alpha = alpha                    # priority exponent (0 = uniform sampling)
        self.beta = beta                      # IS-weight exponent, annealed toward 1
        self.beta_increment = beta_increment  # per-sample() beta increase
        self.tree = SumTree(capacity)
        self.capacity = capacity
        self.eps = 1e-5                       # keeps priorities strictly positive

    def _get_priority(self, error):
        return (np.abs(error) + self.eps) ** self.alpha

    def push(self, state, action, reward, next_state, done):
        """Store a transition with the current maximum leaf priority so that
        new samples are replayed at least once."""
        experience = Experience(state, action, reward, next_state, done)
        max_priority = np.max(self.tree.tree[-self.tree.capacity:]) if self.tree.n_entries > 0 else 1.0
        self.tree.add(max_priority, experience)

    def sample(self, batch_size):
        """Draw a stratified batch.

        Returns (states, actions, rewards, next_states, dones, tree indices,
        importance-sampling weights) with the tensors already on `device`.
        """
        batch = []
        idxs = []
        priorities = []
        segment = self.tree.total() / batch_size

        self.beta = np.min([1.0, self.beta + self.beta_increment])

        for i in range(batch_size):
            # One uniform draw per segment keeps the batch spread over the mass.
            s = random.uniform(segment * i, segment * (i + 1))
            idx, priority, data = self.tree.get(s)
            batch.append(data)
            idxs.append(idx)
            priorities.append(priority)

        # BUGFIX(perf): stack into numpy arrays first — building a tensor from
        # a Python list of ndarrays is extremely slow and triggers a PyTorch
        # UserWarning on every sample() call.
        states = torch.as_tensor(np.stack([e.state for e in batch]), dtype=torch.float).to(device)
        actions = torch.as_tensor(np.asarray([e.action for e in batch]), dtype=torch.long).to(device)
        rewards = torch.as_tensor(np.asarray([e.reward for e in batch]), dtype=torch.float).to(device)
        next_states = torch.as_tensor(np.stack([e.next_state for e in batch]), dtype=torch.float).to(device)
        dones = torch.as_tensor(np.asarray([e.done for e in batch], dtype=np.float32), dtype=torch.float).to(device)

        # Importance-sampling weights, normalized by the batch maximum.
        sampling_weights = np.array(priorities) / self.tree.total()
        is_weights = np.power(self.tree.n_entries * sampling_weights, -self.beta)
        is_weights /= is_weights.max()
        is_weights = torch.as_tensor(is_weights, dtype=torch.float).to(device)

        return states, actions, rewards, next_states, dones, idxs, is_weights

    def update_priorities(self, indices, errors):
        """Refresh leaf priorities from freshly computed TD errors."""
        for idx, error in zip(indices, errors):
            self.tree.update(idx, self._get_priority(error))

    def __len__(self):
        return self.tree.n_entries


class DuelingQNetwork(nn.Module):
    """Dueling DQN: a shared feature trunk feeding separate state-value and
    advantage heads, combined as Q = V + (A - mean(A))."""

    def __init__(self, state_dim, action_dim):
        super(DuelingQNetwork, self).__init__()
        # Shared feature extractor (deliberately wide for this state space).
        self.feature_layer = nn.Sequential(
            nn.Linear(state_dim, 1024),
            nn.ReLU(),
            nn.Linear(1024, 768),
            nn.ReLU(),
            nn.Linear(768, 512),
            nn.ReLU()
        )

        # V(s): scalar state value.
        self.value_stream = nn.Sequential(
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 1)
        )

        # A(s, a): one advantage per action.
        self.advantage_stream = nn.Sequential(
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, action_dim)
        )

        self.apply(self._init_weights)

    def _init_weights(self, module):
        # Kaiming-normal suits the ReLU activations; small positive bias.
        if not isinstance(module, nn.Linear):
            return
        nn.init.kaiming_normal_(module.weight, nonlinearity='relu')
        if module.bias is not None:
            nn.init.constant_(module.bias, 0.1)

    def forward(self, x):
        h = self.feature_layer(x)
        v = self.value_stream(h)
        a = self.advantage_stream(h)
        # Centering the advantages makes the V/A decomposition identifiable.
        return v + a - a.mean(dim=1, keepdim=True)


class User:
    """Ground user at a fixed (x, y) position; `active` marks membership in the
    current task and `visited` whether the drone has already served it."""

    def __init__(self, x, y):
        self.x, self.y = x, y
        self.active = False   # part of the currently active task?
        self.visited = False  # already served this episode?


class DroneEnvironment:
    """2D drone service environment.

    One drone moves over a 100x100 field and must fly within
    `config["service_radius"]` of every *active* user. Rewards shape the
    drone toward unserved users; an episode ends after `max_steps` steps or
    once every active user has been visited.
    """

    def __init__(self, config):
        self.width = 100
        self.height = 100
        self.service_radius = config["service_radius"]
        self.max_steps = 300
        self.steps = 0
        self.episode_num = 0
        # Baseline for the per-step progress reward (see step()).
        self.prev_visited_count = 0

        # Fixed user positions, shared across all tasks.
        self.all_users = []
        for _ in range(10):
            x = random.uniform(0, self.width)
            y = random.uniform(0, self.height)
            self.all_users.append(User(x, y))

        # A "task" is a random subset of 8 activated users.
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False

        # Drone state
        self.drone_x = random.uniform(0, self.width)
        self.drone_y = random.uniform(0, self.height)
        self.speed = 5

        # Trajectory log for rendering
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        # Action space: 8 compass directions + hover
        self.actions = ["N", "NE", "E", "SE", "S", "SW", "W", "NW", "hover"]

    def reset(self):
        """Start a new episode and return the initial state vector."""
        self.episode_num += 1
        # Clear per-episode user state
        for user in self.all_users:
            user.visited = False
        # BUGFIX: reset the progress baseline. Previously this carried over
        # from the last episode, so the first step of every episode received a
        # spurious (usually negative) progress reward.
        self.prev_visited_count = 0

        # Curriculum: for the first 300 episodes start near an active user.
        if self.episode_num < 300:
            if self.active_users:
                random_user = random.choice(self.active_users)
                # Random point 10-30 m from the chosen user, clipped to bounds.
                angle = random.uniform(0, 2 * math.pi)
                distance = random.uniform(10, 30)
                self.drone_x = max(0, min(self.width, random_user.x + distance * math.cos(angle)))
                self.drone_y = max(0, min(self.height, random_user.y + distance * math.sin(angle)))
            else:
                self.drone_x = random.uniform(0, self.width)
                self.drone_y = random.uniform(0, self.height)
        else:
            # Later episodes: fully random start position.
            self.drone_x = random.uniform(0, self.width)
            self.drone_y = random.uniform(0, self.height)

        self.steps = 0

        # Clear the trajectory log
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        return self.get_state()

    def switch_task(self):
        """Draw a new random set of 8 active users (a new task) and clear visits."""
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False

    def get_state(self):
        """Build the observation vector (length 2 + 10*6 + 2 + 1 + 1 = 66)."""
        state = []

        # 1. Drone coordinates (normalized)
        state.extend([self.drone_x / self.width, self.drone_y / self.height])

        # 2. Per-user features
        unvisited_count = 0
        unvisited_vec = [0, 0]  # mean direction toward unserved users

        for user in self.all_users:
            # Activation flag
            active = 1.0 if user.active else 0.0
            state.append(active)

            # Already-served flag
            visited = 1.0 if user.visited else 0.0
            state.append(visited)

            # Relative position (normalized)
            dx = (user.x - self.drone_x) / self.width
            dy = (user.y - self.drone_y) / self.height
            state.extend([dx, dy])

            # Log-scaled distance (100 is the field width)
            distance = math.sqrt((user.x - self.drone_x) ** 2 + (user.y - self.drone_y) ** 2)
            scaled_dist = math.log(1 + distance) / math.log(1 + 100)
            state.append(scaled_dist)

            # In-service-range flag
            in_range = 1.0 if distance < self.service_radius else 0.0
            state.append(in_range)

            # Accumulate direction toward active, unserved users
            if user.active and not user.visited:
                unvisited_count += 1
                unvisited_vec[0] += dx
                unvisited_vec[1] += dy

        # 3. Mean direction toward unserved users (normalized by count)
        if unvisited_count > 0:
            unvisited_vec[0] /= unvisited_count
            unvisited_vec[1] /= unvisited_count
        state.extend(unvisited_vec)

        # 4. Normalized step counter
        state.append(self.steps / self.max_steps)

        # 5. Fraction of active users already served (8 active users)
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        state.append(visited_count / 8)

        return np.array(state)

    def step(self, action_idx):
        """Execute one action; returns (next_state, reward, done, info)."""
        action = self.actions[action_idx]
        prev_x, prev_y = self.drone_x, self.drone_y

        # Move (diagonals use 0.7071 ~= 1/sqrt(2) so speed is direction-independent)
        if action == "N":
            self.drone_y = min(self.height, self.drone_y + self.speed)
        elif action == "S":
            self.drone_y = max(0, self.drone_y - self.speed)
        elif action == "E":
            self.drone_x = min(self.width, self.drone_x + self.speed)
        elif action == "W":
            self.drone_x = max(0, self.drone_x - self.speed)
        elif action == "NE":
            self.drone_x = min(self.width, self.drone_x + self.speed * 0.7071)
            self.drone_y = min(self.height, self.drone_y + self.speed * 0.7071)
        elif action == "SE":
            self.drone_x = min(self.width, self.drone_x + self.speed * 0.7071)
            self.drone_y = max(0, self.drone_y - self.speed * 0.7071)
        elif action == "SW":
            self.drone_x = max(0, self.drone_x - self.speed * 0.7071)
            self.drone_y = max(0, self.drone_y - self.speed * 0.7071)
        elif action == "NW":
            self.drone_x = max(0, self.drone_x - self.speed * 0.7071)
            self.drone_y = min(self.height, self.drone_y + self.speed * 0.7071)
        # "hover" moves nothing

        # Distance actually travelled (zero when hovering or clipped at a wall)
        movement_cost = math.sqrt((self.drone_x - prev_x) ** 2 + (self.drone_y - prev_y) ** 2)

        # Log trajectory
        self.trajectory_x.append(self.drone_x)
        self.trajectory_y.append(self.drone_y)

        # Reward shaping
        reward = 0
        serviced_users = 0
        newly_serviced = 0  # users first served on this step
        min_distance = float('inf')  # distance to the nearest unserved user

        # 1. Movement penalty proportional to distance travelled
        reward -= 0.02 * movement_cost

        # 2. Service rewards
        for user in self.all_users:
            if user.active:
                distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)

                # Track the nearest unserved user
                if not user.visited and distance < min_distance:
                    min_distance = distance

                if distance < self.service_radius:
                    serviced_users += 1

                    # First visit earns the large bonus
                    if not user.visited:
                        reward += 15
                        newly_serviced += 1
                        user.visited = True
                    else:
                        reward += 1  # much smaller reward for re-visits

        # 3. Proximity shaping toward the nearest unserved user (exp decay)
        if min_distance < float('inf'):
            proximity_reward = 8 * math.exp(-min_distance / 30)
            reward += proximity_reward

        # 4. Completion bonus
        active_count = sum(1 for user in self.all_users if user.active)
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)

        # Progress reward (encourages serving users back to back).
        # prev_visited_count is zeroed in reset(), so episode starts are clean.
        progress = visited_count - self.prev_visited_count
        reward += 5 * progress
        self.prev_visited_count = visited_count

        if visited_count == active_count and active_count > 0:
            # Completion bonus + time-saving bonus
            reward += 200 + 100 * (1 - self.steps / self.max_steps)

        # 5. Penalty proportional to the unserved fraction
        unvisited_penalty = -0.5 * (1 - visited_count / max(active_count, 1))
        reward += unvisited_penalty

        # Step bookkeeping
        self.steps += 1
        done = self.steps >= self.max_steps

        # Early termination once every active user is served
        if visited_count == active_count and active_count > 0:
            done = True

        info = {
            "serviced_users": serviced_users,
            "newly_serviced": newly_serviced,
            "total_visited": visited_count,
            "total_active": active_count
        }

        return self.get_state(), reward, done, info

    def render(self, episode, step=None):
        """Save a trajectory snapshot as a PNG.

        `step` is accepted for interface compatibility but unused — the
        filename uses self.steps instead.
        """
        plt.figure(figsize=(10, 10))
        plt.xlim(0, self.width)
        plt.ylim(0, self.height)

        # Drone trajectory
        plt.plot(self.trajectory_x, self.trajectory_y, 'ro-', alpha=0.6, label='Drone Path')

        # Current drone position
        plt.scatter(self.drone_x, self.drone_y, c='red', s=100, marker='*', label='Drone')

        # Service radius
        circle = plt.Circle((self.drone_x, self.drone_y), self.service_radius,
                            color='blue', fill=False, alpha=0.3, linestyle='--')
        plt.gca().add_patch(circle)

        # Users
        for user in self.all_users:
            if user.active and user.visited:
                color = 'limegreen'  # visited active user
                marker = 'o'
            elif user.active and not user.visited:
                color = 'green'  # unvisited active user
                marker = 'o'
            else:
                color = 'gray'  # inactive user
                marker = 'x'

            plt.scatter(user.x, user.y, c=color, s=80, marker=marker)

            # Highlight users currently inside the service radius
            distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)
            if distance < self.service_radius and user.active:
                circle = plt.Circle((user.x, user.y), 2, color='green', alpha=0.5)
                plt.gca().add_patch(circle)

        # Legend
        visited_handle = plt.scatter([], [], c='limegreen', marker='o', label='Visited User')
        active_handle = plt.scatter([], [], c='green', marker='o', label='Active User')
        inactive_handle = plt.scatter([], [], c='gray', marker='x', label='Inactive User')
        plt.legend(handles=[visited_handle, active_handle, inactive_handle])

        # Title with coverage info
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        active_count = sum(1 for user in self.all_users if user.active)
        plt.title(f'Episode {episode}: Visited {visited_count}/{active_count} users (Step {self.steps})')
        plt.xlabel('X Position (m)')
        plt.ylabel('Y Position (m)')
        plt.grid(True)

        # BUGFIX: save into the "../trajectory_images" directory created at
        # import time; the old relative "trajectory_images/..." path raised
        # FileNotFoundError unless the cwd happened to contain that folder.
        filename = f'../trajectory_images/episode_{episode}_step_{self.steps}.png'
        plt.savefig(filename)
        plt.close()


class ContinualRLAgent:
    """Dueling Double-DQN agent with prioritized replay and EWC-based
    continual learning across a sequence of tasks."""

    def __init__(self, state_dim, action_dim, config):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = config["gamma"]
        self.lr = config["learning_rate"]
        self.epsilon = 1.0
        self.epsilon_decay = config["epsilon_decay"]
        self.epsilon_min = config["epsilon_min"]
        self.batch_size = config["batch_size"]
        self.ewc_lambda_base = config["ewc_lambda_base"]
        self.target_update_freq = config["target_update_freq"]
        self.alpha = config["alpha"]  # Polyak coefficient for target-network soft updates
        self.grad_clip = config["grad_clip"]
        self.task_correlation = {}  # pairwise task-correlation weights used by the EWC penalty

        # Online and target networks - Dueling DQN architecture
        self.q_network = DuelingQNetwork(state_dim, action_dim).to(device)
        self.target_network = DuelingQNetwork(state_dim, action_dim).to(device)
        self.target_network.load_state_dict(self.q_network.state_dict())

        # AdamW optimizer (decoupled weight decay)
        self.optimizer = optim.AdamW(
            self.q_network.parameters(),
            lr=self.lr,
            weight_decay=1e-4
        )

        # Cyclic learning-rate schedule between lr/10 and lr
        self.scheduler = optim.lr_scheduler.CyclicLR(
            self.optimizer,
            base_lr=self.lr / 10,
            max_lr=self.lr,
            step_size_up=1000,
            cycle_momentum=False
        )

        # Prioritized experience replay (SumTree-backed)
        self.memory = PrioritizedReplayBuffer(config["memory_capacity"])

        # Continual-learning bookkeeping
        self.previous_tasks_fisher = {}  # Fisher information matrix per finished task
        self.previous_tasks_params = {}  # snapshot of the parameters per finished task
        self.current_task_id = 0
        self.task_seen = 0

        # Training statistics
        self.training_steps = 0

    def select_action(self, state, eval_mode=False):
        """Epsilon-greedy action selection with coverage-adaptive exploration."""
        # Last state feature is the fraction of active users already served
        coverage_ratio = state[-1]

        if not eval_mode:
            # The lower the coverage, the higher the effective exploration rate
            adaptive_epsilon = min(
                self.epsilon * (1 + 2 * (1 - coverage_ratio)),
                0.8  # exploration cap of 80%
            )
            if random.random() < adaptive_epsilon:
                return random.randrange(self.action_dim)

        # Greedy action from the online network
        state_tensor = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
        with torch.no_grad():
            q_values = self.q_network(state_tensor)
        return q_values.argmax().item()

    def update_model(self):
        """One gradient step: Double-DQN TD loss plus the weighted EWC penalty.

        Returns the scalar loss, or 0.0 while the buffer is still too small.
        """
        if len(self.memory) < self.batch_size:
            return 0.0

        # Sample a batch from prioritized replay
        states, actions, rewards, next_states, dones, indices, is_weights = self.memory.sample(self.batch_size)

        # Current Q-values for the taken actions
        current_q = self.q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)

        # Double DQN: the online network picks actions, the target network evaluates them
        with torch.no_grad():
            # Best next actions according to the online network
            next_actions = self.q_network(next_states).argmax(1).unsqueeze(1)
            # Their values according to the target network
            next_q = self.target_network(next_states).gather(1, next_actions).squeeze(1)
            # Bootstrapped TD target
            target_q = rewards + (1 - dones) * self.gamma * next_q

        # TD errors drive the replay-priority refresh
        td_errors = torch.abs(current_q - target_q).detach().cpu().numpy()
        self.memory.update_priorities(indices, td_errors)

        # TD loss weighted by the importance-sampling weights
        td_loss = (is_weights * (current_q - target_q.detach()).pow(2)).mean()

        # EWC penalty against catastrophic forgetting (weighted by task
        # correlation and by exponential decay with task distance)
        ewc_loss = 0
        total_weight = 0
        if self.task_seen > 0:
            for task_id in range(self.task_seen):
                # Task correlation (defaults to 0.5)
                correlation = self.task_correlation.get((self.current_task_id, task_id), 0.5)

                # Exponential decay with task distance
                decay = math.exp(-0.3 * abs(self.current_task_id - task_id))

                # Combined weight
                weight = correlation * decay
                total_weight += weight

                for name, param in self.q_network.named_parameters():
                    if name in self.previous_tasks_fisher[task_id]:
                        fisher = self.previous_tasks_fisher[task_id][name].to(device)
                        old_param = self.previous_tasks_params[task_id][name].to(device)
                        ewc_loss += weight * (fisher * (param - old_param).pow(2)).sum()

        # Regularization strength grows with the number of tasks seen
        ewc_lambda = self.ewc_lambda_base * (1 - 0.5 ** self.task_seen)

        # Total loss = TD loss + EWC penalty
        loss = td_loss
        if total_weight > 0:
            loss += (ewc_lambda / total_weight) * ewc_loss

        # Backpropagate
        self.optimizer.zero_grad()
        loss.backward()

        # Gradient clipping against exploding gradients
        torch.nn.utils.clip_grad_norm_(self.q_network.parameters(), self.grad_clip)

        self.optimizer.step()
        self.scheduler.step()  # advance the cyclic learning rate

        # Count training steps
        self.training_steps += 1

        # Periodically (soft-)update the target network
        if self.training_steps % self.target_update_freq == 0:
            self.update_target_network()

        return loss.item()

    def update_target_network(self):
        # Polyak averaging: soft update of the target network toward the online one
        for target_param, param in zip(self.target_network.parameters(), self.q_network.parameters()):
            target_param.data.copy_(self.alpha * param.data + (1 - self.alpha) * target_param.data)

    def calculate_fisher_information(self, env, num_samples=100):
        """Estimate the diagonal Fisher information matrix for EWC.

        Rolls out `num_samples` greedy episodes in `env`, accumulating squared
        gradients of the log-softmax probability of the chosen action.
        Returns (fisher_dict, param_dict) keyed by parameter name.
        """
        fisher_dict = {}
        param_dict = {}

        # Initialize accumulators and snapshot the current parameters
        for name, param in self.q_network.named_parameters():
            fisher_dict[name] = torch.zeros_like(param).to(device)
            param_dict[name] = param.data.clone().to(device)

        # Collect rollouts and accumulate Fisher information
        self.q_network.eval()
        for _ in range(num_samples):
            state = env.reset()
            done = False

            while not done:
                action = self.select_action(state, eval_mode=True)
                next_state, reward, done, _ = env.step(action)

                # Forward pass for the visited state
                state_tensor = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
                q_values = self.q_network(state_tensor)

                # Log-probability of the chosen action (softmax over Q-values)
                log_prob = torch.log_softmax(q_values, dim=1)[0, action]

                # Gradient of the negative log-probability
                self.optimizer.zero_grad()
                (-log_prob).backward()

                # Accumulate squared gradients
                for name, param in self.q_network.named_parameters():
                    if param.grad is not None:
                        fisher_dict[name] += param.grad.data.pow(2)

                state = next_state
                if done:
                    break

        # Normalize by the number of rollouts
        for name in fisher_dict:
            fisher_dict[name] /= num_samples

        self.q_network.train()
        return fisher_dict, param_dict

    def finish_task(self, env):
        """Finalize the current task: store its Fisher info and parameters,
        initialize correlations, advance counters and checkpoint the model."""
        # Compute and store Fisher information and parameters for this task
        fisher, params = self.calculate_fisher_information(env)
        self.previous_tasks_fisher[self.current_task_id] = fisher
        self.previous_tasks_params[self.current_task_id] = params

        # Initialize task correlations (default 0.5)
        for i in range(self.task_seen):
            self.task_correlation[(self.current_task_id, i)] = 0.5
            self.task_correlation[(i, self.current_task_id)] = 0.5

        self.task_seen += 1
        self.current_task_id += 1

        # Checkpoint
        torch.save(self.q_network.state_dict(), f"../models/task_{self.current_task_id}.pth")
        print(f"任务 {self.current_task_id - 1} 完成，模型已保存")


def train():
    """End-to-end pipeline: warm-up collection, task-switching training loop,
    reward/coverage plotting, and a final per-task evaluation."""
    # Hyperparameters (tuned from earlier run analysis)
    config = {
        "learning_rate": 0.0003,  # lower learning rate for stability
        "gamma": 0.99,  # discount factor
        "epsilon_decay": 0.999,  # slower exploration decay
        "epsilon_min": 0.1,  # exploration-rate floor
        "batch_size": 512,  # larger batch size
        "memory_capacity": 200000,  # larger replay buffer
        "service_radius": 15,  # smaller radius forces precise movement
        "ewc_lambda_base": 5000,  # stronger EWC regularization
        "target_update_freq": 50,  # less frequent target-network updates
        "alpha": 0.01,  # soft-update coefficient
        "grad_clip": 1.0  # gradient-clipping threshold
    }

    # TensorBoard logger
    writer = SummaryWriter(log_dir='../logs/drone_rl_optimized')

    # Environment
    env = DroneEnvironment(config)

    # State/action dimensions (state grew with the added features)
    state_dim = 2 + 10 * 6 + 2 + 2  # drone xy(2) + 10 users * 6 features + steps/coverage(2) + direction vector(2)
    action_dim = 9

    # Agent
    agent = ContinualRLAgent(state_dim, action_dim, config)

    # Training parameters
    num_episodes = 1500
    task_switch_interval = 500
    render_interval = 100
    update_frequency = 4  # one model update per 4 collected transitions
    last_save = 0
    best_avg_reward = -float('inf')

    # Training history
    all_rewards = []
    task_rewards = []
    coverage_rates = []
    task_boundaries = [0]
    start_time = time.time()

    # Warm-up: pre-fill the replay buffer before any gradient step
    print("预热经验收集...")
    state = env.reset()
    while len(agent.memory) < 10000:  # larger warm-up sample
        action = agent.select_action(state)
        next_state, reward, done, _ = env.step(action)
        agent.memory.push(state, action, reward, next_state, done)
        state = next_state
        if done:
            state = env.reset()
    print(f"预热完成，已收集 {len(agent.memory)} 条经验")

    # Main training loop
    for episode in range(num_episodes):
        # Switch the active-user pattern (task) every N episodes
        if episode % task_switch_interval == 0 and episode > 0:
            print(f"\n===== 切换到新任务 (Episode {episode}) =====")
            agent.finish_task(env)  # finalize current task, store Fisher info
            env.switch_task()  # switch the environment task
            task_rewards = []  # reset the per-task reward history
            task_boundaries.append(episode)  # record the task boundary

        state = env.reset()
        total_reward = 0
        done = False
        step_count = 0
        update_counter = 0

        while not done:
            # Select an action
            action = agent.select_action(state)

            # Execute it
            next_state, reward, done, info = env.step(action)

            # Store the transition
            agent.memory.push(state, action, reward, next_state, done)

            # Update the model every `update_frequency` transitions
            update_counter += 1
            if update_counter >= update_frequency:
                loss = agent.update_model()
                if loss > 0:
                    writer.add_scalar('Training/Loss', loss, agent.training_steps)
                update_counter = 0

            # Advance state and accumulate reward
            state = next_state
            total_reward += reward
            step_count += 1

        # Decay the exploration rate
        agent.epsilon = max(agent.epsilon_min, agent.epsilon * agent.epsilon_decay)
        writer.add_scalar('Training/Epsilon', agent.epsilon, episode)

        # Record the reward
        all_rewards.append(total_reward)
        task_rewards.append(total_reward)
        writer.add_scalar('Training/Reward', total_reward, episode)

        # Moving-average reward over the last 100 episodes
        avg_reward = np.mean(all_rewards[-100:]) if len(all_rewards) >= 100 else np.mean(all_rewards)
        # avg_rewards.append(avg_reward)
        writer.add_scalar('Training/Avg Reward (100ep)', avg_reward, episode)
        # Record the coverage rate
        coverage = info["total_visited"] / max(info["total_active"], 1)
        coverage_rates.append(coverage)
        writer.add_scalar('Metrics/Coverage Rate', coverage, episode)

        # Progress report
        if (episode + 1) % 10 == 0:
            visited = info["total_visited"]
            active = info["total_active"]
            print(f"Episode {episode + 1}/{num_episodes}, Reward: {total_reward:.2f}, "
                  f"Avg Reward: {avg_reward:.2f}, Epsilon: {agent.epsilon:.4f}, "
                  f"Coverage: {coverage * 100:.1f}% ({visited}/{active})")

            # Checkpoint the best model
            if avg_reward > best_avg_reward:
                best_avg_reward = avg_reward
                torch.save(agent.q_network.state_dict(), "../models/best_model.pth")
                print(f"最佳模型已保存，平均奖励: {best_avg_reward:.2f}")

        # Periodic checkpoint
        if episode - last_save >= 100 or episode == num_episodes - 1:
            torch.save(agent.q_network.state_dict(), f"../models/model_ep{episode}.pth")
            last_save = episode
            print(f"模型已保存: models/model_ep{episode}.pth")

        # Render the environment
        if (episode + 1) % render_interval == 0 or episode == 0:
            env.render(episode + 1)

    # Total training time
    training_time = time.time() - start_time
    print(f"训练完成! 总耗时: {training_time / 60:.2f} 分钟")

    # Plot the reward curve
    plt.figure(figsize=(12, 6))
    plt.plot(all_rewards, alpha=0.3, label='Episode Reward')

    # Smoothed (moving-average) reward
    window_size = 50  # larger window for a smoother curve
    smoothed_rewards = []
    for i in range(len(all_rewards)):
        start_idx = max(0, i - window_size + 1)
        smoothed_rewards.append(sum(all_rewards[start_idx:(i + 1)]) / (i - start_idx + 1))

    plt.plot(smoothed_rewards, linewidth=2, label=f'Smoothed Reward (window={window_size})')

    # Mark task-switch points
    for boundary in task_boundaries[1:]:
        plt.axvline(x=boundary, color='r', linestyle='--', alpha=0.7)

    plt.xlabel('Episode')
    plt.ylabel('Reward')
    plt.title('优化后的训练进度 (任务切换)')
    plt.legend()
    plt.grid(True)
    plt.savefig('optimized_training_rewards.png')
    plt.close()

    # Plot the coverage curve
    plt.figure(figsize=(12, 6))
    plt.plot(coverage_rates, alpha=0.7)
    plt.xlabel('Episode')
    plt.ylabel('Coverage Rate')
    plt.title('用户服务覆盖率变化')
    plt.grid(True)
    plt.savefig('coverage_rates.png')
    plt.close()

    # Close the TensorBoard writer
    writer.close()

    # Final evaluation
    print("\n===== 最终评估 =====")
    test_episodes = 5  # more evaluation episodes

    # Load the best checkpoint
    agent.q_network.load_state_dict(torch.load("../models/best_model.pth"))
    agent.q_network.eval()

    # Evaluate each task
    for task_id in range(len(task_boundaries)):
        print(f"\n测试任务 {task_id}:")

        # Switch to the corresponding task environment.
        # NOTE(review): switch_task() draws a *fresh random* task rather than
        # restoring the exact task trained at this boundary — verify intent.
        if task_id > 0:
            env.switch_task()

        task_coverage = []
        for test_ep in range(test_episodes):
            state = env.reset()
            total_reward = 0
            done = False
            step_count = 0

            while not done:
                action = agent.select_action(state, eval_mode=True)
                next_state, reward, done, info = env.step(action)
                state = next_state
                total_reward += reward
                step_count += 1

            visited = info["total_visited"]
            active = info["total_active"]
            coverage = visited / active if active > 0 else 0
            task_coverage.append(coverage)
            print(f"  测试回合 {test_ep + 1}, 奖励: {total_reward:.2f}, "
                  f"服务覆盖率: {coverage * 100:.1f}% ({visited}/{active}), 步数: {step_count}")

            # Save the first evaluation trajectory
            if test_ep == 0:
                env.render(f"final_task{task_id}_ep{test_ep + 1}")

        # Report the mean coverage for this task
        avg_coverage = np.mean(task_coverage) * 100
        print(f"任务 {task_id} 平均覆盖率: {avg_coverage:.1f}%")


# Script entry point
if __name__ == "__main__":
    train()