import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import copy
import math
import os
import time
from torch.utils.tensorboard import SummaryWriter

# Configure matplotlib to render CJK (SimHei) labels and minus signs correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Pick the compute device: CUDA GPU when available, otherwise CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Output directories for trajectory plots, model checkpoints and TensorBoard logs
os.makedirs("../trajectory_images", exist_ok=True)
os.makedirs("../models", exist_ok=True)
os.makedirs("../logs", exist_ok=True)

# Seed every RNG involved (torch, numpy, random) for reproducibility
SEED = 42
torch.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

# Transition record stored in the replay buffers
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])


# 使用SumTree实现高效优先级采样
class SumTree:
    """Binary sum-tree over priorities for O(log n) proportional sampling.

    The first ``capacity - 1`` entries of ``tree`` are internal nodes holding
    the sum of their children; the last ``capacity`` entries are leaves
    holding the individual priorities.  ``data`` stores the payload attached
    to each leaf and is written circularly (oldest entry overwritten first).
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = np.zeros(2 * capacity - 1)
        self.data = np.zeros(capacity, dtype=object)
        self.write = 0      # next data slot to (over)write
        self.n_entries = 0  # number of slots currently filled

    def _propagate(self, idx, change):
        # Walk from a leaf up to the root, adding `change` to every ancestor.
        node = idx
        while node != 0:
            node = (node - 1) // 2
            self.tree[node] += change

    def _retrieve(self, idx, s):
        # Descend from `idx` to the leaf whose cumulative-priority interval
        # contains the sample value `s`.
        node = idx
        while True:
            left = 2 * node + 1
            if left >= len(self.tree):
                return node
            if s <= self.tree[left]:
                node = left
            else:
                s -= self.tree[left]
                node = left + 1

    def total(self):
        """Sum of all stored priorities (value at the root)."""
        return self.tree[0]

    def add(self, priority, data):
        """Store `data` with `priority`, overwriting the oldest slot when full."""
        leaf = self.write + self.capacity - 1
        self.data[self.write] = data
        self.update(leaf, priority)

        self.write = (self.write + 1) % self.capacity
        self.n_entries = min(self.n_entries + 1, self.capacity)

    def update(self, idx, priority):
        """Set the priority of leaf `idx` and refresh every ancestor sum."""
        delta = priority - self.tree[idx]
        self.tree[idx] = priority
        self._propagate(idx, delta)

    def get(self, s):
        """Return ``(tree index, priority, payload)`` for sample value `s`."""
        leaf = self._retrieve(0, s)
        return leaf, self.tree[leaf], self.data[leaf - self.capacity + 1]


class PrioritizedReplayBuffer:
    """Proportional prioritized experience replay (PER) on top of a SumTree.

    Transitions are sampled with probability proportional to
    ``(|TD error| + eps) ** alpha``; the induced bias is corrected by
    importance-sampling weights whose exponent ``beta`` anneals toward 1.
    """

    def __init__(self, capacity, alpha=0.7, beta=0.5, beta_increment=0.0005):
        self.alpha = alpha  # priority exponent; larger -> stronger prioritization
        self.beta = beta  # importance-sampling exponent, annealed from 0.5 to 1.0
        self.beta_increment = beta_increment  # beta increase applied per sample() call
        self.tree = SumTree(capacity)
        self.capacity = capacity
        self.eps = 1e-6  # keeps every transition's priority strictly positive

    def _get_priority(self, error):
        """Map a TD error to a sampling priority."""
        return (np.abs(error) + self.eps) ** self.alpha

    def push(self, state, action, reward, next_state, done):
        """Insert a transition with the current maximum leaf priority so new
        experiences are guaranteed to be replayed at least once."""
        experience = Experience(state, action, reward, next_state, done)
        max_priority = np.max(self.tree.tree[-self.tree.capacity:]) if self.tree.n_entries > 0 else 1.0
        self.tree.add(max_priority, experience)

    def sample(self, batch_size):
        """Draw `batch_size` transitions via stratified proportional sampling.

        Returns batched tensors on `device`, the tree indices (needed later by
        :meth:`update_priorities`) and normalized importance-sampling weights.
        """
        batch = []
        idxs = []
        priorities = []
        # Stratified sampling: one draw from each equal-mass segment.
        segment = self.tree.total() / batch_size

        self.beta = np.min([1.0, self.beta + self.beta_increment])

        for i in range(batch_size):
            a = segment * i
            b = segment * (i + 1)
            s = random.uniform(a, b)
            idx, priority, data = self.tree.get(s)
            batch.append(data)
            idxs.append(idx)
            priorities.append(priority)

        # Stack into contiguous numpy arrays first: building a tensor from a
        # Python list of ndarrays is deprecated in torch and far slower than
        # converting a single stacked array.
        states = torch.tensor(np.stack([e.state for e in batch]), dtype=torch.float).to(device)
        actions = torch.tensor([e.action for e in batch], dtype=torch.long).to(device)
        rewards = torch.tensor([e.reward for e in batch], dtype=torch.float).to(device)
        next_states = torch.tensor(np.stack([e.next_state for e in batch]), dtype=torch.float).to(device)
        dones = torch.tensor([e.done for e in batch], dtype=torch.float).to(device)

        # Importance-sampling weights, normalized by the batch maximum.
        sampling_weights = np.array(priorities) / self.tree.total()
        is_weights = np.power(self.tree.n_entries * sampling_weights, -self.beta)
        is_weights /= is_weights.max()
        is_weights = torch.tensor(is_weights, dtype=torch.float).to(device)

        return states, actions, rewards, next_states, dones, idxs, is_weights

    def update_priorities(self, indices, errors):
        """Refresh leaf priorities from freshly computed TD errors."""
        for idx, error in zip(indices, errors):
            priority = self._get_priority(error)
            self.tree.update(idx, priority)

    def __len__(self):
        return self.tree.n_entries


class NoisyLinear(nn.Module):
    """Linear layer with factorised Gaussian parameter noise (NoisyNet).

    In training mode the effective weights are ``mu + sigma * epsilon`` with
    factorised noise; in eval mode only the mean parameters are used, so the
    layer then behaves exactly like a plain ``nn.Linear``.
    """

    def __init__(self, in_features, out_features, std_init=0.4):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init

        # Learnable means and noise scales for weights and biases.
        self.weight_mu = nn.Parameter(torch.FloatTensor(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.FloatTensor(out_features, in_features))
        self.bias_mu = nn.Parameter(torch.FloatTensor(out_features))
        self.bias_sigma = nn.Parameter(torch.FloatTensor(out_features))

        # Noise samples live in buffers so the optimizer never updates them.
        self.register_buffer('weight_epsilon', torch.FloatTensor(out_features, in_features))
        self.register_buffer('bias_epsilon', torch.FloatTensor(out_features))

        self.reset_parameters()
        self.reset_noise()

    def reset_parameters(self):
        """Initialise means uniformly and sigmas to a constant scale."""
        bound = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-bound, bound)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-bound, bound)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))

    def _scale_noise(self, size):
        # f(x) = sign(x) * sqrt(|x|) applied to a standard-normal sample.
        noise = torch.randn(size, device=self.weight_mu.device)
        return noise.sign().mul_(noise.abs().sqrt_())

    def reset_noise(self):
        """Draw fresh factorised noise for the weight and bias epsilons."""
        eps_in = self._scale_noise(self.in_features)
        eps_out = self._scale_noise(self.out_features)

        self.weight_epsilon.copy_(torch.outer(eps_out, eps_in))
        self.bias_epsilon.copy_(eps_out)

    def forward(self, x):
        """Apply the (possibly noisy) affine transform to ``x``."""
        if not self.training:
            # Deterministic path for evaluation: means only.
            return nn.functional.linear(x, self.weight_mu, self.bias_mu)

        noisy_weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
        noisy_bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        return nn.functional.linear(x, noisy_weight, noisy_bias)


class DuelingQNetwork(nn.Module):
    """Dueling DQN: a shared trunk feeding separate value and advantage heads.

    Q(s, a) = V(s) + (A(s, a) - mean_a' A(s, a')); subtracting the mean keeps
    the value/advantage decomposition identifiable.  Both heads use
    NoisyLinear layers for exploration.
    """

    def __init__(self, state_dim, action_dim):
        super(DuelingQNetwork, self).__init__()
        # Shared feature trunk.
        self.feature_layer = nn.Sequential(
            nn.Linear(state_dim, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU()
        )

        # State-value head V(s).
        self.value_stream = nn.Sequential(
            NoisyLinear(256, 128),
            nn.ReLU(),
            NoisyLinear(128, 1)
        )

        # Action-advantage head A(s, a).
        self.advantage_stream = nn.Sequential(
            NoisyLinear(256, 128),
            nn.ReLU(),
            NoisyLinear(128, action_dim)
        )

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Kaiming init for the plain linear layers only; NoisyLinear layers
        # initialise themselves in their own constructor.
        if not isinstance(m, nn.Linear):
            return
        torch.nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
        if m.bias is not None:
            torch.nn.init.constant_(m.bias, 0.0)

    def reset_noise(self):
        """Resample the noise in every NoisyLinear submodule."""
        for layer in self.modules():
            if isinstance(layer, NoisyLinear):
                layer.reset_noise()

    def forward(self, x):
        """Return per-action Q-values for a batch of states."""
        features = self.feature_layer(x)
        value = self.value_stream(features)
        advantage = self.advantage_stream(features)
        # Combine the two heads into Q-values.
        return value + (advantage - advantage.mean(dim=1, keepdim=True))


class User:
    """A ground user at a fixed (x, y) position that the drone may serve."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.active = False   # whether this user belongs to the current task
        self.visited = False  # whether this user has already been served
        self.visit_time = 0   # environment step at which the user was served


class DroneEnvironment:
    """2-D drone coverage environment.

    A single drone must fly within ``service_radius`` of every currently
    active ground user before the step budget runs out.  The reward combines
    first-visit bonuses, proximity shaping toward the nearest unserved user,
    loitering/movement penalties, and completion/efficiency bonuses.
    ``step`` returns the usual ``(state, reward, done, info)`` tuple.
    """

    def __init__(self, config):
        self.width = 100
        self.height = 100
        self.service_radius = config["service_radius"]
        self.max_steps = 300
        self.steps = 0
        self.episode_num = 0

        # Place users cell-by-cell on a coarse grid so they spread out.
        # NOTE(review): with a 5x5 grid but a cap of 10 users, only the cells
        # with i in {0, 1} ever receive a user, so every user ends up with
        # x in [0, 40] — confirm whether the cap or the grid size is intended.
        self.all_users = []
        grid_size = 5  # split the arena into a 5x5 grid of cells
        for i in range(grid_size):
            for j in range(grid_size):
                if len(self.all_users) < 10:  # at most 10 users in total
                    # Drop a user at a random point inside this grid cell.
                    x = random.uniform(i * self.width / grid_size, (i + 1) * self.width / grid_size)
                    y = random.uniform(j * self.height / grid_size, (j + 1) * self.height / grid_size)
                    self.all_users.append(User(x, y))

        # Activate a random subset of 8 users for the first task.
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False

        # Drone state (random start position, fixed speed in m/step).
        self.drone_x = random.uniform(0, self.width)
        self.drone_y = random.uniform(0, self.height)
        self.speed = 5

        # Trajectory history; the starting position is included.
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        # Users served so far this episode, in visit order.
        self.recently_visited = []

        # Action space: 8 compass directions plus hover.
        self.actions = ["N", "NE", "E", "SE", "S", "SW", "W", "NW", "hover"]

    def reset(self):
        """Start a new episode: clear visit state and (re)place the drone.

        Uses a curriculum: during the first 200 episodes the drone spawns
        near a random active user (at a growing distance); afterwards it
        spawns mostly in the central region of the arena.
        """
        self.episode_num += 1
        # Clear per-episode visit bookkeeping.
        for user in self.all_users:
            user.visited = False
            user.visit_time = 0

        self.recently_visited = []

        # Curriculum-style initialisation with adaptive difficulty.
        if self.episode_num < 200:  # easier spawns for the first 200 episodes
            # Spawn the drone near a randomly chosen active user.
            if self.active_users:
                random_user = random.choice(self.active_users)
                # Spawn distance grows as training progresses.
                # NOTE(review): the original comment claimed "10 m growing to
                # 30 m", but this formula reaches 30 at episode 20 and keeps
                # growing (~210 at episode 199) — confirm which is intended.
                max_distance = 10 + (self.episode_num / 20.0) * 20
                angle = random.uniform(0, 2 * math.pi)
                distance = random.uniform(5, max_distance)
                self.drone_x = max(0, min(self.width, random_user.x + distance * math.cos(angle)))
                self.drone_y = max(0, min(self.height, random_user.y + distance * math.sin(angle)))
            else:
                self.drone_x = random.uniform(0, self.width)
                self.drone_y = random.uniform(0, self.height)
        else:
            # Later episodes: random spawn, biased toward the centre.
            if random.random() < 0.7:
                # 70% chance: spawn inside the central region.
                self.drone_x = random.uniform(20, self.width - 20)
                self.drone_y = random.uniform(20, self.height - 20)
            else:
                # 30% chance: fully random spawn.
                self.drone_x = random.uniform(0, self.width)
                self.drone_y = random.uniform(0, self.height)

        self.steps = 0

        # Reset the trajectory history.
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        return self.get_state()

    def switch_task(self):
        """Simulate a task change: re-draw the set of 8 active users and
        clear all visit bookkeeping."""
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False
            user.visit_time = 0

    def get_state(self):
        """Build the observation vector.

        Layout: drone position (2) + per-user features (6 each: active flag,
        visited flag, relative dx/dy, log-scaled distance, in-range flag) +
        step fraction (1) + served fraction (1) + trajectory diversity (1).
        """
        state = []

        # 1. Drone coordinates (normalised).
        state.extend([self.drone_x / self.width, self.drone_y / self.height])

        # 2. Per-user features.
        for user in self.all_users:
            # Active flag.
            state.append(1.0 if user.active else 0.0)

            # Already-visited flag.
            state.append(1.0 if user.visited else 0.0)

            # Position relative to the drone (normalised).
            dx = (user.x - self.drone_x) / self.width
            dy = (user.y - self.drone_y) / self.height
            state.extend([dx, dy])

            # Distance to the user (log-scaled to compress the range).
            distance = math.sqrt((user.x - self.drone_x) ** 2 + (user.y - self.drone_y) ** 2)
            scaled_dist = math.log(1 + distance) / math.log(1 + math.sqrt(self.width ** 2 + self.height ** 2))
            state.append(scaled_dist)

            # Whether the user is inside the service radius.
            in_range = 1.0 if distance < self.service_radius else 0.0
            state.append(in_range)

        # 3. Elapsed steps (normalised).
        state.append(self.steps / self.max_steps)

        # 4. Fraction of active users already served.
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        total_active = sum(1 for user in self.all_users if user.active)
        state.append(visited_count / max(1, total_active))

        # 5. Trajectory-diversity feature, discouraging loitering in one spot.
        if len(self.trajectory_x) > 10:
            recent_x = self.trajectory_x[-10:]
            recent_y = self.trajectory_y[-10:]
            x_var = np.var(recent_x) / self.width
            y_var = np.var(recent_y) / self.height
            # Higher value = more varied recent movement.
            state.append(min(1.0, (x_var + y_var) * 10))
        else:
            state.append(0.5)  # neutral default early in the episode

        return np.array(state)

    def step(self, action_idx):
        """Advance one timestep: move the drone, apply the shaped reward and
        return ``(state, reward, done, info)``."""
        action = self.actions[action_idx]
        prev_x, prev_y = self.drone_x, self.drone_y

        # Apply the movement (8 compass directions + hover), clamped to bounds.
        if action == "N":
            self.drone_y = min(self.height, self.drone_y + self.speed)
        elif action == "S":
            self.drone_y = max(0, self.drone_y - self.speed)
        elif action == "E":
            self.drone_x = min(self.width, self.drone_x + self.speed)
        elif action == "W":
            self.drone_x = max(0, self.drone_x - self.speed)
        elif action == "NE":
            self.drone_x = min(self.width, self.drone_x + self.speed * 0.7071)
            self.drone_y = min(self.height, self.drone_y + self.speed * 0.7071)
        elif action == "SE":
            self.drone_x = min(self.width, self.drone_x + self.speed * 0.7071)
            self.drone_y = max(0, self.drone_y - self.speed * 0.7071)
        elif action == "SW":
            self.drone_x = max(0, self.drone_x - self.speed * 0.7071)
            self.drone_y = max(0, self.drone_y - self.speed * 0.7071)
        elif action == "NW":
            self.drone_x = max(0, self.drone_x - self.speed * 0.7071)
            self.drone_y = min(self.height, self.drone_y + self.speed * 0.7071)
        # "hover" leaves the position unchanged

        # Distance actually travelled this step.
        movement_cost = math.sqrt((self.drone_x - prev_x) ** 2 + (self.drone_y - prev_y) ** 2)

        # Record the trajectory.
        self.trajectory_x.append(self.drone_x)
        self.trajectory_y.append(self.drone_y)

        # --- Reward shaping ---
        reward = 0
        serviced_users = 0
        newly_serviced = 0  # users served for the first time this step

        # Penalty signal for loitering in a small area.
        path_inefficiency = self._check_path_inefficiency()

        # Small per-step cost proportional to the distance moved.
        move_penalty = 0.005 * movement_cost
        reward -= move_penalty

        # Penalise hovering around the same region.
        reward -= path_inefficiency * 0.5

        # Reward for users currently inside the service radius.
        # NOTE(review): curr_visited is collected but never read afterwards.
        curr_visited = []
        for user in self.all_users:
            if user.active:
                distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)

                if distance < self.service_radius:
                    serviced_users += 1
                    curr_visited.append(user)

                    # Large bonus the first time a user is served.
                    if not user.visited:
                        base_reward = 25

                        # Serving earlier in the episode is worth more.
                        time_factor = 1.0 - (self.steps / self.max_steps) * 0.5
                        visit_reward = base_reward * time_factor

                        reward += visit_reward
                        newly_serviced += 1
                        user.visited = True
                        user.visit_time = self.steps
                        self.recently_visited.append(user)
                    else:
                        # Tiny, decaying reward for lingering near a user
                        # that has already been served.
                        revisit_penalty = 0.2 * (self.steps - user.visit_time) / self.max_steps
                        reward += 0.5 - revisit_penalty

        # Bonus for travelling efficiently between consecutive visits.
        if newly_serviced > 0:
            # Compare the hop between the last two visited users.
            if len(self.recently_visited) >= 2:
                last_user = self.recently_visited[-2]
                current_user = self.recently_visited[-1]
                optimal_distance = math.sqrt((last_user.x - current_user.x) ** 2 +
                                             (last_user.y - current_user.y) ** 2)
                # NOTE(review): this is elapsed *steps*, not metres (speed is
                # 5 m/step), yet it is compared against a Euclidean distance
                # below — confirm the units are intended.
                actual_distance = self.steps - last_user.visit_time

                # Extra reward when the hop was close to the straight line.
                if optimal_distance > 0 and actual_distance <= optimal_distance * 1.5:
                    efficiency_bonus = 5 * (optimal_distance / max(1, actual_distance))
                    reward += efficiency_bonus

        # Nearest-unserved-user heuristic shaping.
        min_distance_to_unvisited = float('inf')
        closest_unvisited = None

        for user in self.all_users:
            if user.active and not user.visited:
                distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)
                if distance < min_distance_to_unvisited:
                    min_distance_to_unvisited = distance
                    closest_unvisited = user

        # Reward for being close to the nearest unserved user.
        if closest_unvisited is not None:
            # Exponentially decaying proximity reward.
            proximity_reward = 15 * math.exp(-min_distance_to_unvisited / 20)
            reward += proximity_reward

            # Extra shaping for moving toward the nearest unserved user.
            if len(self.trajectory_x) >= 2:
                last_x, last_y = self.trajectory_x[-2], self.trajectory_y[-2]
                last_dist = math.sqrt((last_x - closest_unvisited.x) ** 2 +
                                      (last_y - closest_unvisited.y) ** 2)
                curr_dist = math.sqrt((self.drone_x - closest_unvisited.x) ** 2 +
                                      (self.drone_y - closest_unvisited.y) ** 2)

                # Reward any reduction in that distance.
                if curr_dist < last_dist:
                    reward += 1.0 * (last_dist - curr_dist) / self.service_radius

        # Completion bookkeeping and bonuses.
        active_count = sum(1 for user in self.all_users if user.active)
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)

        # Reward that grows linearly with the fraction of users served.
        completion_percentage = visited_count / active_count if active_count > 0 else 0
        reward += completion_percentage * 10

        if visited_count == active_count and active_count > 0:
            # Task fully completed.
            reward += 150

            # Bonus for finishing early.
            time_saved = (self.max_steps - self.steps) / self.max_steps
            reward += 75 * time_saved

            # Bonus for a short flight path relative to a TSP estimate.
            path_len = len(self.trajectory_x)
            if path_len > 0:
                # Actual path length flown.
                path_length = 0
                for i in range(1, path_len):
                    dx = self.trajectory_x[i] - self.trajectory_x[i - 1]
                    dy = self.trajectory_y[i] - self.trajectory_y[i - 1]
                    path_length += math.sqrt(dx * dx + dy * dy)

                # Greedy approximation of the shortest tour over active users.
                points = [(user.x, user.y) for user in self.all_users if user.active]
                if len(points) >= 2:
                    # Greedy nearest-neighbour TSP estimate.
                    approx_optimal = self._approximate_tsp_length(points)
                    if approx_optimal > 0:
                        path_efficiency = approx_optimal / max(1, path_length)
                        reward += 50 * min(1.5, path_efficiency)  # capped bonus

        # Advance time.
        self.steps += 1
        done = self.steps >= self.max_steps

        # Episode ends early once every active user has been served.
        if visited_count == active_count and active_count > 0:
            done = True

        info = {
            "serviced_users": serviced_users,
            "newly_serviced": newly_serviced,
            "total_visited": visited_count,
            "total_active": active_count,
            "path_inefficiency": path_inefficiency
        }

        return self.get_state(), reward, done, info

    def _check_path_inefficiency(self):
        """Return a loitering score in [0, 1].

        0 means the drone moved freely over the last 10 steps; values near 1
        mean it stayed inside a tiny area.  (The original comment stated the
        opposite direction; the formula below returns HIGH for loitering.)
        """
        if len(self.trajectory_x) < 10:
            return 0

        # Last 10 positions.
        recent_x = self.trajectory_x[-10:]
        recent_y = self.trajectory_y[-10:]

        # Approximate the covered area by the positional variance.
        x_var = np.var(recent_x)
        y_var = np.var(recent_y)

        # Low variance means the drone stayed in a small region.
        area_coverage = x_var + y_var

        # Higher return value = more severe loitering.
        return max(0, 1 - min(1, area_coverage / 100))

    def _approximate_tsp_length(self, points):
        """Greedy nearest-neighbour approximation of the shortest tour that
        starts at this episode's initial drone position and visits `points`."""
        if len(points) <= 1:
            return 0

        # Start from the drone's initial position this episode.
        current = (self.trajectory_x[0], self.trajectory_y[0])
        unvisited = points.copy()
        total_distance = 0

        while unvisited:
            # Pick the closest remaining point.
            min_dist = float('inf')
            nearest = None

            for point in unvisited:
                dist = math.sqrt((current[0] - point[0]) ** 2 + (current[1] - point[1]) ** 2)
                if dist < min_dist:
                    min_dist = dist
                    nearest = point

            # Move there and accumulate the travelled distance.
            total_distance += min_dist
            current = nearest
            unvisited.remove(nearest)

        return total_distance

    def render(self, episode, step=None):
        """Save a PNG snapshot of the arena, users and flight path into
        ``../trajectory_images/``.  `step` is accepted but unused."""
        plt.figure(figsize=(10, 10))
        plt.xlim(0, self.width)
        plt.ylim(0, self.height)

        # Flight path so far.
        plt.plot(self.trajectory_x, self.trajectory_y, 'ro-', alpha=0.6, label='Drone Path')

        # Current drone position.
        plt.scatter(self.drone_x, self.drone_y, c='red', s=100, marker='*', label='Drone')

        # Service radius around the drone.
        circle = plt.Circle((self.drone_x, self.drone_y), self.service_radius,
                            color='blue', fill=False, alpha=0.3, linestyle='--')
        plt.gca().add_patch(circle)

        # Users, colour-coded by status.
        for user in self.all_users:
            if user.active and user.visited:
                color = 'limegreen'  # served active user
                marker = 'o'
            elif user.active and not user.visited:
                color = 'green'  # unserved active user
                marker = 'o'
            else:
                color = 'gray'  # inactive user
                marker = 'x'

            plt.scatter(user.x, user.y, c=color, s=80, marker=marker)

            # Highlight users currently inside the service radius.
            distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)
            if distance < self.service_radius and user.active:
                circle = plt.Circle((user.x, user.y), 2, color='green', alpha=0.5)
                plt.gca().add_patch(circle)

        # Legend entries (empty scatters used purely as legend handles).
        visited_handle = plt.scatter([], [], c='limegreen', marker='o', label='Visited User')
        active_handle = plt.scatter([], [], c='green', marker='o', label='Active User')
        inactive_handle = plt.scatter([], [], c='gray', marker='x', label='Inactive User')
        plt.legend(handles=[visited_handle, active_handle, inactive_handle])

        # Title with progress information.
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        active_count = sum(1 for user in self.all_users if user.active)
        plt.title(f'Episode {episode}: Visited {visited_count}/{active_count} users (Step {self.steps})')
        plt.xlabel('X Position (m)')
        plt.ylabel('Y Position (m)')
        plt.grid(True)

        # Unique filename per episode/step snapshot.
        filename = f'../trajectory_images/episode_{episode}_step_{self.steps}.png'
        plt.savefig(filename)
        plt.close()


class ContinualRLAgent:
    """Double/Dueling DQN agent with prioritized replay, n-step returns,
    NoisyNet exploration, and EWC-based continual learning across tasks."""

    def __init__(self, state_dim, action_dim, config):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = config["gamma"]
        self.lr = config["learning_rate"]
        self.epsilon = 1.0
        self.epsilon_decay = config["epsilon_decay"]
        self.epsilon_min = 0.01
        self.batch_size = config["batch_size"]
        self.ewc_lambda = config["ewc_lambda"]
        self.target_update_freq = config["target_update_freq"]
        self.alpha = config["alpha"]  # soft target-update rate (Polyak tau)
        self.grad_clip = config["grad_clip"]
        self.n_step = config["n_step"]  # horizon for n-step returns

        # Online and target networks (enhanced Dueling DQN architecture).
        self.q_network = DuelingQNetwork(state_dim, action_dim).to(device)
        self.target_network = DuelingQNetwork(state_dim, action_dim).to(device)
        self.target_network.load_state_dict(self.q_network.state_dict())

        # AdamW adds decoupled weight decay to reduce overfitting.
        self.optimizer = optim.AdamW(
            self.q_network.parameters(),
            lr=self.lr,
            weight_decay=1e-4
        )

        # Warm-restart cosine schedule for a smoother learning-rate curve.
        self.scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
            self.optimizer,
            T_0=200,  # steps until the first restart
            T_mult=2,  # period multiplier after each restart
            eta_min=self.lr / 10  # floor learning rate
        )

        # Prioritized experience replay.
        self.memory = PrioritizedReplayBuffer(
            config["memory_capacity"],
            alpha=config["per_alpha"],
            beta=config["per_beta"],
            beta_increment=config["per_beta_increment"]
        )

        # Rolling window used to assemble n-step transitions.
        self.n_step_buffer = deque(maxlen=self.n_step)

        # Continual-learning state (Elastic Weight Consolidation).
        self.previous_tasks_fisher = {}  # task_id -> per-parameter Fisher information
        self.previous_tasks_params = {}  # task_id -> parameter snapshot
        self.current_task_id = 0
        self.task_seen = 0

        # Training statistics.
        self.training_steps = 0
        self.update_count = 0
        self.losses = []

        # Optional per-task replay buffers (experience rehearsal).
        self.replay_buffers = []

    def select_action(self, state, eval_mode=False):
        """Pick an action: epsilon-greedy during training, greedy in eval
        (NoisyNet layers also drop their noise automatically in eval mode)."""
        if not eval_mode and random.random() < self.epsilon:
            return random.randrange(self.action_dim)

        state_tensor = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
        with torch.no_grad():
            q_values = self.q_network(state_tensor)
        return q_values.argmax().item()

    def push_experience(self, state, action, reward, next_state, done):
        """Append a transition to the n-step window and flush aggregated
        n-step transitions into prioritized replay.

        While the window is not yet full (and the episode continues) nothing
        is stored.  Once full, the oldest transition is emitted with its
        n-step return; on episode end the remaining shorter-horizon
        transitions are flushed as well.
        """
        self.n_step_buffer.append((state, action, reward, next_state, done))

        # Not enough history for an n-step return and the episode continues.
        if len(self.n_step_buffer) < self.n_step and not done:
            return

        # Emit the n-step transition anchored at the oldest buffered step.
        s, a, r, ns, d = self.get_n_step_info()
        self.memory.push(s, a, r, ns, d)

        if done:
            # Flush the remaining shorter-horizon transitions.  Pop the head
            # FIRST so the transition pushed above is not emitted twice
            # (the previous implementation duplicated it at every episode end).
            self.n_step_buffer.popleft()
            while len(self.n_step_buffer) > 0:
                s, a, r, ns, d = self.get_n_step_info()
                self.memory.push(s, a, r, ns, d)
                self.n_step_buffer.popleft()

    def get_n_step_info(self):
        """Aggregate the buffered transitions into one n-step transition
        anchored at the oldest buffered state/action."""
        first_state, first_action = self.n_step_buffer[0][:2]

        # Discounted return, bootstrap state and terminal flag over the window.
        reward, next_state, done = self._get_n_step_return_info()

        return first_state, first_action, reward, next_state, done

    def _get_n_step_return_info(self):
        """Compute the discounted n-step return, the bootstrap next-state and
        the terminal flag, truncating at the first terminal transition."""
        n_reward = 0
        # Default bootstrap target: the newest transition in the window.
        n_next_state = self.n_step_buffer[-1][3]
        n_done = self.n_step_buffer[-1][4]

        # Accumulate discounted rewards, stopping at a terminal step.
        for i in range(len(self.n_step_buffer)):
            r_i = self.n_step_buffer[i][2]
            n_reward += (self.gamma ** i) * r_i

            # Rewards after a terminal transition do not count.
            if self.n_step_buffer[i][4]:
                n_next_state = self.n_step_buffer[i][3]
                n_done = True
                break

        return n_reward, n_next_state, n_done

    def update_model(self):
        """Run one Double-DQN gradient step with PER weights plus an EWC
        penalty; returns the loss, or None while replay is still warming up."""
        if len(self.memory) < self.batch_size:
            return

        states, actions, rewards, next_states, dones, indices, is_weights = self.memory.sample(self.batch_size)

        # Q(s, a) for the actions actually taken.
        q_values = self.q_network(states)
        q_values = q_values.gather(1, actions.unsqueeze(1)).squeeze(1)

        with torch.no_grad():
            # Double Q-learning: select actions with the online network,
            # evaluate them with the target network.
            next_q_values = self.q_network(next_states)
            next_actions = next_q_values.argmax(dim=1, keepdim=True)
            next_q_values_target = self.target_network(next_states)
            max_next_q_values = next_q_values_target.gather(1, next_actions).squeeze(1)

            # n-step bootstrap target (gamma^n because rewards span n steps).
            target_q_values = rewards + (1 - dones) * (self.gamma ** self.n_step) * max_next_q_values

        # TD errors drive the replay priorities.
        td_errors = torch.abs(q_values - target_q_values).detach().cpu().numpy()
        self.memory.update_priorities(indices, td_errors)

        # Huber loss for stability, weighted by importance sampling.
        loss = nn.functional.huber_loss(q_values, target_q_values, reduction='none')
        loss = (loss * is_weights).mean()

        # EWC regularization: penalise drifting away from parameters that
        # were important (high Fisher information) for previous tasks.
        ewc_loss = 0
        if len(self.previous_tasks_fisher) > 0:
            for task_id in self.previous_tasks_fisher:
                for name, param in self.q_network.named_parameters():
                    if name in self.previous_tasks_fisher[task_id] and name in self.previous_tasks_params[task_id]:
                        fisher = self.previous_tasks_fisher[task_id][name]
                        old_param = self.previous_tasks_params[task_id][name]
                        ewc_loss += (fisher * (param - old_param).pow(2)).sum()

            loss += self.ewc_lambda * ewc_loss

        self.optimizer.zero_grad()
        loss.backward()

        # Clip gradients to avoid explosions.
        torch.nn.utils.clip_grad_norm_(self.q_network.parameters(), self.grad_clip)

        self.optimizer.step()
        self.scheduler.step()  # advance the LR schedule

        self.losses.append(loss.item())

        # Polyak-average the target network toward the online network.
        self.soft_update_target_network()

        self.training_steps += 1

        # Resample NoisyNet noise for the next round of action selection.
        self.q_network.reset_noise()

        return loss.item()

    def soft_update_target_network(self):
        """Polyak update: target <- (1 - alpha) * target + alpha * online."""
        for target_param, param in zip(self.target_network.parameters(), self.q_network.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - self.alpha) + param.data * self.alpha)

    def compute_fisher_information(self, env, num_samples=200):
        """Estimate the diagonal Fisher information of the current policy on
        `env` and snapshot the parameters; both are stored for the EWC penalty.

        Fisher is approximated by the squared gradients of the chosen
        action's Q-value, averaged over `num_samples` rollouts.
        """
        fisher_dict = {}
        param_dict = {}

        # Zero-init the Fisher accumulators and snapshot current parameters.
        for name, param in self.q_network.named_parameters():
            fisher_dict[name] = torch.zeros_like(param.data)
            param_dict[name] = param.data.clone()

        self.q_network.train()  # training mode so NoisyNet noise is active

        for _ in range(num_samples):
            state = env.reset()
            done = False
            while not done:
                # Roll the policy forward one step.
                action = self.select_action(state)
                next_state, _, done, _ = env.step(action)

                # Q-value of the chosen action in the current state.
                state_tensor = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
                q_values = self.q_network(state_tensor)
                action_tensor = torch.tensor([action], device=device)
                q_action = q_values.gather(1, action_tensor.unsqueeze(1)).squeeze(1)

                # Backprop to obtain per-parameter gradients.
                self.optimizer.zero_grad()
                q_action.backward()

                # Accumulate squared gradients (diagonal Fisher estimate).
                for name, param in self.q_network.named_parameters():
                    if param.grad is not None:
                        fisher_dict[name] += param.grad.data.pow(2) / num_samples

                state = next_state

        # Store this task's Fisher information and parameter snapshot.
        self.previous_tasks_fisher[self.current_task_id] = fisher_dict
        self.previous_tasks_params[self.current_task_id] = param_dict

        # Advance the task counters.
        self.current_task_id += 1
        self.task_seen += 1

    def decay_epsilon(self):
        """Multiplicatively decay the exploration rate down to epsilon_min."""
        self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)

    def save(self, path):
        """Serialize networks, optimizer/scheduler state and EWC data."""
        torch.save({
            'q_network': self.q_network.state_dict(),
            'target_network': self.target_network.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict(),
            'previous_tasks_fisher': self.previous_tasks_fisher,
            'previous_tasks_params': self.previous_tasks_params,
            'current_task_id': self.current_task_id,
            'task_seen': self.task_seen,
            'epsilon': self.epsilon,
            'training_steps': self.training_steps
        }, path)
        print(f"Model saved to {path}")

    def load(self, path):
        """Restore agent state from `path`.

        Tensors are remapped onto the current device so checkpoints saved on
        a GPU machine load cleanly on CPU-only hosts.
        """
        checkpoint = torch.load(path, map_location=device)
        self.q_network.load_state_dict(checkpoint['q_network'])
        self.target_network.load_state_dict(checkpoint['target_network'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.scheduler.load_state_dict(checkpoint['scheduler'])
        self.previous_tasks_fisher = checkpoint['previous_tasks_fisher']
        self.previous_tasks_params = checkpoint['previous_tasks_params']
        self.current_task_id = checkpoint['current_task_id']
        self.task_seen = checkpoint['task_seen']
        self.epsilon = checkpoint['epsilon']
        self.training_steps = checkpoint['training_steps']
        print(f"Model loaded from {path}")

    def add_experience_to_task_buffer(self, task_id, state, action, reward, next_state, done):
        """Append an experience to the rehearsal buffer of a given task,
        creating intermediate task buffers on demand."""
        # Grow the buffer list until `task_id` is a valid index.
        while len(self.replay_buffers) <= task_id:
            self.replay_buffers.append(deque(maxlen=10000))  # keep the latest 10000 experiences per task

        self.replay_buffers[task_id].append((state, action, reward, next_state, done))


def evaluate_agent(agent, env, num_episodes=5):
    """Run greedy evaluation episodes and report mean reward and completion.

    The agent acts with ``eval_mode=True`` (no exploration). Completion is
    ``total_visited / total_active`` read from the final step's info dict.

    Args:
        agent: Agent exposing ``select_action(state, eval_mode=True)``.
        env: Environment exposing ``reset()`` and ``step(action)``.
        num_episodes: Number of evaluation episodes to average over.

    Returns:
        Tuple ``(mean_reward, mean_completion_rate)``.
    """
    rewards = []
    completions = []

    for _ in range(num_episodes):
        obs = env.reset()
        ep_return = 0
        finished = False

        while not finished:
            act = agent.select_action(obs, eval_mode=True)
            obs, r, finished, info = env.step(act)
            ep_return += r

        rewards.append(ep_return)
        # max(1, ...) guards against division by zero when nothing is active.
        completions.append(info["total_visited"] / max(1, info["total_active"]))

    return np.mean(rewards), np.mean(completions)


def train(config):
    """Train the continual-RL drone agent across a sequence of tasks.

    Runs ``config["num_episodes"]`` episodes in a DroneEnvironment. Every
    ``config["task_switch_frequency"]`` episodes, Fisher information is
    computed for the current task (EWC) and the environment switches to a
    new task. Metrics are logged to TensorBoard, the agent is periodically
    evaluated and checkpointed, and summary curves are written to
    ``../trajectory_images/training_curves.png``.

    Args:
        config: Hyperparameter dict (seed, num_episodes,
            task_switch_frequency, plus agent/environment settings).

    Returns:
        The trained ContinualRLAgent instance.
    """
    # Seed all RNGs so the run is reproducible.
    set_seed = config["seed"]
    torch.manual_seed(set_seed)
    np.random.seed(set_seed)
    random.seed(set_seed)

    # Build the environment.
    env = DroneEnvironment(config)

    # Infer state/action dimensionality from a reset observation.
    state = env.reset()
    state_dim = len(state)
    action_dim = len(env.actions)

    # Build the agent.
    agent = ContinualRLAgent(state_dim, action_dim, config)

    # TensorBoard logging (timestamped run directory).
    writer = SummaryWriter(f'../logs/drone_rl_{time.strftime("%Y%m%d-%H%M%S")}')

    # Training parameters.
    num_episodes = config["num_episodes"]
    task_switch_frequency = config["task_switch_frequency"]

    # Per-episode training statistics.
    rewards_history = []
    completion_rates = []
    steps_per_episode = []
    losses = []
    eval_rewards = []
    eval_completion_rates = []

    # Main training loop.
    for episode in range(1, num_episodes + 1):
        state = env.reset()
        episode_reward = 0
        done = False
        step = 0
        episode_losses = []

        # Switch tasks on a fixed schedule; consolidate the current task
        # with EWC (Fisher information) just before switching.
        if episode % task_switch_frequency == 0 and episode > 0:
            print(f"\nSwitching task at episode {episode}")
            # Compute Fisher information for the task we are leaving.
            agent.compute_fisher_information(env)
            # Move the environment to the next task.
            env.switch_task()

        while not done:
            # Epsilon-greedy action selection (training mode).
            action = agent.select_action(state)

            # Apply the action to the environment.
            next_state, reward, done, info = env.step(action)

            # Store the transition in the shared replay memory.
            agent.push_experience(state, action, reward, next_state, done)

            # Also keep a copy in the current task's buffer (for rehearsal).
            agent.add_experience_to_task_buffer(agent.current_task_id, state, action, reward, next_state, done)

            # One gradient step; returns None until the buffer is warm.
            loss = agent.update_model()
            if loss is not None:
                episode_losses.append(loss)

            # Advance state and accumulate reward.
            state = next_state
            episode_reward += reward
            step += 1

            # Periodic hard sync of the target network.
            # NOTE(review): this also fires when training_steps is 0 (i.e.
            # before any update has happened) — presumably harmless; confirm.
            if agent.training_steps % agent.target_update_freq == 0:
                agent.target_network.load_state_dict(agent.q_network.state_dict())

        # Record per-episode statistics.
        # NOTE(review): `info` assumes every episode runs at least one step;
        # an env that returns done on the first step still binds it.
        rewards_history.append(episode_reward)
        completion_rate = info["total_visited"] / max(1, info["total_active"])
        completion_rates.append(completion_rate)
        steps_per_episode.append(step)

        if episode_losses:
            avg_loss = np.mean(episode_losses)
            losses.append(avg_loss)
            writer.add_scalar('Loss/train', avg_loss, episode)

        # Anneal exploration once per episode.
        agent.decay_epsilon()

        # Render the trajectory periodically (and for the first episode).
        if episode % 10 == 0 or episode == 1:
            env.render(episode)

        # Periodic greedy evaluation.
        if episode % 50 == 0:
            eval_reward, eval_comp_rate = evaluate_agent(agent, env)
            eval_rewards.append(eval_reward)
            eval_completion_rates.append(eval_comp_rate)

            # Log evaluation metrics to TensorBoard.
            writer.add_scalar('Reward/eval', eval_reward, episode)
            writer.add_scalar('CompletionRate/eval', eval_comp_rate, episode)

            print(
                f"\nEvaluation at episode {episode}: Reward = {eval_reward:.2f}, Completion Rate = {eval_comp_rate:.2f}")

        # Periodic checkpointing.
        if episode % 500 == 0:
            agent.save(f"../models/drone_agent_ep{episode}.pt")

        # Per-episode TensorBoard scalars.
        writer.add_scalar('Reward/train', episode_reward, episode)
        writer.add_scalar('CompletionRate/train', completion_rate, episode)
        writer.add_scalar('Steps/episode', step, episode)
        writer.add_scalar('Epsilon', agent.epsilon, episode)

        # Console progress report.
        if episode % 10 == 0:
            print(f"Episode {episode}/{num_episodes} | Reward: {episode_reward:.2f} | "
                  f"Completion: {completion_rate:.2f} | Steps: {step} | Epsilon: {agent.epsilon:.3f}")

    # Final, longer evaluation.
    final_reward, final_comp_rate = evaluate_agent(agent, env, num_episodes=10)
    print(f"\nFinal Evaluation: Reward = {final_reward:.2f}, Completion Rate = {final_comp_rate:.2f}")

    # Save the final model.
    agent.save("../models/drone_agent_final.pt")

    # Plot and save the training curves.
    plt.figure(figsize=(15, 10))

    plt.subplot(2, 2, 1)
    plt.plot(rewards_history)
    plt.title('Episode Rewards')
    plt.xlabel('Episode')
    plt.ylabel('Reward')

    plt.subplot(2, 2, 2)
    plt.plot(completion_rates)
    plt.title('Completion Rates')
    plt.xlabel('Episode')
    plt.ylabel('Completion Rate')

    plt.subplot(2, 2, 3)
    plt.plot(steps_per_episode)
    plt.title('Steps per Episode')
    plt.xlabel('Episode')
    plt.ylabel('Steps')

    plt.subplot(2, 2, 4)
    plt.plot(losses)
    plt.title('Training Loss')
    plt.xlabel('Episode')
    plt.ylabel('Loss')

    plt.tight_layout()
    plt.savefig('../trajectory_images/training_curves.png')

    # Close the TensorBoard writer.
    writer.close()

    return agent


if __name__ == "__main__":
    # Hyperparameter configuration for the training run.
    config = {
        "seed": 42,
        "service_radius": 10,
        "gamma": 0.99,
        "learning_rate": 1e-4,
        "epsilon_decay": 0.995,
        "batch_size": 128,
        "memory_capacity": 100000,
        "target_update_freq": 500,
        "ewc_lambda": 100,  # EWC regularization strength
        "num_episodes": 2000,
        "task_switch_frequency": 500,  # switch task every 500 episodes
        "alpha": 0.01,  # soft target-network update rate
        "grad_clip": 10.0,  # gradient-clipping threshold
        "per_alpha": 0.7,  # prioritized experience replay exponent
        "per_beta": 0.5,  # initial importance-sampling beta
        "per_beta_increment": 0.0005,  # beta growth rate
        "n_step": 3,  # n-step returns
    }

    # Launch training.
    print("Starting training...")
    agent = train(config)
    print("Training completed!")


