import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import copy
import math
import os
import time
from torch.utils.tensorboard import SummaryWriter
from torch.nn import LayerNorm, MultiheadAttention

# Configure matplotlib to render CJK glyphs (SimHei) and minus signs correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Output directories for trajectory plots, model checkpoints and TensorBoard logs.
os.makedirs("../trajectory_images", exist_ok=True)
os.makedirs("../models", exist_ok=True)
os.makedirs("../logs", exist_ok=True)

# Fix random seeds for reproducibility.
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)

# Replay-buffer record: one environment transition.
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])


# 使用SumTree实现高效优先级采样
class SumTree:
    """Array-backed binary sum-tree for O(log n) proportional sampling.

    The last ``capacity`` slots of ``tree`` are leaves holding priorities;
    every internal node stores the sum of its two children, so the root
    (``tree[0]``) is the total priority mass.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        # Complete binary tree flattened into an array:
        # capacity leaves + (capacity - 1) internal nodes.
        self.tree = np.zeros(2 * capacity - 1)
        self.data = np.zeros(capacity, dtype=object)
        self.write = 0       # next leaf slot to overwrite (ring buffer)
        self.n_entries = 0   # number of leaves currently filled

    def _propagate(self, idx, change):
        """Add *change* to every ancestor of tree node *idx*."""
        while idx != 0:
            idx = (idx - 1) // 2
            self.tree[idx] += change

    def _retrieve(self, idx, s):
        """Descend from node *idx* to the leaf whose cumulative range holds *s*."""
        while True:
            left = 2 * idx + 1
            if left >= len(self.tree):
                return idx
            if s <= self.tree[left]:
                idx = left
            else:
                s -= self.tree[left]
                idx = left + 1

    def total(self):
        """Total priority mass (value of the root node)."""
        return self.tree[0]

    def add(self, priority, data):
        """Store *data* with *priority*, overwriting the oldest slot when full."""
        leaf = self.write + self.capacity - 1
        self.data[self.write] = data
        self.update(leaf, priority)
        self.write = (self.write + 1) % self.capacity
        self.n_entries = min(self.n_entries + 1, self.capacity)

    def update(self, idx, priority):
        """Set the priority stored at tree node *idx* and refresh ancestor sums."""
        delta = priority - self.tree[idx]
        self.tree[idx] = priority
        self._propagate(idx, delta)

    def get(self, s):
        """Return (tree index, priority, data) for the leaf selected by mass *s*."""
        leaf = self._retrieve(0, s)
        return leaf, self.tree[leaf], self.data[leaf - self.capacity + 1]


class HybridReplayBuffer:
    """Hybrid experience replay combining prioritized and uniform sampling.

    A fraction ``1 - uniform_ratio`` of each batch is drawn proportionally to
    TD-error priority through a SumTree; the remainder is drawn uniformly
    from a parallel deque, keeping some unbiased data in every batch.
    """

    def __init__(self, capacity, alpha=0.6, beta=0.4, beta_increment=0.001, uniform_ratio=0.2):
        self.alpha = alpha                  # priority exponent
        self.beta = beta                    # importance-sampling exponent (annealed toward 1)
        self.beta_increment = beta_increment
        self.tree = SumTree(capacity)
        self.uniform_buffer = deque(maxlen=capacity)
        self.capacity = capacity
        self.eps = 1e-5                     # keeps priorities strictly positive
        self.uniform_ratio = uniform_ratio  # fraction of the batch sampled uniformly

    def _get_priority(self, error):
        """Map a TD error to a sampling priority."""
        return (np.abs(error) + self.eps) ** self.alpha

    def push(self, state, action, reward, next_state, done):
        """Insert a transition with the current maximum leaf priority so new
        experiences are sampled at least once before being down-weighted."""
        experience = Experience(state, action, reward, next_state, done)
        max_priority = np.max(self.tree.tree[-self.tree.capacity:]) if self.tree.n_entries > 0 else 1.0
        self.tree.add(max_priority, experience)
        self.uniform_buffer.append(experience)

    def sample(self, batch_size):
        """Draw a mixed batch.

        Returns (states, actions, rewards, next_states, dones, idxs,
        is_weights); ``idxs`` holds SumTree indices, with -1 marking
        uniformly-drawn samples whose priority is never updated.
        """
        # Split the batch between prioritized and uniform draws.
        priority_size = int(batch_size * (1 - self.uniform_ratio))
        uniform_size = batch_size - priority_size

        batch = []
        idxs = []
        priorities = []

        # Stratified proportional sampling over the SumTree.
        if priority_size > 0 and self.tree.n_entries > 0:
            segment = self.tree.total() / priority_size
            for i in range(priority_size):
                a = segment * i
                b = segment * (i + 1)
                s = random.uniform(a, b)
                idx, priority, data = self.tree.get(s)
                batch.append(data)
                idxs.append(idx)
                priorities.append(priority)

        # Uniform part: such samples carry the mean tree priority and a
        # virtual index of -1 (hoisted out of the loop — it is invariant).
        if uniform_size > 0 and len(self.uniform_buffer) > 0:
            avg_priority = self.tree.total() / max(1, self.tree.n_entries)
            uniform_samples = random.sample(self.uniform_buffer, min(uniform_size, len(self.uniform_buffer)))
            for data in uniform_samples:
                batch.append(data)
                priorities.append(avg_priority)
                idxs.append(-1)

        # Top up with random draws if either store came up short.
        # BUG FIX: the original loop had no exit when both stores were
        # empty and spun forever; bail out and return a short batch instead.
        while len(batch) < batch_size:
            if self.tree.n_entries > 0:
                idx, priority, data = self.tree.get(random.uniform(0, self.tree.total()))
                batch.append(data)
                idxs.append(idx)
                priorities.append(priority)
            elif len(self.uniform_buffer) > 0:
                data = random.choice(self.uniform_buffer)
                batch.append(data)
                priorities.append(1.0)
                idxs.append(-1)
            else:
                break  # nothing left to sample from

        # Stack on the CPU first: torch.tensor over a Python list of ndarrays
        # is much slower than converting one contiguous array.
        states = torch.tensor(np.asarray([e.state for e in batch]), dtype=torch.float).to(device)
        actions = torch.tensor([e.action for e in batch], dtype=torch.long).to(device)
        rewards = torch.tensor([e.reward for e in batch], dtype=torch.float).to(device)
        next_states = torch.tensor(np.asarray([e.next_state for e in batch]), dtype=torch.float).to(device)
        dones = torch.tensor([e.done for e in batch], dtype=torch.float).to(device)

        # Importance-sampling weights, annealing beta toward 1.
        self.beta = np.min([1.0, self.beta + self.beta_increment])
        sampling_weights = np.array(priorities) / self.tree.total()
        is_weights = np.power(self.tree.n_entries * sampling_weights, -self.beta)
        is_weights /= is_weights.max()
        is_weights = torch.tensor(is_weights, dtype=torch.float).to(device)

        return states, actions, rewards, next_states, dones, idxs, is_weights

    def update_priorities(self, indices, errors):
        """Refresh leaf priorities after a learning step (skip -1 uniform indices)."""
        for idx, error in zip(indices, errors):
            if idx >= 0:
                priority = self._get_priority(error)
                self.tree.update(idx, priority)

    def __len__(self):
        return max(self.tree.n_entries, len(self.uniform_buffer))


class EnhancedAttention(nn.Module):
    """Multi-head self-attention with sinusoidal positional encoding,
    dropout, and a residual connection followed by LayerNorm (post-norm)."""

    def __init__(self, embed_dim, num_heads):
        super(EnhancedAttention, self).__init__()
        self.multihead_attn = MultiheadAttention(embed_dim, num_heads, batch_first=True)
        self.layer_norm = LayerNorm(embed_dim)
        self.dropout = nn.Dropout(0.1)

    def forward(self, x):
        # x: [batch_size, seq_len, embed_dim]
        # Add sinusoidal positional encoding (Vaswani et al. form).
        seq_len, embed_dim = x.size(1), x.size(2)
        position = torch.arange(0, seq_len, dtype=torch.float).unsqueeze(1).to(x.device)
        # BUG FIX: the original line had unbalanced parentheses (SyntaxError)
        # and tried to call .to() on a Python float; close the scalar factor
        # first, then move the resulting tensor to x's device.
        div_term = torch.exp(
            torch.arange(0, embed_dim, 2).float() * (-math.log(10000.0) / embed_dim)
        ).to(x.device)
        pos_enc = torch.zeros(1, seq_len, embed_dim).to(x.device)
        pos_enc[0, :, 0::2] = torch.sin(position * div_term)
        pos_enc[0, :, 1::2] = torch.cos(position * div_term)
        x = x + pos_enc

        # Self-attention with residual + LayerNorm.
        attn_output, _ = self.multihead_attn(x, x, x)
        attn_output = self.dropout(attn_output)
        out = self.layer_norm(x + attn_output)
        return out


class DuelingQNetwork(nn.Module):
    """Dueling DQN head on top of a LayerNorm/GELU MLP feature extractor
    with a self-attention refinement stage.

    Q(s, a) = V(s) + A(s, a) - mean_a A(s, a).
    """

    def __init__(self, state_dim, action_dim):
        super(DuelingQNetwork, self).__init__()
        # Shared feature extractor.
        self.feature_layer = nn.Sequential(
            nn.Linear(state_dim, 512),
            LayerNorm(512),
            nn.GELU(),
            nn.Linear(512, 512),
            LayerNorm(512),
            nn.GELU()
        )

        # Attention branch: project to 256-d and apply self-attention.
        # BUG FIX: the original pipeline ended with Linear(256, 1) + Softmax,
        # collapsing features to one scalar per sample; the 256-input
        # value/advantage streams then crashed with a shape mismatch at
        # runtime. The scalar-score tail is removed so the attended 256-d
        # features reach both streams, as forward() expects.
        self.attention = nn.Sequential(
            nn.Linear(512, 256),
            nn.GELU(),
            EnhancedAttention(256, 4)
        )

        # State-value stream.
        self.value_stream = nn.Sequential(
            nn.Linear(256, 128),
            nn.GELU(),
            nn.Linear(128, 1)
        )

        # Action-advantage stream.
        self.advantage_stream = nn.Sequential(
            nn.Linear(256, 128),
            nn.GELU(),
            nn.Linear(128, action_dim)
        )

        # Kaiming initialization for all linear layers.
        self.apply(self._init_weights)

    def _init_weights(self, module):
        """He-normal weights, small positive bias on every Linear layer."""
        if isinstance(module, nn.Linear):
            nn.init.kaiming_normal_(module.weight, nonlinearity='relu')
            if module.bias is not None:
                nn.init.constant_(module.bias, 0.1)

    def forward(self, x):
        features = self.feature_layer(x)
        features = features.unsqueeze(1)  # add sequence dim -> [batch, 1, 512]

        # Attention refinement.
        attn_features = self.attention(features)  # [batch, 1, 256]
        attn_features = attn_features.squeeze(1)  # [batch, 256]

        values = self.value_stream(attn_features)
        advantages = self.advantage_stream(attn_features)

        # Dueling combination (mean-subtracted advantages for identifiability).
        qvals = values + (advantages - advantages.mean(dim=1, keepdim=True))
        return qvals


class User:
    """A ground user at a fixed (x, y) location with service-tracking state."""

    def __init__(self, x, y):
        # Fixed position in the environment.
        self.x = x
        self.y = y
        # Whether this user belongs to the currently active task.
        self.active = False
        # Whether the drone has already served this user this episode.
        self.visited = False
        # Step index of the most recent service; large negative means never.
        self.last_visit_step = -1000


class DroneEnvironment:
    """Single-UAV coverage environment on a 100x100 m area.

    The drone must fly within ``service_radius`` of each of the 8 currently
    active users. Rewards shape the drone toward unvisited users; an episode
    ends after ``max_steps`` or once every active user has been served.
    """

    def __init__(self, config):
        self.width = 100
        self.height = 100
        self.service_radius = config["service_radius"]
        self.max_steps = 300
        self.steps = 0
        self.episode_num = 0

        # Fixed user population; a task activates a subset of them.
        self.all_users = []
        for _ in range(10):
            x = random.uniform(0, self.width)
            y = random.uniform(0, self.height)
            self.all_users.append(User(x, y))

        # Activate the initial task of 8 users.
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False

        # Drone state.
        self.drone_x = random.uniform(0, self.width)
        self.drone_y = random.uniform(0, self.height)
        self.speed = 5

        # Trajectory log for rendering.
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        # Action space: 8 compass directions + hover.
        self.actions = ["N", "NE", "E", "SE", "S", "SW", "W", "NW", "hover"]

    def reset(self):
        """Start a new episode; returns the initial state vector."""
        self.episode_num += 1
        # Reset per-episode user service flags.
        for user in self.all_users:
            user.visited = False
            user.last_visit_step = -1000

        # Curriculum: early episodes start the drone near the weighted center
        # of the active users.
        if self.episode_num < 500:  # extended curriculum phase
            # Weighted center of active users (unserved users weigh more).
            # NOTE(review): all visited flags were just cleared above, so the
            # weight is always 2.0 here — confirm whether visited-dependent
            # weighting was intended at reset time.
            center_x, center_y = 0, 0
            total_weight = 0
            for user in self.active_users:
                weight = 2.0 if not user.visited else 0.5
                center_x += user.x * weight
                center_y += user.y * weight
                total_weight += weight

            if total_weight > 0:
                center_x /= total_weight
                center_y /= total_weight
            else:
                center_x = random.uniform(0, self.width)
                center_y = random.uniform(0, self.height)

            # Place the drone at a random offset around that center.
            angle = random.uniform(0, 2 * math.pi)
            distance = random.uniform(15, 35)  # 15-35 m ring
            self.drone_x = max(0, min(self.width, center_x + distance * math.cos(angle)))
            self.drone_y = max(0, min(self.height, center_y + distance * math.sin(angle)))
        else:
            # Later episodes: fully random start.
            self.drone_x = random.uniform(0, self.width)
            self.drone_y = random.uniform(0, self.height)

        self.steps = 0

        # Clear trajectory.
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        # Reset progress tracking used by the step() reward shaping.
        if hasattr(self, 'prev_visited_count'):
            del self.prev_visited_count

        return self.get_state()

    def switch_task(self):
        """Re-sample the active user set, simulating a task change."""
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False
            user.last_visit_step = -1000

    def get_state(self):
        """Build the 67-dim observation vector (all features normalized)."""
        state = []

        # 1. Drone coordinates (normalized).
        state.extend([self.drone_x / self.width, self.drone_y / self.height])

        # 2. Per-user features.
        unvisited_count = 0
        unvisited_vec = [0, 0]  # mean direction toward unserved users
        nearest_unvisited_dist = float('inf')

        for user in self.all_users:
            # Active flag.
            active = 1.0 if user.active else 0.0
            state.append(active)

            # Already-served flag.
            visited = 1.0 if user.visited else 0.0
            state.append(visited)

            # Relative position of the user w.r.t. the drone (normalized).
            dx = (user.x - self.drone_x) / self.width
            dy = (user.y - self.drone_y) / self.height
            state.extend([dx, dy])

            # Distance to the user (log-scaled).
            distance = math.sqrt((user.x - self.drone_x) ** 2 + (user.y - self.drone_y) ** 2)
            scaled_dist = math.log(1 + distance) / math.log(1 + 100)  # 100 = arena width
            state.append(scaled_dist)

            # In-service-range flag.
            in_range = 1.0 if distance < self.service_radius else 0.0
            state.append(in_range)

            # Accumulate direction toward unserved active users.
            if user.active and not user.visited:
                unvisited_count += 1
                unvisited_vec[0] += dx
                unvisited_vec[1] += dy

                # Track nearest unserved user.
                if distance < nearest_unvisited_dist:
                    nearest_unvisited_dist = distance

        # 3. Mean direction toward unserved users.
        if unvisited_count > 0:
            unvisited_vec[0] /= unvisited_count
            unvisited_vec[1] /= unvisited_count
        state.extend(unvisited_vec)

        # 4. Nearest unserved-user distance (normalized, capped at 100).
        state.append(min(nearest_unvisited_dist, 100) / 100)

        # 5. Episode progress (steps, normalized).
        state.append(self.steps / self.max_steps)

        # 6. Fraction of active users already served.
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        state.append(visited_count / 8)  # 8 active users

        return np.array(state)

    def step(self, action_idx):
        """Apply an action; returns (next_state, reward, done, info)."""
        action = self.actions[action_idx]
        prev_x, prev_y = self.drone_x, self.drone_y

        # Move (8 compass directions + hover), clamped to the arena.
        if action == "N":
            self.drone_y = min(self.height, self.drone_y + self.speed)
        elif action == "S":
            self.drone_y = max(0, self.drone_y - self.speed)
        elif action == "E":
            self.drone_x = min(self.width, self.drone_x + self.speed)
        elif action == "W":
            self.drone_x = max(0, self.drone_x - self.speed)
        elif action == "NE":
            self.drone_x = min(self.width, self.drone_x + self.speed * 0.7071)
            self.drone_y = min(self.height, self.drone_y + self.speed * 0.7071)
        elif action == "SE":
            self.drone_x = min(self.width, self.drone_x + self.speed * 0.7071)
            self.drone_y = max(0, self.drone_y - self.speed * 0.7071)
        elif action == "SW":
            self.drone_x = max(0, self.drone_x - self.speed * 0.7071)
            self.drone_y = max(0, self.drone_y - self.speed * 0.7071)
        elif action == "NW":
            self.drone_x = max(0, self.drone_x - self.speed * 0.7071)
            self.drone_y = min(self.height, self.drone_y + self.speed * 0.7071)
        # "hover": no movement.

        # Actual distance moved (may be shorter at the boundary).
        movement_cost = math.sqrt((self.drone_x - prev_x) ** 2 + (self.drone_y - prev_y) ** 2)

        # Log trajectory.
        self.trajectory_x.append(self.drone_x)
        self.trajectory_y.append(self.drone_y)

        # Shaped reward (tuned; see individual terms below).
        reward = 0
        serviced_users = 0
        newly_serviced = 0
        min_distance = float('inf')
        max_proximity = 0  # best closeness score this step

        # 1. Small movement penalty.
        reward -= 0.003 * movement_cost

        # 2. Service rewards over all active users.
        for user in self.all_users:
            if user.active:
                dx = user.x - self.drone_x
                dy = user.y - self.drone_y
                distance = math.sqrt(dx ** 2 + dy ** 2)

                # Direction bonus: reward moving toward unserved users.
                if not user.visited:
                    movement_dx = self.drone_x - prev_x
                    movement_dy = self.drone_y - prev_y

                    if movement_dx != 0 or movement_dy != 0:
                        movement_mag = math.sqrt(movement_dx ** 2 + movement_dy ** 2)
                        dot_product = (dx * movement_dx + dy * movement_dy) / (distance * movement_mag + 1e-8)
                        direction_bonus = max(0, dot_product) * 1.0
                        reward += direction_bonus

                # Track nearest unserved user and closeness score.
                if not user.visited and distance < min_distance:
                    min_distance = distance
                    max_proximity = max(max_proximity, 30 / (distance + 1))  # effective within ~30 m

                if distance < self.service_radius:
                    serviced_users += 1
                    if not user.visited:
                        reward += 30  # first-time service bonus
                        newly_serviced += 1
                        user.visited = True
                        user.last_visit_step = self.steps
                    else:
                        # Periodic re-service bonus.
                        steps_since_last = self.steps - user.last_visit_step
                        if steps_since_last > 20:  # at least 20 steps since last visit
                            reward += 5
                            user.last_visit_step = self.steps

        # 3. Exponential proximity reward toward the nearest unserved user.
        if min_distance < float('inf'):
            proximity_reward = 25 * math.exp(-min_distance / 20)  # 20 m decay length
            reward += proximity_reward

            # Extra closeness bonus.
            reward += max_proximity * 1.5

        # 4. Progress reward.
        active_count = sum(1 for user in self.all_users if user.active)
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)

        # Non-linear bonus per newly served user this step.
        progress = visited_count - (self.prev_visited_count if hasattr(self, 'prev_visited_count') else 0)
        if progress > 0:
            progress_reward = 15 + 8 * (visited_count / max(active_count, 1))
            reward += progress_reward * progress

        # Track served count for next step's progress delta.
        # BUG FIX: the original if/else assigned the same value in both
        # branches; collapsed to a single assignment.
        self.prev_visited_count = visited_count

        # 5. Completion reward (plus time-saving bonus).
        if visited_count == active_count and active_count > 0:
            time_bonus = 400 * (1 - self.steps / self.max_steps)
            reward += 400 + time_bonus

        # 6. Exploration bonus proportional to the unserved fraction.
        if visited_count < active_count:
            exploration_bonus = 10 * (1 - visited_count / active_count)
            reward += exploration_bonus

        # 7. Time penalty to encourage fast completion.
        reward -= 0.1 * self.steps

        # Advance time.
        self.steps += 1
        done = self.steps >= self.max_steps

        # Early termination once every active user is served.
        if visited_count == active_count and active_count > 0:
            done = True
            reward += 150  # extra completion bonus

        info = {
            "serviced_users": serviced_users,
            "newly_serviced": newly_serviced,
            "total_visited": visited_count,
            "total_active": active_count
        }

        return self.get_state(), reward, done, info

    def render(self, episode, step=None):
        """Save a trajectory snapshot to ../trajectory_images/.

        ``step`` is accepted for interface compatibility but unused; the
        filename embeds self.steps instead.
        """
        plt.figure(figsize=(10, 10))
        plt.xlim(0, self.width)
        plt.ylim(0, self.height)

        # Drone trajectory.
        plt.plot(self.trajectory_x, self.trajectory_y, 'ro-', alpha=0.6, label='Drone Path')

        # Current drone position.
        plt.scatter(self.drone_x, self.drone_y, c='red', s=100, marker='*', label='Drone')

        # Service radius.
        circle = plt.Circle((self.drone_x, self.drone_y), self.service_radius,
                            color='blue', fill=False, alpha=0.3, linestyle='--')
        plt.gca().add_patch(circle)

        # Users.
        for user in self.all_users:
            if user.active and user.visited:
                color = 'limegreen'  # served active user
                marker = 'o'
            elif user.active and not user.visited:
                color = 'green'  # unserved active user
                marker = 'o'
            else:
                color = 'gray'  # inactive user
                marker = 'x'

            plt.scatter(user.x, user.y, c=color, s=80, marker=marker)

            # Highlight users currently within service range.
            distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)
            if distance < self.service_radius and user.active:
                circle = plt.Circle((user.x, user.y), 2, color='green', alpha=0.5)
                plt.gca().add_patch(circle)

        # Legend.
        visited_handle = plt.scatter([], [], c='limegreen', marker='o', label='Visited User')
        active_handle = plt.scatter([], [], c='green', marker='o', label='Active User')
        inactive_handle = plt.scatter([], [], c='gray', marker='x', label='Inactive User')
        plt.legend(handles=[visited_handle, active_handle, inactive_handle])

        # Title with progress info.
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        active_count = sum(1 for user in self.all_users if user.active)
        plt.title(f'Episode {episode}: Visited {visited_count}/{active_count} users (Step {self.steps})')
        plt.xlabel('X Position (m)')
        plt.ylabel('Y Position (m)')
        plt.grid(True)

        # BUG FIX: the module setup creates '../trajectory_images', but the
        # original saved to 'trajectory_images/...' relative to the CWD,
        # which may not exist and makes savefig fail. Use the created dir.
        filename = f'../trajectory_images/episode_{episode}_step_{self.steps}.png'
        plt.savefig(filename)
        plt.close()


class ContinualRLAgent:
    """Continual-learning DQN agent.

    Combines Double DQN targets, a Dueling network, hybrid prioritized
    replay, Polyak (soft) target updates, and Elastic Weight Consolidation
    (EWC) regularization weighted by inter-task correlation and importance
    to mitigate catastrophic forgetting across tasks.
    """

    def __init__(self, state_dim, action_dim, config):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = config["gamma"]
        self.lr = config["learning_rate"]
        self.epsilon = 1.0
        self.epsilon_decay = config["epsilon_decay"]
        self.epsilon_min = config["epsilon_min"]
        self.batch_size = config["batch_size"]
        self.ewc_lambda_base = config["ewc_lambda_base"]
        self.target_update_freq = config["target_update_freq"]
        self.alpha = config["alpha"]  # Polyak coefficient for soft target updates (see update_target_network)
        self.grad_clip = config["grad_clip"]
        self.task_correlation = {}  # pairwise task-correlation weights used by EWC
        self.uncertainty_weight = 0.5  # uncertainty-driven exploration weight
        self.uncertainty_decay = 0.995  # per-update decay of the uncertainty weight

        # Online and target networks (enhanced Dueling DQN architecture).
        self.q_network = DuelingQNetwork(state_dim, action_dim).to(device)
        self.target_network = DuelingQNetwork(state_dim, action_dim).to(device)
        self.target_network.load_state_dict(self.q_network.state_dict())

        # AdamW optimizer (with weight decay).
        self.optimizer = optim.AdamW(
            self.q_network.parameters(),
            lr=self.lr,
            weight_decay=1e-4
        )

        # Cosine-annealing learning-rate schedule.
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer,
            T_max=5000,  # half-period length
            eta_min=self.lr / 100
        )

        # Hybrid prioritized/uniform experience replay.
        self.memory = HybridReplayBuffer(config["memory_capacity"], uniform_ratio=0.3)

        # Continual-learning state.
        self.previous_tasks_fisher = {}  # per-task Fisher information matrices
        self.previous_tasks_params = {}  # per-task snapshot of optimal parameters
        self.task_memory = {}  # per-task memory summaries
        self.task_importance = {}  # task importance weights
        self.current_task_id = 0
        self.task_seen = 0

        # Training statistics.
        self.training_steps = 0

    def select_action(self, state, eval_mode=False):
        """Epsilon-greedy action selection with coverage- and
        uncertainty-adaptive exploration; greedy when eval_mode is True."""
        # Adaptive exploration based on coverage and uncertainty.
        coverage_ratio = state[-1]  # last state feature: fraction of users served

        if not eval_mode:
            # Uncertainty-driven exploration component.
            uncertainty_exploration = self.uncertainty_weight * (1 - coverage_ratio)

            # Boost exploration early in each task.
            task_progress = min(1.0, self.current_task_episodes / 300) if hasattr(self,
                                                                                  'current_task_episodes') else 0.0
            exploration_boost = 1.0 + (1 - task_progress) * 1.5  # up to +150% early in a task

            # Combined exploration rate, capped.
            exploration_rate = min(
                self.epsilon * exploration_boost * (1 + 2 * (1 - coverage_ratio)) + uncertainty_exploration,
                0.95  # hard cap at 95%
            )
            if random.random() < exploration_rate:
                return random.randrange(self.action_dim)

        # Greedy action from the online network.
        state_tensor = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
        with torch.no_grad():
            q_values = self.q_network(state_tensor)
        return q_values.argmax().item()

    def update_model(self):
        """One gradient step: Double-DQN TD loss (IS-weighted) + EWC penalty.

        Returns the scalar loss (0.0 if the buffer is not yet warm).
        """
        if len(self.memory) < self.batch_size:
            return 0.0

        # Sample from the hybrid replay buffer.
        states, actions, rewards, next_states, dones, indices, is_weights = self.memory.sample(self.batch_size)

        # Current Q-values for the taken actions.
        current_q = self.q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)

        # Double DQN: online net selects actions, target net evaluates them.
        with torch.no_grad():
            # Online network picks the best next actions.
            next_actions = self.q_network(next_states).argmax(1).unsqueeze(1)
            # Target network evaluates those actions.
            next_q = self.target_network(next_states).gather(1, next_actions).squeeze(1)
            # Bootstrapped target.
            target_q = rewards + (1 - dones) * self.gamma * next_q

        # TD errors drive the priority update.
        td_errors = torch.abs(current_q - target_q).detach().cpu().numpy()
        self.memory.update_priorities(indices, td_errors)

        # Importance-sampling-weighted squared TD loss.
        td_loss = (is_weights * (current_q - target_q.detach()).pow(2)).mean()

        # EWC penalty against forgetting, weighted by task correlation
        # and importance.
        ewc_loss = 0
        total_weight = 0
        if self.task_seen > 0:
            for task_id in range(self.task_seen):
                # Correlation with and importance of the past task.
                correlation = self.task_correlation.get((self.current_task_id, task_id), 0.5)
                importance = self.task_importance.get(task_id, 0.5)

                # Combined per-task weight.
                weight = correlation * importance
                total_weight += weight

                for name, param in self.q_network.named_parameters():
                    if name in self.previous_tasks_fisher[task_id]:
                        fisher = self.previous_tasks_fisher[task_id][name].to(device)
                        old_param = self.previous_tasks_params[task_id][name].to(device)
                        ewc_loss += weight * (fisher * (param - old_param).pow(2)).sum()

        # Regularization strength grows with the number of tasks seen.
        ewc_lambda = self.ewc_lambda_base * (1 - 0.5 ** self.task_seen)

        # Total loss = TD loss + normalized EWC penalty.
        loss = td_loss
        if total_weight > 0:
            loss += (ewc_lambda / total_weight) * ewc_loss

        # Gradient step.
        self.optimizer.zero_grad()
        loss.backward()

        # Clip gradients to avoid explosions.
        torch.nn.utils.clip_grad_norm_(self.q_network.parameters(), self.grad_clip)

        self.optimizer.step()
        self.scheduler.step()  # advance the LR schedule

        # Bookkeeping.
        self.training_steps += 1

        # Periodic (soft) target-network update.
        if self.training_steps % self.target_update_freq == 0:
            self.update_target_network()

        # Decay the uncertainty-exploration weight.
        self.uncertainty_weight *= self.uncertainty_decay

        return loss.item()

    def update_target_network(self):
        """Polyak-average the online weights into the target network."""
        for target_param, param in zip(self.target_network.parameters(), self.q_network.parameters()):
            target_param.data.copy_(self.alpha * param.data + (1 - self.alpha) * target_param.data)

    def calculate_fisher_information(self, env, num_samples=200):
        """Estimate the diagonal Fisher information matrix for EWC.

        Rolls out the greedy policy in *env* for *num_samples* episodes,
        accumulating squared gradients of the log-softmax action
        log-probability. Returns (fisher_dict, param_dict) keyed by
        parameter name.
        """
        fisher_dict = {}
        param_dict = {}

        # Initialize accumulators and snapshot current parameters.
        for name, param in self.q_network.named_parameters():
            fisher_dict[name] = torch.zeros_like(param).to(device)
            param_dict[name] = param.data.clone().to(device)

        # Roll out and accumulate squared gradients.
        self.q_network.eval()
        for _ in range(num_samples):
            state = env.reset()
            done = False

            while not done:
                action = self.select_action(state, eval_mode=True)
                next_state, reward, done, _ = env.step(action)

                # Forward pass for the current state.
                state_tensor = torch.tensor(state, dtype=torch.float).unsqueeze(0).to(device)
                q_values = self.q_network(state_tensor)

                # Log-probability of the taken action (softmax over Q-values).
                log_prob = torch.log_softmax(q_values, dim=1)[0, action]

                # Backprop to get per-parameter gradients.
                self.optimizer.zero_grad()
                (-log_prob).backward()

                # Accumulate squared gradients (diagonal Fisher).
                for name, param in self.q_network.named_parameters():
                    if param.grad is not None:
                        fisher_dict[name] += param.grad.data.pow(2)

                state = next_state
                if done:
                    break

        # Normalize by the number of rollouts.
        for name in fisher_dict:
            fisher_dict[name] /= num_samples

        self.q_network.train()
        return fisher_dict, param_dict

    def finish_task(self, env):
        """Consolidate the finished task: store Fisher/params, compute task
        correlation and importance, and save a checkpoint.

        NOTE(review): reads self.task_completion_rate (and optionally
        self.task_rewards), which are not assigned anywhere in this class —
        confirm they are set by the training loop before this is called.
        """
        # Store Fisher information and parameter snapshot for this task.
        fisher, params = self.calculate_fisher_information(env, num_samples=200)
        self.previous_tasks_fisher[self.current_task_id] = fisher
        self.previous_tasks_params[self.current_task_id] = params

        # Task correlation from the overlap of active-user positions.
        overlap_ratios = []
        for prev_id in range(self.current_task_id):
            prev_users = set(self.task_memory[prev_id]["active_users"])
            current_users = set([(user.x, user.y) for user in env.active_users])
            overlap = len(prev_users & current_users) / max(len(prev_users), 1)
            overlap_ratios.append(overlap)

        # Record symmetric correlations (higher overlap => higher correlation).
        for i in range(self.task_seen):
            correlation = overlap_ratios[i] if i < len(overlap_ratios) else 0.3
            self.task_correlation[(self.current_task_id, i)] = max(0.1, min(0.9, correlation))
            self.task_correlation[(i, self.current_task_id)] = self.task_correlation[(self.current_task_id, i)]

        # Store a summary of this task.
        self.task_memory[self.current_task_id] = {
            "active_users": [(user.x, user.y) for user in env.active_users],
            "avg_reward": np.mean(self.task_rewards) if hasattr(self, 'task_rewards') else 0,
            "completion_rate": self.task_completion_rate
        }

        # Refresh task-importance weights.
        self.update_task_importance()

        print(f"任务 {self.current_task_id} 完成，相关任务数: {len(overlap_ratios)}")

        self.task_seen += 1
        self.current_task_id += 1
        self.ewc_lambda_base = max(1000, self.ewc_lambda_base * 0.9)  # decay EWC strength, floor at 1000

        # Checkpoint the online network.
        torch.save(self.q_network.state_dict(), f"../models/task_{self.current_task_id}.pth")

    def update_task_importance(self):
        """Recompute normalized task-importance weights from past performance."""
        total_performance = 0
        for task_id, memory in self.task_memory.items():
            # Importance = average reward * completion rate.
            importance = memory["avg_reward"] * memory["completion_rate"]
            self.task_importance[task_id] = importance
            total_performance += importance

        # Normalize so importances sum to 1 (when positive).
        if total_performance > 0:
            for task_id in self.task_importance:
                self.task_importance[task_id] /= total_performance


def train():
    """Train the continual-RL drone agent across a sequence of tasks.

    Pipeline: warm up the replay buffer, run the main episode loop with
    convergence-gated task switching, log metrics to TensorBoard, save
    best/periodic checkpoints, plot reward and coverage curves, and finish
    with a per-task evaluation of the best saved model.

    Relies on the module-level ``DroneEnvironment`` and ``ContinualRLAgent``
    classes and the global ``device``.
    """
    # Enhanced training configuration -- key tuning knobs.
    config = {
        "learning_rate": 0.0003,  # raised learning rate
        "gamma": 0.998,  # raised discount factor
        "epsilon_decay": 0.998,  # slower exploration decay
        "epsilon_min": 0.02,  # lower exploration floor
        "batch_size": 512,
        "memory_capacity": 500000,  # larger replay memory
        "service_radius": 12,
        "ewc_lambda_base": 6000,  # adjusted EWC strength
        "target_update_freq": 100,
        "alpha": 0.01,
        "grad_clip": 0.7  # relaxed gradient clipping
    }

    # TensorBoard writer
    writer = SummaryWriter(log_dir='../logs/drone_rl_optimized')

    # Environment
    env = DroneEnvironment(config)

    # State / action dimensions
    state_dim = 2 + 10 * 6 + 3 + 2  # includes distance to nearest unserved user
    action_dim = 9

    # Agent
    agent = ContinualRLAgent(state_dim, action_dim, config)

    # Training schedule
    num_episodes = 5000  # more training episodes
    task_switch_interval = 2000  # longer interval between task switches
    render_interval = 200  # render every N episodes

    # Convergence-detection parameters -- key tuning knobs.
    convergence_window = 200  # wider detection window
    convergence_std_threshold = 0.18  # relaxed relative-std threshold
    required_convergence_count = 2  # fewer consecutive passes required

    last_save = 0
    best_avg_reward = -float('inf')

    # Bookkeeping for the training loop
    all_rewards = []
    task_rewards = []
    coverage_rates = []
    task_boundaries = [0]
    start_time = time.time()

    # Convergence-detection state
    convergence_count = 0

    # Warm-up: pre-fill the replay buffer before learning starts.
    print("预热经验收集...")
    state = env.reset()
    warmup_steps = 0
    while len(agent.memory) < 20000:  # larger warm-up sample
        action = agent.select_action(state)
        next_state, reward, done, _ = env.step(action)
        agent.memory.push(state, action, reward, next_state, done)
        state = next_state
        warmup_steps += 1
        if done:
            state = env.reset()
    print(f"预热完成，已收集 {len(agent.memory)} 条经验, 步数: {warmup_steps}")

    # Main training loop
    for episode in range(num_episodes):
        # Track per-task episode count (used for exploration adjustment).
        if hasattr(agent, 'current_task_episodes'):
            agent.current_task_episodes += 1
        else:
            agent.current_task_episodes = 1

        # Task-switch logic, gated on convergence -- key modification.
        if episode % task_switch_interval == 0 and episode > 0:
            # Only consider switching once enough episodes were collected.
            if len(task_rewards) > convergence_window:
                recent_rewards = task_rewards[-convergence_window:]
                avg_reward = np.mean(recent_rewards)
                std_reward = np.std(recent_rewards)
                # BUGFIX: use abs(avg_reward) -- with a negative mean reward
                # the ratio would be negative and always pass the threshold,
                # producing spurious "convergence".  +1e-8 avoids div-by-zero.
                relative_std = std_reward / (abs(avg_reward) + 1e-8)

                # Converged when the relative std falls below the threshold.
                if relative_std < convergence_std_threshold:
                    convergence_count += 1
                    print(
                        f"任务收敛 ({convergence_count}/{required_convergence_count}), 平均奖励: {avg_reward:.2f} ± {std_reward:.2f} (相对标准差: {relative_std:.2f})")
                else:
                    convergence_count = 0
                    print(f"未收敛: 相对标准差 {relative_std:.2f} > 阈值 {convergence_std_threshold}")

                # Switch tasks only after enough consecutive convergences.
                if convergence_count >= required_convergence_count:
                    print(f"\n===== 切换到新任务 (Episode {episode}) =====")
                    agent.finish_task(env)
                    env.switch_task()

                    # Shrink the service radius slowly (0.3 m per switch,
                    # clamped at 10 m).
                    env.service_radius = max(10, env.service_radius - 0.3)
                    print(f"新服务半径: {env.service_radius}m")

                    task_rewards = []
                    task_boundaries.append(episode)
                    convergence_count = 0

                    # Reset per-task episode counter.
                    agent.current_task_episodes = 0

                    # Re-boost exploration moderately for the new task.
                    agent.epsilon = min(0.6, agent.epsilon_min + 0.4)
                    agent.uncertainty_weight = 0.5  # reset uncertainty weight
                else:
                    print(f"延迟任务切换，未达到收敛标准 (收敛计数: {convergence_count}/{required_convergence_count})")
            else:
                print("延迟任务切换，样本不足")

        state = env.reset()
        total_reward = 0
        done = False
        step_count = 0
        update_counter = 0

        while not done:
            # Act
            action = agent.select_action(state)

            # Step the environment
            next_state, reward, done, info = env.step(action)

            # Store the transition
            agent.memory.push(state, action, reward, next_state, done)

            # Update the model every 2 environment steps.
            update_counter += 1
            if update_counter >= 2:
                loss = agent.update_model()
                if loss > 0:
                    writer.add_scalar('Training/Loss', loss, agent.training_steps)
                update_counter = 0

            # Advance state and accumulate reward.
            state = next_state
            total_reward += reward
            step_count += 1

        # Decay exploration rate.
        agent.epsilon = max(agent.epsilon_min, agent.epsilon * agent.epsilon_decay)
        writer.add_scalar('Training/Epsilon', agent.epsilon, episode)
        writer.add_scalar('Training/Uncertainty Weight', agent.uncertainty_weight, episode)

        # Record rewards.
        all_rewards.append(total_reward)
        task_rewards.append(total_reward)
        writer.add_scalar('Training/Reward', total_reward, episode)

        # Rolling average reward over the last 100 episodes.
        avg_reward = np.mean(all_rewards[-100:]) if len(all_rewards) >= 100 else np.mean(all_rewards)
        writer.add_scalar('Training/Avg Reward (100ep)', avg_reward, episode)

        # Coverage of this episode (guard against zero active users).
        coverage = info["total_visited"] / max(info["total_active"], 1)
        coverage_rates.append(coverage)
        writer.add_scalar('Metrics/Coverage Rate', coverage, episode)

        # Stored for the agent's task-importance computation.
        agent.task_completion_rate = coverage

        # Progress report.
        if (episode + 1) % 10 == 0:
            visited = info["total_visited"]
            active = info["total_active"]
            print(f"Episode {episode + 1}/{num_episodes}, Reward: {total_reward:.2f}, "
                  f"Avg Reward: {avg_reward:.2f}, Epsilon: {agent.epsilon:.4f}, "
                  f"Coverage: {coverage * 100:.1f}% ({visited}/{active}), Steps: {step_count}")

        # Every 100 episodes: track the best model and checkpoint.
        if (episode + 1) % 100 == 0:
            # BUGFIX: >= instead of > so the very first 100-episode window
            # (exactly 100 rewards) is also eligible for best-model saving.
            if len(all_rewards) >= 100:
                recent_rewards = all_rewards[-100:]
                avg_reward = np.mean(recent_rewards)

                # Save the best model so far.
                if avg_reward > best_avg_reward:
                    best_avg_reward = avg_reward
                    torch.save(agent.q_network.state_dict(), "../models/best_model.pth")
                    print(f"最佳模型已保存，平均奖励: {best_avg_reward:.2f}")

            # Periodic checkpoint (at least every 500 episodes, and at the end).
            if episode - last_save >= 500 or episode == num_episodes - 1:
                torch.save(agent.q_network.state_dict(), f"../models/model_ep{episode}.pth")
                last_save = episode
                print(f"模型已保存: models/model_ep{episode}.pth")

        # Render trajectory snapshots.
        if (episode + 1) % render_interval == 0 or episode == 0:
            env.render(episode + 1)

    # Total wall-clock training time.
    training_time = time.time() - start_time
    print(f"训练完成! 总耗时: {training_time / 60:.2f} 分钟")

    # Reward curve.
    plt.figure(figsize=(12, 6))
    plt.plot(all_rewards, alpha=0.3, label='Episode Reward')

    # Sliding-window moving average of the rewards.
    window_size = 100
    smoothed_rewards = []
    for i in range(len(all_rewards)):
        start_idx = max(0, i - window_size + 1)
        smoothed_rewards.append(sum(all_rewards[start_idx:(i + 1)]) / (i - start_idx + 1))

    plt.plot(smoothed_rewards, linewidth=2, label=f'Smoothed Reward (window={window_size})')

    # Mark task-switch boundaries.
    for boundary in task_boundaries[1:]:
        plt.axvline(x=boundary, color='r', linestyle='--', alpha=0.7)

    plt.xlabel('Episode')
    plt.ylabel('Reward')
    plt.title('优化后的训练进度 (任务切换)')
    plt.legend()
    plt.grid(True)
    plt.savefig('optimized_training_rewards.png')
    plt.close()

    # Coverage curve.
    plt.figure(figsize=(12, 6))
    plt.plot(coverage_rates, alpha=0.7)
    plt.xlabel('Episode')
    plt.ylabel('Coverage Rate')
    plt.title('用户服务覆盖率变化')
    plt.grid(True)
    plt.savefig('coverage_rates.png')
    plt.close()

    # Flush TensorBoard.
    writer.close()

    # Final evaluation.
    print("\n===== 最终评估 =====")
    test_episodes = 10  # more test episodes

    # Load the best checkpoint.
    # BUGFIX: map_location keeps the load working when the checkpoint was
    # written on a different device (e.g. trained on GPU, evaluated on CPU);
    # the existence check avoids crashing when no best model was ever saved
    # (evaluation then uses the agent's current weights).
    best_model_path = "../models/best_model.pth"
    if os.path.exists(best_model_path):
        agent.q_network.load_state_dict(torch.load(best_model_path, map_location=device))
    agent.q_network.eval()

    # Evaluate each task in sequence.
    for task_id in range(len(task_boundaries)):
        print(f"\n测试任务 {task_id}:")

        # Advance the environment to the corresponding task.
        if task_id > 0:
            env.switch_task()

        task_coverage = []
        task_rewards = []
        for test_ep in range(test_episodes):
            state = env.reset()
            total_reward = 0
            done = False
            step_count = 0

            while not done:
                action = agent.select_action(state, eval_mode=True)
                next_state, reward, done, info = env.step(action)
                state = next_state
                total_reward += reward
                step_count += 1

            visited = info["total_visited"]
            active = info["total_active"]
            coverage = visited / active if active > 0 else 0
            task_coverage.append(coverage)
            task_rewards.append(total_reward)
            print(f"  测试回合 {test_ep + 1}, 奖励: {total_reward:.2f}, "
                  f"服务覆盖率: {coverage * 100:.1f}% ({visited}/{active}), 步数: {step_count}")

            # Save the trajectory of the first test episode only.
            if test_ep == 0:
                env.render(f"final_task{task_id}_ep{test_ep + 1}")

        # Per-task averages.
        avg_coverage = np.mean(task_coverage) * 100
        avg_reward = np.mean(task_rewards)
        print(f"任务 {task_id} 平均覆盖率: {avg_coverage:.1f}%, 平均奖励: {avg_reward:.2f}")


# Entry point: run the full training/evaluation pipeline only when this file
# is executed as a script (not when imported as a module).
if __name__ == "__main__":
    train()