import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import copy
import math
import os
import time
from torch.utils.tensorboard import SummaryWriter

# Configure matplotlib for CJK glyphs (SimHei) and correct minus-sign rendering
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Select the compute device: CUDA GPU when available, otherwise CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Output directories for trajectory plots, model checkpoints and TensorBoard logs
os.makedirs("../trajectory_images", exist_ok=True)
os.makedirs("../models", exist_ok=True)
os.makedirs("../logs", exist_ok=True)

# Seed every RNG source (torch, numpy, random, CUDA) for reproducibility
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

# One transition record as stored in the replay buffer
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])


# 使用SumTree实现优先经验回放
class SumTree:
    """Binary sum tree over `capacity` leaf priorities.

    Supports O(log n) priority updates and proportional sampling for
    prioritized experience replay. Internal nodes live at indices
    [0, capacity-2]; leaves start at index capacity-1.

    Improvement over the original: `_propagate` and `_retrieve` are
    iterative instead of recursive (no per-update Python call-stack cost),
    and the iterative `_propagate` no longer writes to `tree[-1]` in the
    degenerate capacity == 1 case (the recursive version computed the
    parent of index 0 as (0-1)//2 == -1).
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = np.zeros(2 * capacity - 1)
        self.data = np.zeros(capacity, dtype=object)
        self.write = 0       # next leaf slot to overwrite (ring buffer)
        self.n_entries = 0   # number of filled slots, capped at capacity

    def _propagate(self, idx, change):
        """Add a priority delta to every ancestor of node `idx`, up to the root."""
        while idx != 0:
            idx = (idx - 1) // 2
            self.tree[idx] += change

    def _retrieve(self, idx, s):
        """Descend from `idx` to the leaf whose cumulative-priority span contains `s`."""
        while True:
            left = 2 * idx + 1
            if left >= len(self.tree):
                return idx  # reached a leaf
            if s <= self.tree[left]:
                idx = left
            else:
                s -= self.tree[left]
                idx = left + 1

    def total(self):
        """Sum of all stored priorities (the root value)."""
        return self.tree[0]

    def add(self, p, data):
        """Store `data` with priority `p`, overwriting the oldest entry when full."""
        leaf = self.write + self.capacity - 1
        self.data[self.write] = data
        self.update(leaf, p)

        self.write = (self.write + 1) % self.capacity
        if self.n_entries < self.capacity:
            self.n_entries += 1

    def update(self, idx, p):
        """Set the priority at tree index `idx` and refresh ancestor sums."""
        change = p - self.tree[idx]
        self.tree[idx] = p
        self._propagate(idx, change)

    def get(self, s):
        """Return (tree index, priority, data) for the leaf sampled by mass `s`."""
        idx = self._retrieve(0, s)
        dataIdx = idx - self.capacity + 1
        return (idx, self.tree[idx], self.data[dataIdx])


class PrioritizedReplayBuffer:
    """Proportional prioritized experience replay (PER) backed by a SumTree.

    New transitions enter with the maximum priority seen so far (so each is
    sampled at least once); sampling is stratified over equal-mass segments,
    and importance-sampling weights correct the induced bias.

    Bug fixed vs. the original: `update_priorities` stored the already
    alpha-exponentiated priority into `max_priority`, and `push` then raised
    `max_priority` to alpha *again* — a double exponentiation that skewed
    the priority of freshly inserted transitions. `max_priority` now tracks
    the raw |TD-error| and the alpha exponent is applied exactly once, at
    the tree boundary.
    """

    def __init__(self, capacity, alpha=0.6, beta=0.4, beta_increment=0.001, eps=1e-6):
        self.tree = SumTree(capacity)
        self.capacity = capacity
        self.alpha = alpha  # how strongly TD-error shapes the sampling distribution
        self.beta = beta  # strength of importance-sampling correction (annealed to 1)
        self.beta_increment = beta_increment  # per-sample-call increase of beta
        self.eps = eps  # floor so no priority is exactly zero
        self.max_priority = 1.0  # maximum RAW (pre-alpha) priority seen so far

    def push(self, state, action, reward, next_state, done):
        """Insert a transition with the current maximum priority."""
        experience = Experience(state, action, reward, next_state, done)
        priority = self.max_priority ** self.alpha
        self.tree.add(priority, experience)

    def sample(self, batch_size):
        """Draw `batch_size` experiences by stratified proportional sampling.

        Returns device tensors (states, actions, rewards, next_states,
        dones), the tree indices needed for later priority updates, and the
        normalized importance-sampling weights.
        """
        batch_indices = []
        batch = []
        weights = np.zeros(batch_size, dtype=np.float32)

        # Anneal beta toward 1 (full bias correction late in training).
        self.beta = min(1.0, self.beta + self.beta_increment)

        # Stratified sampling: one uniform draw per equal-mass segment.
        segment = self.tree.total() / batch_size

        for i in range(batch_size):
            a = segment * i
            b = segment * (i + 1)
            s = random.uniform(a, b)

            idx, priority, experience = self.tree.get(s)
            batch_indices.append(idx)
            batch.append(experience)

            # Importance-sampling weight: (N * P(i)) ** -beta.
            sample_prob = priority / self.tree.total()
            weights[i] = (self.tree.n_entries * sample_prob) ** (-self.beta)

        # Normalize so the largest weight is 1.
        weights = weights / weights.max()

        # Convert the batch into device tensors.
        states = torch.tensor(np.vstack([e.state for e in batch]), dtype=torch.float32).to(device)
        actions = torch.tensor(np.array([e.action for e in batch]), dtype=torch.float32).to(device)
        rewards = torch.tensor(np.array([e.reward for e in batch]), dtype=torch.float32).to(device)
        next_states = torch.tensor(np.vstack([e.next_state for e in batch]), dtype=torch.float32).to(device)
        dones = torch.tensor(np.array([e.done for e in batch]), dtype=torch.float32).to(device)
        weights = torch.tensor(weights, dtype=torch.float32).to(device)

        return states, actions, rewards, next_states, dones, batch_indices, weights

    def update_priorities(self, indices, priorities):
        """Refresh leaf priorities from fresh |TD-error| values.

        `max_priority` tracks the raw (pre-alpha) error so that the alpha
        exponent is applied exactly once per write.
        """
        for idx, priority in zip(indices, priorities):
            raw = max(priority, self.eps)
            self.max_priority = max(self.max_priority, raw)
            self.tree.update(idx, raw ** self.alpha)

    def __len__(self):
        """Number of transitions currently stored."""
        return self.tree.n_entries


class Actor(nn.Module):
    """Deterministic policy network.

    Maps a state to an action in [-max_action, max_action] through a
    LayerNorm MLP trunk followed by a tanh-squashed linear head.
    """

    def __init__(self, state_dim, action_dim, max_action, hidden_dims=(512, 384, 256)):
        super(Actor, self).__init__()
        self.max_action = max_action

        # Normalize raw observations before the trunk.
        self.input_norm = nn.LayerNorm(state_dim)

        # Trunk: one (Linear -> LayerNorm -> ReLU) triple per hidden width.
        widths = [state_dim, *hidden_dims]
        trunk = []
        for w_in, w_out in zip(widths[:-1], widths[1:]):
            trunk += [nn.Linear(w_in, w_out), nn.LayerNorm(w_out), nn.ReLU()]
        self.hidden_layers = nn.Sequential(*trunk)
        self.output_layer = nn.Linear(hidden_dims[-1], action_dim)

        # Orthogonal init on every Linear, then a small uniform init on the
        # head so initial actions start near zero.
        self.apply(self._init_weights)
        nn.init.uniform_(self.output_layer.weight, -3e-3, 3e-3)
        nn.init.uniform_(self.output_layer.bias, -3e-3, 3e-3)

    def _init_weights(self, m):
        """Orthogonal weights / zero biases for Linear layers."""
        if isinstance(m, nn.Linear):
            nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)

    def forward(self, x):
        h = self.hidden_layers(self.input_norm(x))
        # tanh bounds the raw output to [-1, 1]; scale to the action range.
        return self.max_action * torch.tanh(self.output_layer(h))


class Critic(nn.Module):
    """Twin Q-networks for TD3.

    Two critics of identical architecture but independent parameters score
    a (state, action) pair; training takes the minimum of the two targets to
    curb overestimation bias. The duplicated trunk-construction code of the
    original has been factored into `_make_trunk`.
    """

    def __init__(self, state_dim, action_dim, hidden_dims=(512, 384, 256)):
        super(Critic, self).__init__()

        # Normalize the concatenated (state, action) input.
        self.input_norm = nn.LayerNorm(state_dim + action_dim)

        # Q1 network (trunk + scalar head). Construction order matches the
        # original so seeded initialization is unchanged.
        self.q1_layers = self._make_trunk(state_dim + action_dim, hidden_dims)
        self.q1_output = nn.Linear(hidden_dims[-1], 1)

        # Q2 network (same architecture, independent parameters).
        self.q2_layers = self._make_trunk(state_dim + action_dim, hidden_dims)
        self.q2_output = nn.Linear(hidden_dims[-1], 1)

        # Orthogonal init on every Linear, then small uniform init on both
        # heads so initial Q estimates start near zero.
        self.apply(self._init_weights)
        nn.init.uniform_(self.q1_output.weight, -3e-3, 3e-3)
        nn.init.uniform_(self.q1_output.bias, -3e-3, 3e-3)
        nn.init.uniform_(self.q2_output.weight, -3e-3, 3e-3)
        nn.init.uniform_(self.q2_output.bias, -3e-3, 3e-3)

    @staticmethod
    def _make_trunk(input_dim, hidden_dims):
        """Build a (Linear -> LayerNorm -> ReLU) stack over the given widths."""
        dims = [input_dim] + list(hidden_dims)
        layers = []
        for i in range(len(dims) - 1):
            layers.append(nn.Linear(dims[i], dims[i + 1]))
            layers.append(nn.LayerNorm(dims[i + 1]))
            layers.append(nn.ReLU())
        return nn.Sequential(*layers)

    def _init_weights(self, m):
        """Orthogonal weights / zero biases for Linear layers."""
        if isinstance(m, nn.Linear):
            nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)

    def forward(self, state, action):
        """Return (Q1, Q2) estimates for the (state, action) batch."""
        x = torch.cat([state, action], dim=1)
        x = self.input_norm(x)

        q1 = self.q1_layers(x)
        q1 = self.q1_output(q1)

        q2 = self.q2_layers(x)
        q2 = self.q2_output(q2)

        return q1, q2

    def q1_forward(self, state, action):
        """Return only the Q1 estimate (used for the actor loss)."""
        x = torch.cat([state, action], dim=1)
        x = self.input_norm(x)
        q1 = self.q1_layers(x)
        q1 = self.q1_output(q1)
        return q1


class User:
    """A ground user at a fixed (x, y) position.

    Attributes:
        x, y: coordinates inside the environment.
        active: whether the user is part of the current task.
        visited: whether the drone has already served this user.
    """

    def __init__(self, x, y):
        self.x, self.y = x, y
        self.active = False
        self.visited = False


class DroneEnvironment:
    """2D drone service-coverage environment.

    A single drone moves inside a 100x100 m arena and must pass within
    `service_radius` of every *active* user. The step reward combines a
    movement cost, first-visit bonuses, proximity shaping toward the nearest
    unserved user, progress/completion bonuses and a coverage penalty.
    """

    def __init__(self, config):
        # Arena dimensions in meters.
        self.width = 100
        self.height = 100
        self.service_radius = config["service_radius"]
        self.max_steps = 300
        self.steps = 0
        self.episode_num = 0

        # Users at fixed random positions (positions persist across episodes).
        self.all_users = []
        for _ in range(10):
            x = random.uniform(0, self.width)
            y = random.uniform(0, self.height)
            self.all_users.append(User(x, y))

        # Activate a random subset of 8 users as the current task.
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False

        # Drone state.
        self.drone_x = random.uniform(0, self.width)
        self.drone_y = random.uniform(0, self.height)
        self.max_speed = 5

        # Trajectory history for rendering.
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        # Visited count after the previous step (used for the progress reward).
        self.prev_visited_count = 0

        # Current task difficulty level (not referenced in this chunk).
        self.difficulty_level = 0

    def reset(self):
        """Start a new episode and return the initial state vector."""
        self.episode_num += 1
        # Clear per-episode visited flags.
        for user in self.all_users:
            user.visited = False

        # Smart initialization: spawn the drone near an active user.
        if self.episode_num < 300:  # curriculum learning for the first 300 episodes
            # Pick a random active user and place the drone close to it.
            if self.active_users:
                random_user = random.choice(self.active_users)
                # Random point on a ring around the chosen user.
                angle = random.uniform(0, 2 * math.pi)
                distance = random.uniform(10, 30)  # within 10-30 meters
                self.drone_x = max(0, min(self.width, random_user.x + distance * math.cos(angle)))
                self.drone_y = max(0, min(self.height, random_user.y + distance * math.sin(angle)))
            else:
                self.drone_x = random.uniform(0, self.width)
                self.drone_y = random.uniform(0, self.height)
        else:
            # After the curriculum phase: fully random start position.
            self.drone_x = random.uniform(0, self.width)
            self.drone_y = random.uniform(0, self.height)

        self.steps = 0
        self.prev_visited_count = 0

        # Reset trajectory history.
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        return self.get_state()

    def switch_task(self):
        """Re-sample the active user set to simulate a task change."""
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False

    def get_state(self):
        """Build the observation vector.

        Layout: drone xy (2) + per-user [active, visited, dx, dy,
        log-scaled distance, in-range] (10*6) + mean direction toward
        unserved users (2) + normalized step count (1) + served ratio (1).
        """
        state = []

        # 1. Drone coordinates (normalized).
        state.extend([self.drone_x / self.width, self.drone_y / self.height])

        # 2. Per-user features.
        unvisited_count = 0
        unvisited_vec = [0, 0]  # accumulated direction toward unserved users

        for user in self.all_users:
            # Whether the user is part of the current task.
            active = 1.0 if user.active else 0.0
            state.append(active)

            # Whether the user has already been served.
            visited = 1.0 if user.visited else 0.0
            state.append(visited)

            # User position relative to the drone (normalized).
            dx = (user.x - self.drone_x) / self.width
            dy = (user.y - self.drone_y) / self.height
            state.extend([dx, dy])

            # Distance to the user (log-scaled to compress large values).
            distance = math.sqrt((user.x - self.drone_x) ** 2 + (user.y - self.drone_y) ** 2)
            scaled_dist = math.log(1 + distance) / math.log(1 + 100)  # 100 = arena width
            state.append(scaled_dist)

            # Whether the user is inside the service radius.
            in_range = 1.0 if distance < self.service_radius else 0.0
            state.append(in_range)

            # Accumulate the direction toward active, not-yet-served users.
            if user.active and not user.visited:
                unvisited_count += 1
                unvisited_vec[0] += dx
                unvisited_vec[1] += dy

        # 3. Mean direction vector toward unserved users (averaged by count).
        if unvisited_count > 0:
            unvisited_vec[0] /= unvisited_count
            unvisited_vec[1] /= unvisited_count
        state.extend(unvisited_vec)

        # 4. Elapsed steps (normalized).
        state.append(self.steps / self.max_steps)

        # 5. Fraction of active users already served.
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        active_count = sum(1 for user in self.all_users if user.active)
        state.append(visited_count / max(active_count, 1))  # guard against division by zero

        return np.array(state, dtype=np.float32)

    def step(self, action):
        """Advance one timestep.

        `action` is a 2-vector in [-1, 1] giving x/y velocity fractions;
        each component is scaled by `max_speed`. Returns
        (state, reward, done, info).
        """
        dx = action[0] * self.max_speed
        dy = action[1] * self.max_speed

        prev_x, prev_y = self.drone_x, self.drone_y

        # Move the drone, clamped to the arena bounds.
        self.drone_x = max(0, min(self.width, self.drone_x + dx))
        self.drone_y = max(0, min(self.height, self.drone_y + dy))

        # Distance actually traveled this step.
        movement_cost = math.sqrt((self.drone_x - prev_x) ** 2 + (self.drone_y - prev_y) ** 2)

        # Record the trajectory.
        self.trajectory_x.append(self.drone_x)
        self.trajectory_y.append(self.drone_y)

        # Reward shaping.
        reward = 0
        serviced_users = 0
        newly_serviced = 0  # users served for the first time this step
        min_distance = float('inf')  # distance to the nearest unserved user

        # 1. Movement penalty proportional to distance traveled.
        reward -= 0.02 * movement_cost

        # 2. Service rewards.
        for user in self.all_users:
            if user.active:
                distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)

                # Track the closest unserved user.
                if not user.visited and distance < min_distance:
                    min_distance = distance

                if distance < self.service_radius:
                    serviced_users += 1

                    # Large one-time bonus the first time a user is served.
                    if not user.visited:
                        reward += 20  # boosted first-service bonus
                        newly_serviced += 1
                        user.visited = True
                    else:
                        reward += 1  # much smaller reward for re-visits

        # 3. Proximity shaping toward the nearest unserved user (exp. decay).
        if min_distance < float('inf'):
            # Stronger shaping reward the closer the drone gets.
            proximity_reward = 10 * math.exp(-min_distance / 20)
            reward += proximity_reward

        # 4. Completion bookkeeping.
        active_count = sum(1 for user in self.all_users if user.active)
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)

        # Progress bonus for each newly served user since the last step.
        progress = visited_count - self.prev_visited_count
        if progress > 0:
            reward += 10 * progress  # boosted progress reward

        # Remember for the next step's progress computation.
        self.prev_visited_count = visited_count

        if visited_count == active_count and active_count > 0:
            # Completion bonus plus a time-saving bonus for finishing early.
            reward += 300 + 150 * (1 - self.steps / self.max_steps)  # boosted completion bonus

        # 5. Penalty proportional to the remaining unserved fraction.
        coverage_ratio = visited_count / max(active_count, 1)
        unvisited_penalty = -0.8 * (1 - coverage_ratio)
        reward += unvisited_penalty

        # 6. Milestone bonuses at coverage thresholds.
        if coverage_ratio > 0.5:
            reward += 5  # bonus for serving more than half the users
        if coverage_ratio > 0.8:
            reward += 10  # bonus for serving more than 80% of the users

        # Advance the step counter; truncate at max_steps.
        self.steps += 1
        done = self.steps >= self.max_steps

        # Early termination once every active user has been served.
        if visited_count == active_count and active_count > 0:
            done = True

        info = {
            "serviced_users": serviced_users,
            "newly_serviced": newly_serviced,
            "total_visited": visited_count,
            "total_active": active_count,
            "coverage_ratio": coverage_ratio
        }

        return self.get_state(), reward, done, info

    def render(self, episode, step=None):
        """Save a PNG of the trajectory, users and service radius.

        Note: the `step` argument is accepted but the filename uses the
        internal step counter.
        """
        plt.figure(figsize=(10, 10))
        plt.xlim(0, self.width)
        plt.ylim(0, self.height)

        # Drone trajectory.
        plt.plot(self.trajectory_x, self.trajectory_y, 'ro-', alpha=0.6, label='Drone Path')

        # Current drone position.
        plt.scatter(self.drone_x, self.drone_y, c='red', s=100, marker='*', label='Drone')

        # Service radius around the drone.
        circle = plt.Circle((self.drone_x, self.drone_y), self.service_radius,
                            color='blue', fill=False, alpha=0.3, linestyle='--')
        plt.gca().add_patch(circle)

        # Users, colored by status.
        for user in self.all_users:
            if user.active and user.visited:
                color = 'limegreen'  # active user already served
                marker = 'o'
            elif user.active and not user.visited:
                color = 'green'  # active user not yet served
                marker = 'o'
            else:
                color = 'gray'  # inactive user
                marker = 'x'

            plt.scatter(user.x, user.y, c=color, s=80, marker=marker)

            # Highlight active users currently inside the service radius.
            distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)
            if distance < self.service_radius and user.active:
                circle = plt.Circle((user.x, user.y), 2, color='green', alpha=0.5)
                plt.gca().add_patch(circle)

        # Legend (user-facing label strings kept as-is).
        visited_handle = plt.scatter([], [], c='limegreen', marker='o', label='已服务用户')
        active_handle = plt.scatter([], [], c='green', marker='o', label='未服务用户')
        inactive_handle = plt.scatter([], [], c='gray', marker='x', label='未激活用户')
        plt.legend(handles=[visited_handle, active_handle, inactive_handle])

        # Title with service progress.
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        active_count = sum(1 for user in self.all_users if user.active)
        plt.title(f'Episode {episode}: 已访问 {visited_count}/{active_count} 用户 (Step {self.steps})')
        plt.xlabel('X 位置 (m)')
        plt.ylabel('Y 位置 (m)')
        plt.grid(True)

        # Save under a unique filename per episode/step.
        filename = f'../trajectory_images/episode_{episode}_step_{self.steps}.png'
        plt.savefig(filename)
        plt.close()

class TD3Agent:
    """Twin Delayed DDPG (TD3) agent.

    Combines prioritized experience replay, n-step returns, target policy
    smoothing, clipped double-Q learning, delayed actor updates, Polyak
    target updates, coverage-adaptive exploration noise and cosine-annealed
    learning rates.

    Bug fixed vs. the original `update()`: the replay buffer returns
    rewards/dones/weights as 1-D (batch,) tensors while the critics output
    (batch, 1) columns, so the TD target `rewards + (1-dones)*γⁿ*Q` silently
    broadcast to a (batch, batch) matrix, corrupting both losses and the
    TD errors fed back as priorities. They are now reshaped to (batch, 1).
    """

    def __init__(self, state_dim, action_dim, max_action, config):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.max_action = max_action

        # Hyperparameters (see the `config` dict built by the caller).
        self.gamma = config["gamma"]
        self.tau = config["tau"]
        self.policy_noise = config["policy_noise"]
        self.noise_clip = config["noise_clip"]
        self.policy_freq = config["policy_freq"]
        self.batch_size = config["batch_size"]
        self.exploration_noise = config["exploration_noise"]
        self.noise_decay = config["noise_decay"]
        self.min_noise = config["min_noise"]
        self.lr_actor = config["lr_actor"]
        self.lr_critic = config["lr_critic"]
        self.weight_decay = config["weight_decay"]
        self.n_step = config["n_step"]
        self.reward_scale = config["reward_scale"]
        self.grad_clip = config["grad_clip"]

        # Actor network, its target copy and optimizer.
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.AdamW(self.actor.parameters(),
                                           lr=self.lr_actor,
                                           weight_decay=self.weight_decay)

        # Twin critic network, its target copy and optimizer.
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.AdamW(self.critic.parameters(),
                                            lr=self.lr_critic,
                                            weight_decay=self.weight_decay)

        # Prioritized experience replay.
        self.memory = PrioritizedReplayBuffer(config["memory_capacity"])

        # Training bookkeeping.
        self.total_it = 0
        self.current_noise = self.exploration_noise
        self.task_seen = 0
        self.current_task_id = 0

        # Cosine-annealed learning-rate schedulers.
        self.actor_scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.actor_optimizer,
            T_max=config["lr_decay_steps"],
            eta_min=self.lr_actor / 10
        )
        self.critic_scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.critic_optimizer,
            T_max=config["lr_decay_steps"],
            eta_min=self.lr_critic / 10
        )

        # Rolling window of the last n transitions for n-step returns.
        self.n_step_buffer = deque(maxlen=self.n_step)

    def select_action(self, state, eval_mode=False):
        """Return an action for `state`; adds exploration noise unless evaluating."""
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        with torch.no_grad():
            action = self.actor(state).cpu().data.numpy().flatten()

        if not eval_mode:
            # Coverage-adaptive noise: the last state feature is the served
            # ratio, so the more users remain unserved, the larger the noise.
            coverage_ratio = state.cpu().numpy()[0][-1]
            adaptive_noise = self.current_noise * (1 + 2 * (1 - coverage_ratio))
            # Add Gaussian exploration noise and clip to the action range.
            noise = np.random.normal(0, adaptive_noise, size=self.action_dim)
            action = np.clip(action + noise, -self.max_action, self.max_action)

        return action

    def decay_noise(self):
        """Exponentially decay exploration noise down to `min_noise`."""
        self.current_noise = max(self.min_noise, self.current_noise * self.noise_decay)
        return self.current_noise

    def remember(self, state, action, reward, next_state, done):
        """Accumulate a transition and emit an n-step experience when ready."""
        self.n_step_buffer.append((state, action, reward, next_state, done))

        # Wait until the window holds n transitions (or the episode ended).
        if len(self.n_step_buffer) < self.n_step and not done:
            return

        # The stored experience starts at the oldest (state, action) pair.
        state, action, _, _, _ = self.n_step_buffer[0]

        # Discounted n-step return over the window.
        n_reward = 0
        for i, (_, _, r, _, _) in enumerate(self.n_step_buffer):
            n_reward += (self.gamma ** i) * r

        # Bootstrap from the newest next_state/done in the window.
        _, _, _, next_state, done = self.n_step_buffer[-1]

        # Store the (scaled) n-step transition in prioritized replay.
        self.memory.push(state, action, n_reward * self.reward_scale, next_state, done)

        # Terminal transition: drop the window so episodes never mix.
        if done:
            self.n_step_buffer.clear()

    def update(self):
        """Run one TD3 optimization step and return loss diagnostics."""
        self.total_it += 1

        # Not enough experience accumulated yet.
        if len(self.memory) < self.batch_size:
            return {"actor_loss": 0, "critic_loss": 0, "td_error": 0}

        # Prioritized batch with importance-sampling weights.
        states, actions, rewards, next_states, dones, indices, weights = self.memory.sample(self.batch_size)

        # BUGFIX: reshape the 1-D (batch,) tensors to (batch, 1) columns so
        # they align with the critic outputs instead of broadcasting the TD
        # target and losses to (batch, batch).
        rewards = rewards.unsqueeze(1)
        dones = dones.unsqueeze(1)
        weights = weights.unsqueeze(1)

        with torch.no_grad():
            # Target policy smoothing: clipped Gaussian noise on target actions.
            noise = torch.randn_like(actions) * self.policy_noise
            noise = torch.clamp(noise, -self.noise_clip, self.noise_clip)

            next_actions = self.actor_target(next_states) + noise
            next_actions = torch.clamp(next_actions, -self.max_action, self.max_action)

            # Clipped double-Q: the smaller target Q curbs overestimation.
            target_q1, target_q2 = self.critic_target(next_states, next_actions)
            target_q = torch.min(target_q1, target_q2)

            # n-step TD target (gamma^n because rewards are n-step returns).
            target_q = rewards + (1 - dones) * self.gamma ** self.n_step * target_q

        # Current Q estimates.
        current_q1, current_q2 = self.critic(states, actions)

        # Absolute TD errors drive the new replay priorities.
        td_error1 = torch.abs(target_q - current_q1).detach().cpu().numpy()
        td_error2 = torch.abs(target_q - current_q2).detach().cpu().numpy()
        td_errors = np.mean([td_error1, td_error2], axis=0)

        # Importance-weighted MSE over both critics.
        critic_loss = (weights * nn.MSELoss(reduction='none')(current_q1, target_q)).mean() + \
                      (weights * nn.MSELoss(reduction='none')(current_q2, target_q)).mean()

        # Critic step with gradient clipping.
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), self.grad_clip)
        self.critic_optimizer.step()

        # Refresh replay priorities with the fresh TD errors.
        self.memory.update_priorities(indices, td_errors.flatten())

        actor_loss = torch.tensor(0.0).to(device)
        # Delayed policy update: train the actor every `policy_freq` steps.
        if self.total_it % self.policy_freq == 0:
            # Deterministic policy gradient: maximize Q1 of the actor's action.
            actor_loss = -self.critic.q1_forward(states, self.actor(states)).mean()

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), self.grad_clip)
            self.actor_optimizer.step()

            # Polyak (soft) update of both target networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

            # Advance the LR schedules alongside the actor updates.
            self.actor_scheduler.step()
            self.critic_scheduler.step()

        return {
            "actor_loss": actor_loss.item() if isinstance(actor_loss, torch.Tensor) else actor_loss,
            "critic_loss": critic_loss.item(),
            "td_error": float(np.mean(td_errors))
        }

    def save(self, path):
        """Checkpoint networks, optimizers and exploration state to `path`."""
        torch.save({
            'actor': self.actor.state_dict(),
            'critic': self.critic.state_dict(),
            'actor_target': self.actor_target.state_dict(),
            'critic_target': self.critic_target.state_dict(),
            'actor_optimizer': self.actor_optimizer.state_dict(),
            'critic_optimizer': self.critic_optimizer.state_dict(),
            'current_noise': self.current_noise,
            'total_it': self.total_it
        }, path)
        print(f"模型已保存至 {path}")

    def load(self, path):
        """Restore a checkpoint previously written by `save`."""
        checkpoint = torch.load(path)
        self.actor.load_state_dict(checkpoint['actor'])
        self.critic.load_state_dict(checkpoint['critic'])
        self.actor_target.load_state_dict(checkpoint['actor_target'])
        self.critic_target.load_state_dict(checkpoint['critic_target'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer'])
        self.current_noise = checkpoint['current_noise']
        self.total_it = checkpoint['total_it']
        print(f"模型已从 {path} 加载")

    def finish_task(self):
        """Bookkeeping when the current task completes."""
        self.task_seen += 1
        self.current_task_id += 1
        print(f"任务 {self.current_task_id - 1} 完成")


def train():
    """End-to-end TD3 training pipeline for the drone coverage task.

    Builds the environment and agent, warms up the replay buffer with random
    actions, then trains for `num_episodes` episodes with periodic task
    switching, evaluation, checkpointing and rendering.  Afterwards it plots
    reward/coverage curves to ../logs and runs a final evaluation of the best
    saved model on every task seen.

    Side effects: writes TensorBoard logs, model checkpoints under ../models,
    and PNG plots / rendered trajectories to disk.
    """
    # Configuration parameters (heavily tuned)
    config = {
        "lr_actor": 1e-4,
        "lr_critic": 3e-4,
        "gamma": 0.99,  # discount factor
        "tau": 0.005,  # soft-update coefficient for the target networks
        "policy_noise": 0.2,  # target policy smoothing noise
        "noise_clip": 0.5,  # clipping range for the smoothing noise
        "policy_freq": 2,  # delayed policy update frequency
        "batch_size": 256,  # batch size
        "memory_capacity": 100000,  # replay buffer capacity
        "service_radius": 15,  # service radius
        "exploration_noise": 0.3,  # initial exploration noise
        "noise_decay": 0.995,  # exploration noise decay rate
        "min_noise": 0.05,  # minimum exploration noise
        "lr_decay_steps": 10000,  # learning-rate decay steps
        "weight_decay": 1e-5,  # weight decay
        "n_step": 3,  # n-step TD learning
        "reward_scale": 0.1,  # reward scaling
        "grad_clip": 1.0,  # gradient clipping
    }

    # TensorBoard logger
    writer = SummaryWriter(log_dir='../logs/drone_td3_improved')

    # Create the environment
    env = DroneEnvironment(config)

    # State and action space dimensions
    state_dim = 2 + 10 * 6 + 2 + 2  # drone xy(2) + 10 users * 6 features + step count & coverage(2) + direction vector(2)
    action_dim = 2  # velocity along x and y
    max_action = 1.0  # actions lie in [-1, 1]

    # Create the TD3 agent
    agent = TD3Agent(state_dim, action_dim, max_action, config)

    # Training schedule parameters
    num_episodes = 1000
    task_switch_interval = 300
    render_interval = 50
    eval_interval = 50
    last_save = 0
    best_avg_reward = -float('inf')

    # Training-loop bookkeeping
    all_rewards = []
    task_rewards = []
    coverage_rates = []
    task_boundaries = [0]
    start_time = time.time()

    # Warm-up phase: pre-fill the replay buffer with random-action experience
    print("预热经验收集...")
    state = env.reset()
    warmup_episodes = 5
    current_ep = 0

    while current_ep < warmup_episodes:
        action = np.random.uniform(-max_action, max_action, size=action_dim)
        next_state, reward, done, _ = env.step(action)
        # Store single-step transitions directly to fill the buffer quickly
        agent.memory.push(state, action, reward * config["reward_scale"], next_state, done)
        state = next_state

        if done:
            state = env.reset()
            current_ep += 1

    print(f"预热完成，已收集 {len(agent.memory)} 条经验")

    # Separate evaluation environment (deep copy so training state is untouched)
    eval_env = copy.deepcopy(env)

    # Evaluation helper (closes over `agent`, `eval_env`, and the training loop's `episode`)
    def evaluate(eval_episodes=5, render=False):
        """Run deterministic rollouts; return (avg_reward, avg_coverage, success_rate)."""
        avg_reward = 0
        avg_coverage = 0
        success_rate = 0

        for ep in range(eval_episodes):
            state = eval_env.reset()
            episode_reward = 0
            done = False

            while not done:
                action = agent.select_action(state, eval_mode=True)
                next_state, reward, done, info = eval_env.step(action)
                episode_reward += reward
                state = next_state

                if render and ep == 0:  # only render the first evaluation episode
                    # NOTE(review): `episode` here is the enclosing training-loop
                    # variable, not the local `ep` — confirm this is intended.
                    eval_env.render(f"eval_{episode}")

            coverage = info["coverage_ratio"]
            success = info["total_visited"] == info["total_active"]

            avg_reward += episode_reward
            avg_coverage += coverage
            success_rate += float(success)

        avg_reward /= eval_episodes
        avg_coverage /= eval_episodes
        success_rate /= eval_episodes

        return avg_reward, avg_coverage, success_rate

    # Main training loop
    print("\n开始训练...\n")
    for episode in range(num_episodes):
        # Switch the user-activation pattern (task) every N episodes
        if episode % task_switch_interval == 0 and episode > 0:
            print(f"\n===== 切换到新任务 (Episode {episode}) =====")
            agent.finish_task()  # close out the current task
            env.switch_task()  # switch the environment's task
            task_rewards = []  # reset the per-task reward history
            task_boundaries.append(episode)  # record the task boundary

        state = env.reset()
        total_reward = 0
        done = False
        episode_steps = 0

        # Reset the n-step transition buffer
        agent.n_step_buffer.clear()

        while not done:
            # Select an action
            action = agent.select_action(state)

            # Step the environment
            next_state, reward, done, info = env.step(action)

            # Store the transition
            agent.remember(state, action, reward, next_state, done)

            # Update the networks
            update_info = agent.update()

            # Log training metrics every 10 environment steps
            if episode_steps % 10 == 0:
                writer.add_scalar('Training/Actor_Loss', update_info["actor_loss"], agent.total_it)
                writer.add_scalar('Training/Critic_Loss', update_info["critic_loss"], agent.total_it)
                writer.add_scalar('Training/TD_Error', update_info["td_error"], agent.total_it)

            # Advance state and accumulate reward
            state = next_state
            total_reward += reward
            episode_steps += 1

        # Decay the exploration noise once per episode
        current_noise = agent.decay_noise()
        writer.add_scalar('Training/Exploration_Noise', current_noise, episode)

        # Record rewards
        all_rewards.append(total_reward)
        task_rewards.append(total_reward)
        writer.add_scalar('Training/Episode_Reward', total_reward, episode)

        # Moving-average reward over the last 50 episodes
        avg_reward = np.mean(all_rewards[-50:]) if len(all_rewards) >= 50 else np.mean(all_rewards)
        writer.add_scalar('Training/Avg_Reward_50ep', avg_reward, episode)

        # Record the coverage rate (max(..., 1) guards against division by zero)
        coverage = info["total_visited"] / max(info["total_active"], 1)
        coverage_rates.append(coverage)
        writer.add_scalar('Metrics/Coverage_Rate', coverage, episode)
        writer.add_scalar('Metrics/Steps', episode_steps, episode)

        # Progress report every 10 episodes
        if (episode + 1) % 10 == 0:
            visited = info["total_visited"]
            active = info["total_active"]
            print(f"Episode {episode + 1}/{num_episodes}, Reward: {total_reward:.2f}, "
                  f"Avg Reward: {avg_reward:.2f}, Coverage: {coverage * 100:.1f}% ({visited}/{active})")

            # Save the best model so far (by 50-episode average reward)
            if avg_reward > best_avg_reward:
                best_avg_reward = avg_reward
                agent.save(f"../models/best_model_td3.pt")
                print(f"最佳模型已保存，平均奖励: {best_avg_reward:.2f}")

        # Periodic evaluation and checkpointing
        if (episode + 1) % eval_interval == 0:
            eval_reward, eval_coverage, success_rate = evaluate(eval_episodes=3)
            writer.add_scalar('Evaluation/Reward', eval_reward, episode)
            writer.add_scalar('Evaluation/Coverage', eval_coverage, episode)
            writer.add_scalar('Evaluation/Success_Rate', success_rate, episode)

            print(f"\n--- 评估结果 (Episode {episode + 1}) ---")
            print(f"评估奖励: {eval_reward:.2f}")
            print(f"评估覆盖率: {eval_coverage * 100:.1f}%")
            print(f"任务成功率: {success_rate * 100:.1f}%\n")

            # Save a periodic checkpoint
            agent.save(f"../models/model_td3_ep{episode}.pt")
            last_save = episode

        # Render the environment periodically (and on the first episode)
        if (episode + 1) % render_interval == 0 or episode == 0:
            env.render(episode + 1)

    # Total training time
    training_time = time.time() - start_time
    print(f"\n训练完成! 总耗时: {training_time / 60:.2f} 分钟")

    # Plot the reward curve
    plt.figure(figsize=(12, 6))
    plt.plot(all_rewards, alpha=0.3, color='blue', label='Episode Reward')

    # Compute and plot the moving-average reward
    window_size = 30
    smoothed_rewards = []
    for i in range(len(all_rewards)):
        start_idx = max(0, i - window_size + 1)
        smoothed_rewards.append(sum(all_rewards[start_idx:(i + 1)]) / (i - start_idx + 1))

    plt.plot(smoothed_rewards, linewidth=2, color='darkblue', label=f'Smoothed Reward (window={window_size})')

    # Mark task-switch points
    for boundary in task_boundaries[1:]:
        plt.axvline(x=boundary, color='r', linestyle='--', alpha=0.7)

    plt.xlabel('Episode')
    plt.ylabel('Reward')
    plt.title('TD3 训练奖励曲线 (任务切换)')
    plt.legend()
    plt.grid(True)
    plt.savefig('../logs/td3_training_rewards.png')
    plt.close()

    # Plot the coverage curve
    plt.figure(figsize=(12, 6))
    plt.plot(coverage_rates, alpha=0.7, color='green')

    # Compute and plot the moving-average coverage rate
    smoothed_coverage = []
    for i in range(len(coverage_rates)):
        start_idx = max(0, i - window_size + 1)
        smoothed_coverage.append(sum(coverage_rates[start_idx:(i + 1)]) / (i - start_idx + 1))

    plt.plot(smoothed_coverage, linewidth=2, color='darkgreen', label=f'Smoothed Coverage (window={window_size})')

    # Mark task-switch points
    for boundary in task_boundaries[1:]:
        plt.axvline(x=boundary, color='r', linestyle='--', alpha=0.7)

    plt.xlabel('Episode')
    plt.ylabel('Coverage Rate')
    plt.title('用户服务覆盖率变化')
    plt.legend()
    plt.grid(True)
    plt.savefig('../logs/td3_coverage_rates.png')
    plt.close()

    # Close the TensorBoard writer
    writer.close()

    # Final test
    print("\n===== 最终评估 =====")

    # Load the best model
    agent.load("../models/best_model_td3.pt")

    # Test each task seen during training
    final_eval_episodes = 5

    for task_id in range(len(task_boundaries)):
        print(f"\n测试任务 {task_id}:")

        # Advance the evaluation environment to the corresponding task
        if task_id > 0:
            eval_env.switch_task()

        task_coverage = []
        task_rewards = []
        success_count = 0

        for test_ep in range(final_eval_episodes):
            state = eval_env.reset()
            total_reward = 0
            done = False
            step_count = 0

            while not done:
                action = agent.select_action(state, eval_mode=True)
                next_state, reward, done, info = eval_env.step(action)
                state = next_state
                total_reward += reward
                step_count += 1

            visited = info["total_visited"]
            active = info["total_active"]
            coverage = visited / active if active > 0 else 0
            task_coverage.append(coverage)
            task_rewards.append(total_reward)

            # Was the task fully completed this episode?
            if visited == active and active > 0:
                success_count += 1

            print(f"  测试回合 {test_ep + 1}, 奖励: {total_reward:.2f}, "
                  f"服务覆盖率: {coverage * 100:.1f}% ({visited}/{active}), 步数: {step_count}")

            # Save the test trajectory (first episode only)
            if test_ep == 0:
                eval_env.render(f"final_task{task_id}_ep{test_ep + 1}")

        # Report per-task average performance
        avg_coverage = np.mean(task_coverage) * 100
        avg_reward = np.mean(task_rewards)
        success_rate = success_count / final_eval_episodes * 100
        print(f"任务 {task_id} 平均表现:")
        print(f"  平均覆盖率: {avg_coverage:.1f}%")
        print(f"  平均奖励: {avg_reward:.2f}")
        print(f"  成功率: {success_rate:.1f}%")


# Script entry point: run the full training + evaluation pipeline.
if __name__ == "__main__":
    train()

