import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import copy
import math
import os

# Configure matplotlib so CJK glyphs and the minus sign render correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Pick the training device (GPU when available)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Directory where trajectory plots are written.
# Fix: DroneEnvironment.render() saves to 'trajectory_images/...' relative to
# the current working directory, so create exactly that path — the previous
# "../trajectory_images" never matched and savefig failed at runtime.
os.makedirs("trajectory_images", exist_ok=True)

# Seed every RNG in use so runs are reproducible
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)

# Replay transition record: (state, action, reward, next_state, done)
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])

# Prioritized experience replay backed by two parallel deques (experiences + priorities).
class PrioritizedReplayBuffer:
    """Replay buffer that samples transitions proportionally to their priority.

    Priorities live in a second deque aligned index-for-index with the
    experience deque; both share the same ``maxlen`` so evictions stay in sync.
    """

    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)      # stored Experience tuples
        self.priorities = deque(maxlen=capacity)  # matching sampling priorities
        self.eps = 1e-5                           # floor so no priority is ever zero

    def push(self, state, action, reward, next_state, done):
        """Store a transition with the current maximum priority (1.0 if empty)."""
        max_priority = max(self.priorities) if self.priorities else 1.0
        self.buffer.append(Experience(state, action, reward, next_state, done))
        self.priorities.append(max_priority)

    def sample(self, batch_size):
        """Sample a batch with probability proportional to stored priority.

        Returns (states, actions, rewards, next_states, dones, indices), where
        ``indices`` are buffer positions used later by :meth:`update_priorities`.
        """
        # Normalize in float64: np.random.choice raises "probabilities do not
        # sum to 1" on accumulated lower-precision round-off.
        priorities = np.asarray(self.priorities, dtype=np.float64)
        probs = priorities / priorities.sum()
        indices = np.random.choice(len(self.buffer), batch_size, p=probs)

        experiences = [self.buffer[i] for i in indices]
        # Stack into one contiguous ndarray before converting: calling
        # torch.tensor on a list of ndarrays is very slow (PyTorch warns).
        states = torch.as_tensor(np.array([e.state for e in experiences]), dtype=torch.float)
        actions = torch.as_tensor(np.array([e.action for e in experiences]), dtype=torch.long)
        rewards = torch.as_tensor(np.array([e.reward for e in experiences]), dtype=torch.float)
        next_states = torch.as_tensor(np.array([e.next_state for e in experiences]), dtype=torch.float)
        dones = torch.as_tensor(np.array([e.done for e in experiences]), dtype=torch.float)

        return states, actions, rewards, next_states, dones, indices

    def update_priorities(self, indices, errors):
        """Set sampled transitions' priorities to error + eps (eps avoids zero)."""
        for i, e in zip(indices, errors):
            self.priorities[i] = e + self.eps

    def __len__(self):
        return len(self.buffer)


class DuelingQNetwork(nn.Module):
    """Dueling DQN: a shared trunk feeding separate state-value and advantage heads."""

    def __init__(self, state_dim, action_dim):
        super(DuelingQNetwork, self).__init__()
        # Shared feature extractor
        self.feature_layer = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU()
        )

        # Scalar state-value head V(s)
        self.value_stream = nn.Sequential(
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 1)
        )

        # Per-action advantage head A(s, a)
        self.advantage_stream = nn.Sequential(
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, action_dim)
        )

    def forward(self, x):
        """Return Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a))."""
        shared = self.feature_layer(x)
        state_value = self.value_stream(shared)
        advantage = self.advantage_stream(shared)
        # Center the advantages so V(s) is identifiable as the mean Q-value.
        centered_advantage = advantage - advantage.mean(dim=1, keepdim=True)
        return state_value + centered_advantage


class User:
    """A ground user at a fixed planar position."""

    def __init__(self, x, y):
        self.x, self.y = x, y
        self.active = False   # member of the current task's active user set
        self.visited = False  # set once the drone has served this user


class DroneEnvironment:
    """Single-drone user-coverage environment on a 100x100 arena.

    Ten users sit at fixed random positions; a "task" activates a subset of 8
    of them. The drone earns reward for entering the service radius of active
    users (a large bonus the first time each is visited) and an episode ends
    after ``max_steps`` steps or once every active user has been visited.
    """

    def __init__(self, config):
        self.width = 100
        self.height = 100
        self.service_radius = config["service_radius"]
        self.max_steps = 300  # generous per-episode step budget
        self.steps = 0
        self.episode_num = 0

        # Users at fixed positions (positions persist across episodes/tasks)
        self.all_users = []
        for _ in range(10):
            x = random.uniform(0, self.width)
            y = random.uniform(0, self.height)
            self.all_users.append(User(x, y))

        # Activate an initial subset of users — one activation pattern = one task
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False

        # Drone state
        self.drone_x = random.uniform(0, self.width)
        self.drone_y = random.uniform(0, self.height)
        self.speed = 5

        # Trajectory history (for rendering)
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        # Action space (8 compass directions + hover)
        self.actions = ["N", "NE", "E", "SE", "S", "SW", "W", "NW", "hover"]

    def reset(self):
        """Start a new episode; returns the initial state vector."""
        self.episode_num += 1
        # Clear per-episode visit flags
        for user in self.all_users:
            user.visited = False

        # Curriculum-style initialization: spawn the drone near an active user
        if self.episode_num < 300:  # curriculum applies to the first 300 episodes
            # Pick a random active user and initialize the drone near it
            if self.active_users:
                random_user = random.choice(self.active_users)
                # Random point on an annulus around the chosen user
                angle = random.uniform(0, 2 * math.pi)
                distance = random.uniform(10, 30)  # 10-30 m away
                self.drone_x = max(0, min(self.width, random_user.x + distance * math.cos(angle)))
                self.drone_y = max(0, min(self.height, random_user.y + distance * math.sin(angle)))
            else:
                self.drone_x = random.uniform(0, self.width)
                self.drone_y = random.uniform(0, self.height)
        else:
            # Later episodes: uniform random spawn
            self.drone_x = random.uniform(0, self.width)
            self.drone_y = random.uniform(0, self.height)

        self.steps = 0

        # Reset trajectory history
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        return self.get_state()

    def switch_task(self):
        """Re-sample the active user subset, simulating a task change."""
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.visited = False

    def get_state(self):
        """Build the observation vector: 2 + 10*6 + 2 = 64 features."""
        state = []

        # 1. Drone coordinates (normalized to [0, 1])
        state.extend([self.drone_x / self.width, self.drone_y / self.height])

        # 2. Six features per user
        for user in self.all_users:
            # Active flag
            state.append(1.0 if user.active else 0.0)

            # Already-visited flag
            state.append(1.0 if user.visited else 0.0)

            # User position relative to the drone (normalized)
            dx = (user.x - self.drone_x) / self.width
            dy = (user.y - self.drone_y) / self.height
            state.extend([dx, dy])

            # Distance in normalized-coordinate units, scaled and capped
            distance = math.sqrt(dx * dx + dy * dy)
            state.append(min(distance * 5, 1.0))  # scaled to emphasize short range

            # Whether the user is inside the service radius (in world units)
            in_range = 1.0 if math.sqrt((self.drone_x - user.x) ** 2 +
                                        (self.drone_y - user.y) ** 2) < self.service_radius else 0.0
            state.append(in_range)

        # 3. Elapsed step fraction
        state.append(self.steps / self.max_steps)

        # 4. Fraction of active users already served
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        state.append(visited_count / 8)  # 8 active users per task

        return np.array(state)

    def step(self, action_idx):
        """Apply one action; returns (next_state, reward, done, info)."""
        action = self.actions[action_idx]
        prev_x, prev_y = self.drone_x, self.drone_y

        # Apply movement (8 directions + hover), clamped to the arena bounds
        if action == "N":
            self.drone_y = min(self.height, self.drone_y + self.speed)
        elif action == "S":
            self.drone_y = max(0, self.drone_y - self.speed)
        elif action == "E":
            self.drone_x = min(self.width, self.drone_x + self.speed)
        elif action == "W":
            self.drone_x = max(0, self.drone_x - self.speed)
        elif action == "NE":
            # 0.7071 ~= 1/sqrt(2): keep diagonal speed equal to axial speed
            self.drone_x = min(self.width, self.drone_x + self.speed * 0.7071)
            self.drone_y = min(self.height, self.drone_y + self.speed * 0.7071)
        elif action == "SE":
            self.drone_x = min(self.width, self.drone_x + self.speed * 0.7071)
            self.drone_y = max(0, self.drone_y - self.speed * 0.7071)
        elif action == "SW":
            self.drone_x = max(0, self.drone_x - self.speed * 0.7071)
            self.drone_y = max(0, self.drone_y - self.speed * 0.7071)
        elif action == "NW":
            self.drone_x = max(0, self.drone_x - self.speed * 0.7071)
            self.drone_y = min(self.height, self.drone_y + self.speed * 0.7071)
        # "hover" moves nothing

        # NOTE(review): movement_cost is computed but never folded into the
        # reward below — confirm whether an energy penalty was intended.
        movement_cost = math.sqrt((self.drone_x - prev_x) ** 2 + (self.drone_y - prev_y) ** 2)

        # Record trajectory for rendering
        self.trajectory_x.append(self.drone_x)
        self.trajectory_y.append(self.drone_y)

        # ---- Reward shaping ----
        reward = 0
        serviced_users = 0
        newly_serviced = 0  # users served for the first time this step

        # Constant per-step time penalty
        reward -= 0.2

        # Service rewards for active users within the service radius
        for user in self.all_users:
            if user.active:
                distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)

                if distance < self.service_radius:
                    serviced_users += 1

                    # Large one-time bonus on first visit
                    if not user.visited:
                        reward += 50  # emphasizes covering new users
                        newly_serviced += 1
                        user.visited = True
                    else:
                        reward += 5  # repeat coverage is worth much less

        # Distance to the nearest still-unvisited active user
        min_distance_to_unvisited = float('inf')
        for user in self.all_users:
            if user.active and not user.visited:
                distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)
                min_distance_to_unvisited = min(min_distance_to_unvisited, distance)

        # Dense proximity reward toward the nearest unvisited user (0 beyond 50 m)
        if min_distance_to_unvisited != float('inf'):
            proximity_reward = 15 * (1 - min(min_distance_to_unvisited / 50, 1))
            reward += proximity_reward

        # Completion bonus when every active user has been visited
        active_count = sum(1 for user in self.all_users if user.active)
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)

        if visited_count == active_count and active_count > 0:
            reward += 100  # large bonus for full coverage

        # Advance time; episode ends at the step budget...
        self.steps += 1
        done = self.steps >= self.max_steps

        # ...or early, once all active users have been served
        if visited_count == active_count and active_count > 0:
            done = True

        info = {
            "serviced_users": serviced_users,
            "newly_serviced": newly_serviced,
            "total_visited": visited_count,
            "total_active": active_count
        }

        return self.get_state(), reward, done, info

    def render(self, episode, step=None):
        """Save a PNG snapshot of the trajectory, users, and coverage.

        ``episode`` may be an int or a string label (the final evaluation
        passes strings); it is interpolated into the title and filename.
        """
        plt.figure(figsize=(10, 10))
        plt.xlim(0, self.width)
        plt.ylim(0, self.height)

        # Drone path so far this episode
        plt.plot(self.trajectory_x, self.trajectory_y, 'ro-', alpha=0.6, label='Drone Path')

        # Current drone position
        plt.scatter(self.drone_x, self.drone_y, c='red', s=100, marker='*', label='Drone')

        # Service radius around the drone
        circle = plt.Circle((self.drone_x, self.drone_y), self.service_radius,
                            color='blue', fill=False, alpha=0.3, linestyle='--')
        plt.gca().add_patch(circle)

        # Users, colored by status
        for user in self.all_users:
            if user.active and user.visited:
                color = 'limegreen'  # active and already served
                marker = 'o'
            elif user.active and not user.visited:
                color = 'green'  # active, not yet served
                marker = 'o'
            else:
                color = 'gray'  # inactive
                marker = 'x'

            plt.scatter(user.x, user.y, c=color, s=80, marker=marker)

            # Small marker ring for active users currently in range
            distance = math.sqrt((self.drone_x - user.x) ** 2 + (self.drone_y - user.y) ** 2)
            if distance < self.service_radius and user.active:
                circle = plt.Circle((user.x, user.y), 2, color='green', alpha=0.5)
                plt.gca().add_patch(circle)

        # Legend built from empty scatters so colors/markers match the plot
        visited_handle = plt.scatter([], [], c='limegreen', marker='o', label='Visited User')
        active_handle = plt.scatter([], [], c='green', marker='o', label='Active User')
        inactive_handle = plt.scatter([], [], c='gray', marker='x', label='Inactive User')
        plt.legend(handles=[visited_handle, active_handle, inactive_handle])

        # Coverage summary in the title
        visited_count = sum(1 for user in self.all_users if user.active and user.visited)
        active_count = sum(1 for user in self.all_users if user.active)
        plt.title(f'Episode {episode}: Visited {visited_count}/{active_count} users (Step {self.steps})')
        plt.xlabel('X Position (m)')
        plt.ylabel('Y Position (m)')
        plt.grid(True)

        # Unique filename per episode/step.
        # NOTE(review): assumes 'trajectory_images/' exists relative to the CWD.
        filename = f'trajectory_images/episode_{episode}_step_{self.steps}.png'
        plt.savefig(filename)
        plt.close()


class ContinualRLAgent:
    """Dueling Double-DQN agent with prioritized replay and EWC continual learning.

    EWC (Elastic Weight Consolidation) penalizes drift of parameters that were
    important (high Fisher information) for previously learned tasks.
    """

    def __init__(self, state_dim, action_dim, config):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = config["gamma"]                            # discount factor
        self.lr = config["learning_rate"]
        self.epsilon = 1.0                                      # exploration rate (decayed by the trainer)
        self.epsilon_decay = config["epsilon_decay"]
        self.epsilon_min = 0.01
        self.batch_size = config["batch_size"]
        self.ewc_lambda = config["ewc_lambda"]                  # EWC penalty strength
        self.target_update_freq = config["target_update_freq"]  # in training steps
        self.alpha = config["alpha"]                            # Polyak soft-update coefficient

        # Fix: the script detects a CUDA device at import time but the agent
        # never used it — place the networks (and every batch) on it here.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Online and target networks (Dueling DQN architecture)
        self.q_network = DuelingQNetwork(state_dim, action_dim).to(self.device)
        self.target_network = DuelingQNetwork(state_dim, action_dim).to(self.device)
        self.target_network.load_state_dict(self.q_network.state_dict())
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=self.lr)

        # Prioritized experience replay
        self.memory = PrioritizedReplayBuffer(config["memory_capacity"])

        # Continual-learning bookkeeping
        self.previous_tasks_fisher = {}  # task_id -> per-parameter Fisher information
        self.previous_tasks_params = {}  # task_id -> parameter snapshot at task end
        self.current_task_id = 0
        self.task_seen = 0

        # Training statistics
        self.training_steps = 0

    def select_action(self, state, eval_mode=False):
        """Epsilon-greedy action selection; purely greedy when eval_mode is True."""
        if not eval_mode and random.random() < self.epsilon:
            return random.randrange(self.action_dim)

        state_tensor = torch.tensor(state, dtype=torch.float, device=self.device).unsqueeze(0)
        with torch.no_grad():
            q_values = self.q_network(state_tensor)
        return q_values.argmax().item()

    def update_model(self):
        """Run one Double-DQN gradient step with the EWC penalty.

        Returns the scalar loss, or None if the buffer is still too small.
        """
        if len(self.memory) < self.batch_size:
            return

        # Sample by priority and move the batch onto the training device
        states, actions, rewards, next_states, dones, indices = self.memory.sample(self.batch_size)
        states = states.to(self.device)
        actions = actions.to(self.device)
        rewards = rewards.to(self.device)
        next_states = next_states.to(self.device)
        dones = dones.to(self.device)

        # Q-values of the taken actions
        q_values = self.q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)

        # Double DQN: the online net picks the next action, the target net rates it
        with torch.no_grad():
            next_actions = self.q_network(next_states).argmax(1).unsqueeze(1)
            next_q_values = self.target_network(next_states).gather(1, next_actions).squeeze(1)
            target_q_values = rewards + (1 - dones) * self.gamma * next_q_values

        # |TD error| feeds back into the replay priorities
        # (.cpu() is required before .numpy() when training on CUDA)
        td_errors = torch.abs(q_values - target_q_values).detach().cpu().numpy()
        self.memory.update_priorities(indices, td_errors)

        # TD loss
        td_loss = nn.MSELoss()(q_values, target_q_values)

        # EWC penalty: pull parameters toward earlier tasks' optima,
        # weighted by how important (Fisher) each parameter was
        ewc_loss = 0
        if self.task_seen > 0:
            for task_id in range(self.task_seen):
                for name, param in self.q_network.named_parameters():
                    if name in self.previous_tasks_fisher[task_id]:
                        fisher = self.previous_tasks_fisher[task_id][name]
                        old_param = self.previous_tasks_params[task_id][name]
                        ewc_loss += (fisher * (param - old_param).pow(2)).sum()

        # Total loss = TD loss + EWC regularization
        loss = td_loss + self.ewc_lambda * ewc_loss

        self.optimizer.zero_grad()
        loss.backward()
        # Clip gradients to guard against explosion
        torch.nn.utils.clip_grad_norm_(self.q_network.parameters(), 10)
        self.optimizer.step()

        self.training_steps += 1

        # Periodically soft-update the target network
        if self.training_steps % self.target_update_freq == 0:
            self.update_target_network()

        return loss.item()

    def update_target_network(self):
        """Polyak (soft) update: target <- alpha * online + (1 - alpha) * target."""
        for target_param, param in zip(self.target_network.parameters(), self.q_network.parameters()):
            target_param.data.copy_(self.alpha * param.data + (1 - self.alpha) * target_param.data)

    def calculate_fisher_information(self, env, num_samples=200):
        """Estimate the diagonal Fisher information matrix for EWC.

        Rolls out greedy episodes in ``env`` and accumulates squared gradients
        of the log-softmax likelihood of the taken action. Returns
        (fisher_dict, param_dict), both keyed by parameter name.
        """
        fisher_dict = {}
        param_dict = {}

        # Zero-initialize the Fisher accumulators and snapshot current params
        for name, param in self.q_network.named_parameters():
            fisher_dict[name] = torch.zeros_like(param)
            param_dict[name] = param.data.clone()

        self.q_network.eval()
        for _ in range(num_samples):
            state = env.reset()
            done = False

            while not done:
                action = self.select_action(state, eval_mode=True)
                next_state, reward, done, _ = env.step(action)

                state_tensor = torch.tensor(state, dtype=torch.float, device=self.device).unsqueeze(0)
                q_values = self.q_network(state_tensor)

                # Log-likelihood of the taken action under a softmax policy over Q
                log_prob = torch.log_softmax(q_values, dim=1)[0, action]

                # Back-propagate the negative log-likelihood
                self.optimizer.zero_grad()
                (-log_prob).backward()

                # Accumulate squared gradients (diagonal Fisher estimate)
                for name, param in self.q_network.named_parameters():
                    if param.grad is not None:
                        fisher_dict[name] += param.grad.pow(2).data

                state = next_state

        # Average over rollouts
        for name in fisher_dict:
            fisher_dict[name] /= num_samples

        self.q_network.train()
        return fisher_dict, param_dict

    def finish_task(self, env):
        """Snapshot Fisher information and parameters when the current task ends."""
        fisher, params = self.calculate_fisher_information(env)
        self.previous_tasks_fisher[self.current_task_id] = fisher
        self.previous_tasks_params[self.current_task_id] = params

        self.task_seen += 1
        self.current_task_id += 1


def train():
    """Train the continual-learning agent with periodic task switches,
    plot the reward curve, and run a final greedy evaluation per task."""
    # Hyperparameters
    config = {
        "learning_rate": 0.0003,  # low LR for stability
        "gamma": 0.99,  # discount factor
        "epsilon_decay": 0.996,  # slow exploration decay
        "batch_size": 128,  # large batch size
        "memory_capacity": 50000,  # large replay capacity
        "service_radius": 20,  # drone service radius (m)
        "ewc_lambda": 1000,  # EWC regularization strength
        "target_update_freq": 10,  # target-network update period (training steps)
        "alpha": 0.005  # Polyak soft-update coefficient
    }

    # Build the environment
    env = DroneEnvironment(config)

    # State and action dimensions
    state_dim = 2 + 10 * 6 + 2  # drone xy (2) + 10 users * 6 features + step fraction & coverage (2)
    action_dim = 9  # 8 directions + hover

    # Build the agent
    agent = ContinualRLAgent(state_dim, action_dim, config)

    # Training schedule
    num_episodes = 1000
    task_switch_interval = 500  # episodes per task
    render_interval = 50

    # Bookkeeping
    all_rewards = []
    task_rewards = []
    avg_rewards = []
    task_boundaries = [0]  # episodes at which a task switch happened

    for episode in range(num_episodes):
        # Switch the active-user pattern (the task) every task_switch_interval episodes
        if episode % task_switch_interval == 0 and episode > 0:
            print(f"\n===== 切换到新任务 (Episode {episode}) =====")
            agent.finish_task(env)  # snapshot Fisher info for the finished task
            env.switch_task()  # re-sample the active user set
            task_rewards = []  # reset per-task reward log
            task_boundaries.append(episode)  # remember the boundary for plotting

        state = env.reset()
        total_reward = 0
        done = False

        while not done:
            # Choose an action (epsilon-greedy)
            action = agent.select_action(state)

            # Step the environment
            next_state, reward, done, info = env.step(action)

            # Store the transition
            agent.memory.push(state, action, reward, next_state, done)

            # Multiple gradient steps per environment step to speed up learning
            for _ in range(4):
                agent.update_model()

            # Advance
            state = next_state
            total_reward += reward

        # Decay exploration once per episode
        agent.epsilon = max(agent.epsilon_min, agent.epsilon * agent.epsilon_decay)

        # Log rewards
        all_rewards.append(total_reward)
        task_rewards.append(total_reward)

        # Running average over the last 100 episodes
        avg_reward = np.mean(all_rewards[-100:]) if len(all_rewards) >= 100 else np.mean(all_rewards)
        avg_rewards.append(avg_reward)

        # Progress report every 10 episodes
        if (episode + 1) % 10 == 0:
            visited = info["total_visited"]
            active = info["total_active"]
            coverage = (visited / active * 100) if active > 0 else 0
            print(f"Episode {episode + 1}/{num_episodes}, Reward: {total_reward:.2f}, "
                  f"Avg Reward: {avg_reward:.2f}, Epsilon: {agent.epsilon:.4f}, "
                  f"Coverage: {coverage:.1f}% ({visited}/{active})")

        # Periodic trajectory snapshot
        if (episode + 1) % render_interval == 0 or episode == 0:
            env.render(episode + 1)

    # Plot the reward curve
    plt.figure(figsize=(12, 6))
    plt.plot(all_rewards, alpha=0.3, label='Episode Reward')

    # Trailing-window moving average for a smoother curve
    window_size = 20
    smoothed_rewards = []
    for i in range(len(all_rewards)):
        start_idx = max(0, i - window_size + 1)
        smoothed_rewards.append(sum(all_rewards[start_idx:(i + 1)]) / (i - start_idx + 1))

    plt.plot(smoothed_rewards, linewidth=2, label=f'Smoothed Reward (window={window_size})')

    # Mark task-switch episodes
    for boundary in task_boundaries[1:]:
        plt.axvline(x=boundary, color='r', linestyle='--')

    plt.xlabel('Episode')
    plt.ylabel('Reward')
    plt.title('Training Progress with Task Switching')
    plt.legend()
    plt.grid(True)
    plt.savefig('training_rewards.png')

    # Final greedy evaluation
    print("\n===== 最终评估 =====")
    test_episodes = 3

    # Evaluate once per training task.
    # NOTE(review): switch_task() re-samples a *random* active set, so these
    # evaluation tasks are not the same activation patterns seen in training.
    for task_id in range(len(task_boundaries)):
        print(f"\n测试任务 {task_id}:")

        # Move to the next task's environment
        if task_id > 0:
            env.switch_task()

        for test_ep in range(test_episodes):
            state = env.reset()
            total_reward = 0
            done = False

            while not done:
                action = agent.select_action(state, eval_mode=True)
                next_state, reward, done, info = env.step(action)
                state = next_state
                total_reward += reward

            visited = info["total_visited"]
            active = info["total_active"]
            coverage = (visited / active * 100) if active > 0 else 0
            print(
                f"  测试回合 {test_ep + 1}, 奖励: {total_reward:.2f}, 服务覆盖率: {coverage:.1f}% ({visited}/{active})")
            env.render(f"final_task{task_id}_ep{test_ep + 1}")


# Script entry point: run the full training-and-evaluation pipeline.
if __name__ == "__main__":
    train()
