import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import namedtuple, deque
from mpl_toolkits.mplot3d import Axes3D

# Seed every RNG source (Python, NumPy, PyTorch CPU/CUDA) so runs are reproducible
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
if torch.cuda.is_available():
    torch.cuda.manual_seed(42)


class DualNetwork(nn.Module):
    """Dual-network architecture: one head for the current task, one that
    retains knowledge from earlier tasks; the forward pass blends both."""

    def __init__(self, state_dim, action_dim, hidden_dim=256):
        super(DualNetwork, self).__init__()
        # Head trained on the task at hand
        self.current = self._build_mlp(state_dim, hidden_dim, action_dim)
        # Head that preserves historical knowledge
        self.knowledge = self._build_mlp(state_dim, hidden_dim, action_dim)

    @staticmethod
    def _build_mlp(in_dim, hidden_dim, out_dim):
        """Build a two-hidden-layer ReLU MLP."""
        return nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, out_dim),
        )

    def forward(self, state, alpha=0.5):
        """Blend the heads: `alpha` weights the current head, (1 - alpha)
        the knowledge head."""
        blended = alpha * self.current(state) + (1 - alpha) * self.knowledge(state)
        return blended


class EpisodeBuffer:
    """Experience replay buffer with per-task sub-buffers.

    Every transition is stored in a global buffer (tagged with its task id)
    and also in a task-specific buffer, so sampling can mix current-task
    experience with replay from earlier tasks.
    """

    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)  # global: (task_id, s, a, r, s', done)
        self.task_buffers = {}                # task_id -> deque of (s, a, r, s', done)

    def push(self, task_id, state, action, reward, next_state, done):
        """Store a transition both globally and in its task-specific buffer."""
        self.buffer.append((task_id, state, action, reward, next_state, done))

        if task_id not in self.task_buffers:
            self.task_buffers[task_id] = deque(maxlen=10000)
        self.task_buffers[task_id].append((state, action, reward, next_state, done))

    def sample(self, batch_size, current_task_id=None, replay_ratio=0.7):
        """Sample a batch; every returned tuple is (task_id, s, a, r, s', done).

        When `current_task_id` is given, roughly `replay_ratio` of the batch
        comes from that task's buffer and the rest from the global buffer.

        Fixed: the task-id check must be `is not None` — task id 0 is valid,
        and the previous truthiness test silently skipped the replay-ratio
        path for the very first task. Task-buffer samples are now re-tagged
        with the task id so the batch has uniform tuple arity.
        """
        if len(self.buffer) < batch_size:
            # Not enough data for a full batch: return everything we have
            return random.sample(list(self.buffer), len(self.buffer))

        if (current_task_id is not None
                and current_task_id in self.task_buffers
                and len(self.task_buffers[current_task_id]) > 0):
            current_size = min(int(batch_size * replay_ratio),
                               len(self.task_buffers[current_task_id]))
            if current_size > 0:
                # Re-attach the task id so all returned tuples are 6-tuples
                current_samples = [
                    (current_task_id,) + transition
                    for transition in random.sample(
                        list(self.task_buffers[current_task_id]), current_size)
                ]
                # Remaining samples come from the full (all-task) buffer
                other_samples = random.sample(list(self.buffer),
                                              batch_size - current_size)
                return current_samples + list(other_samples)

        return random.sample(list(self.buffer), min(batch_size, len(self.buffer)))


class UAVEnvironment:
    """Rectangular service area with a UAV that flies in 3-D to collect
    tasks from ground users within a horizontal collection radius."""

    def __init__(self, width=100, height=100, user_count=10, active_users=8):
        self.width = width
        self.height = height
        self.user_count = user_count
        self.active_users = active_users

        # Fixed user locations, drawn once per environment instance
        self.all_users = [
            [random.uniform(0, width), random.uniform(0, height)]
            for _ in range(user_count)
        ]

        # UAV parameters
        self.uav_pos = [width / 2, height / 2, 30]  # start at center, 30 m altitude
        self.uav_speed = 5.0          # m/s
        self.uav_energy = 1000        # initial energy budget
        self.collection_radius = 10   # horizontal radius for task collection

        # Task bookkeeping
        self.tasks_collected = 0
        self.current_active_users = []
        self.task_status = []   # 0 = pending, 1 = collected
        self.task_id = 0        # id of the current task environment

        # Initialize episode state
        self.reset()

    def reset(self):
        """Reset UAV state, draw a fresh set of active users, and return the
        initial observation."""
        self.uav_pos = [self.width / 2, self.height / 2, 30]
        self.uav_energy = 1000
        self.tasks_collected = 0

        # Pick `active_users` distinct users for this episode
        chosen = random.sample(range(self.user_count), self.active_users)
        self.current_active_users = [self.all_users[idx] for idx in chosen]
        self.task_status = [0] * self.active_users

        return self._get_state()

    def change_task(self):
        """Advance to a new task environment and reset the episode."""
        self.task_id += 1
        return self.reset()

    def _get_state(self):
        """Assemble the observation: UAV pose (3), normalized energy (1),
        then (x, y, status) per active user, zero-padded to fixed length."""
        obs = list(self.uav_pos)
        obs.append(self.uav_energy / 1000.0)  # normalized energy

        for idx, user in enumerate(self.current_active_users):
            obs.extend(user)                  # user position
            obs.append(self.task_status[idx])  # task status flag

        # Pad so state_dim stays constant regardless of user count
        target_len = 3 + 1 + self.active_users * 3
        obs.extend([0] * (target_len - len(obs)))

        return np.array(obs, dtype=np.float32)

    def step(self, action):
        """Apply a movement action [dx, dy, dz]; return (state, reward, done, info)."""
        dx, dy, dz = action

        # Normalize to a unit direction (a zero vector stays put)
        magnitude = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
        if magnitude > 0:
            dx, dy, dz = dx / magnitude, dy / magnitude, dz / magnitude

        # Move at fixed speed, clipped to the arena; altitude kept in [10, 50]
        new_x = np.clip(self.uav_pos[0] + dx * self.uav_speed, 0, self.width)
        new_y = np.clip(self.uav_pos[1] + dy * self.uav_speed, 0, self.height)
        new_z = np.clip(self.uav_pos[2] + dz * self.uav_speed, 10, 50)

        # Actual distance travelled after clipping
        distance = np.sqrt((new_x - self.uav_pos[0]) ** 2 +
                           (new_y - self.uav_pos[1]) ** 2 +
                           (new_z - self.uav_pos[2]) ** 2)

        self.uav_pos = [new_x, new_y, new_z]

        # Energy model: movement cost plus a constant hover cost
        self.uav_energy -= 0.5 * distance + 0.2

        # Small negative base reward pushes the agent to finish quickly
        reward = -0.1
        newly_collected = 0

        # Collect any pending task within horizontal range
        for idx, user in enumerate(self.current_active_users):
            if self.task_status[idx] == 0:
                horiz = np.sqrt((self.uav_pos[0] - user[0]) ** 2 +
                                (self.uav_pos[1] - user[1]) ** 2)
                if horiz <= self.collection_radius:
                    self.task_status[idx] = 1
                    self.tasks_collected += 1
                    newly_collected += 1
                    reward += 10  # bonus per collected task

        # Shaping term proportional to the fraction of tasks done
        collection_ratio = sum(self.task_status) / len(self.task_status)
        reward += collection_ratio * 5

        if self.uav_energy <= 0:
            # Out of energy: heavy penalty and episode over
            reward -= 50
            done = True
        else:
            done = all(flag == 1 for flag in self.task_status)
            if done:
                # Completion bonus, reduced by the energy actually spent
                reward += 50 - 0.01 * (1000 - self.uav_energy)

        info = {
            'tasks_collected_total': self.tasks_collected,
            'tasks_collected_step': newly_collected,
            'energy': self.uav_energy,
            'collection_ratio': collection_ratio,
        }

        return self._get_state(), reward, done, info


class CRLAgent:
    """Continual reinforcement-learning agent built on a dual Q-network.

    A current-task head and a knowledge-retention head are blended with
    factor `alpha`; a small distillation loss keeps the current head close
    to the retained knowledge to mitigate catastrophic forgetting.
    """

    def __init__(self, state_dim, action_dim, device='cuda'):
        self.device = device
        self.state_dim = state_dim
        self.action_dim = action_dim

        # Policy and target networks (DQN-style target setup)
        self.policy_net = DualNetwork(state_dim, action_dim).to(device)
        self.target_net = DualNetwork(state_dim, action_dim).to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=0.001)
        self.memory = EpisodeBuffer(100000)
        self.task_memory = {}  # task_id -> list of (state, action) exemplars

        # Training hyperparameters
        self.batch_size = 64
        self.gamma = 0.99            # discount factor
        self.epsilon = 1.0           # epsilon-greedy exploration rate
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.target_update = 10      # target-network sync period (episodes)
        self.alpha = 0.5             # current/knowledge blending factor

        # Id of the task environment currently being trained on
        self.current_task_id = 0

    def remember_task_examples(self, task_id, states, actions):
        """Store (state, action) exemplars for a task for later replay/analysis."""
        if task_id not in self.task_memory:
            self.task_memory[task_id] = []
        self.task_memory[task_id].extend(zip(states, actions))

    def select_action(self, state):
        """Epsilon-greedy action selection.

        The network output vector is used directly as a continuous action
        (clipped to [-1, 1]) with added Gaussian exploration noise.
        """
        if random.random() < self.epsilon:
            # Uniform random exploration
            return np.random.uniform(-1, 1, self.action_dim)
        with torch.no_grad():
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
            q_values = self.policy_net(state_tensor, self.alpha)
            action = q_values.cpu().numpy()[0]
            # Gaussian noise keeps some exploration even when exploiting
            noise = np.random.normal(0, 0.1, self.action_dim)
            return np.clip(action + noise, -1, 1)

    def train(self):
        """One gradient step on a replayed batch.

        Returns the TD loss as a float, or 0 when the buffer does not yet
        hold a full batch.
        """
        if len(self.memory.buffer) < self.batch_size:
            return 0

        transitions = self.memory.sample(self.batch_size, self.current_task_id)

        batch_task_ids = []
        batch_states = []
        batch_actions = []
        batch_rewards = []
        batch_next_states = []
        batch_dones = []

        for sample in transitions:
            # Samples may or may not carry a task id depending on which
            # sub-buffer they came from
            if len(sample) == 6:
                task_id, state, action, reward, next_state, done = sample
                batch_task_ids.append(task_id)
            else:
                state, action, reward, next_state, done = sample
                batch_task_ids.append(-1)  # -1 marks "task unknown"

            batch_states.append(state)
            batch_actions.append(action)
            batch_rewards.append(reward)
            batch_next_states.append(next_state)
            batch_dones.append(done)

        states = torch.FloatTensor(np.array(batch_states)).to(self.device)
        actions = torch.FloatTensor(np.array(batch_actions)).to(self.device)
        rewards = torch.FloatTensor(np.array(batch_rewards)).unsqueeze(1).to(self.device)
        next_states = torch.FloatTensor(np.array(batch_next_states)).to(self.device)
        dones = torch.FloatTensor(np.array(batch_dones, dtype=np.float32)).unsqueeze(1).to(self.device)

        # NOTE(review): the network emits one value per action dimension and
        # select_action() treats that vector as a continuous action, yet the
        # update below indexes it like discrete-action Q-values via
        # argmax(actions). This discrete/continuous mismatch is preserved
        # as-is; fixing it properly would mean moving to an actor-critic
        # scheme (e.g. DDPG) — confirm intent with the author.
        current_q_values = self.policy_net(states, self.alpha)
        row_indices = torch.arange(len(actions), device=self.device)
        current_q = current_q_values[row_indices, torch.argmax(actions, dim=1).long()].unsqueeze(1)

        # Bootstrapped target from the (frozen) target network
        with torch.no_grad():
            next_q_values = self.target_net(next_states, self.alpha)
            next_q = torch.max(next_q_values, dim=1)[0].unsqueeze(1)
            expected_q = rewards + (1 - dones) * self.gamma * next_q

        # TD loss
        loss = nn.MSELoss()(current_q, expected_q)

        # Distillation: nudge the current head toward the frozen knowledge head
        current_outputs = self.policy_net.current(states)
        knowledge_outputs = self.policy_net.knowledge(states).detach()
        distill_loss = nn.MSELoss()(current_outputs, knowledge_outputs) * 0.1

        total_loss = loss + distill_loss

        self.optimizer.zero_grad()
        total_loss.backward()
        # Clip gradients to avoid explosions
        torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
        self.optimizer.step()

        # Decay exploration rate
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

        return loss.item()

    def update_target_network(self):
        """Hard-sync the target network to the policy network."""
        self.target_net.load_state_dict(self.policy_net.state_dict())

    def adapt_to_new_task(self, task_id):
        """Prepare the agent for a new task environment."""
        self.current_task_id = task_id

        # Probabilistic soft update of the knowledge head from the current
        # head, so old knowledge is blended in rather than overwritten
        if random.random() < 0.5:
            for target_param, param in zip(self.policy_net.knowledge.parameters(),
                                           self.policy_net.current.parameters()):
                target_param.data.copy_(
                    0.9 * target_param.data + 0.1 * param.data
                )

        # Re-raise exploration for the unfamiliar environment
        self.epsilon = max(0.5, self.epsilon)

        # Lean more on the current head, less on retained knowledge
        self.alpha = min(0.8, self.alpha + 0.1)

    def save_model(self, filename):
        """Persist networks, optimizer state, and task exemplars to `filename`."""
        torch.save({
            'policy_net': self.policy_net.state_dict(),
            'target_net': self.target_net.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'task_memory': self.task_memory
        }, filename)

    def load_model(self, filename):
        """Restore a checkpoint produced by save_model().

        Fixed: map_location ensures checkpoints saved on GPU also load on
        CPU-only hosts.
        """
        checkpoint = torch.load(filename, map_location=self.device)
        self.policy_net.load_state_dict(checkpoint['policy_net'])
        self.target_net.load_state_dict(checkpoint['target_net'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.task_memory = checkpoint.get('task_memory', {})


def train_agent():
    """Run the continual-learning training loop; return (agent, env).

    Trains for num_episodes episodes, switching the task environment every
    task_change_freq episodes, periodically syncing the target network and
    checkpointing the model; plots the training curves at the end.
    """
    # Environment setup
    env = UAVEnvironment()
    # UAV position (3) + energy (1) + per-user (x, y, status) triples
    state_dim = 3 + 1 + env.active_users * 3
    action_dim = 3  # dx, dy, dz

    # Prefer GPU when available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    agent = CRLAgent(state_dim, action_dim, device=device)

    # Training parameters
    num_episodes = 1000
    task_change_freq = 200   # switch task environment every 200 episodes
    max_steps = 200          # step cap per episode

    # Training history
    rewards_history = []
    tasks_collected_history = []
    loss_history = []

    for episode in range(num_episodes):
        if episode > 0 and episode % task_change_freq == 0:
            # Move to a fresh task environment and let the agent adapt
            state = env.change_task()
            agent.adapt_to_new_task(env.task_id)
            print(f"\nSwitching to new task environment {env.task_id}!")
        else:
            state = env.reset()

        episode_reward = 0
        episode_loss = 0

        # Per-episode (state, action) trace kept for knowledge retention
        episode_states = []
        episode_actions = []

        for t in range(max_steps):
            action = agent.select_action(state)
            episode_states.append(state)
            episode_actions.append(action)

            next_state, reward, done, info = env.step(action)

            # Store the transition tagged with the current task id
            agent.memory.push(env.task_id, state, action, reward, next_state, done)

            loss = agent.train()
            if loss:
                episode_loss += loss

            state = next_state
            episode_reward += reward

            if done:
                break

        # Periodic hard update of the target network
        if episode % agent.target_update == 0:
            agent.update_target_network()

        # Archive this episode's knowledge
        agent.remember_task_examples(env.task_id, episode_states, episode_actions)

        # Record history; t is the index of the last executed step, so
        # t + 1 steps actually ran.
        # Fixed: the progress print previously showed 0 for single-step
        # episodes (t == 0) even when a loss had been computed.
        avg_loss = episode_loss / (t + 1) if episode_loss > 0 else 0
        rewards_history.append(episode_reward)
        tasks_collected_history.append(info['tasks_collected_total'])
        loss_history.append(avg_loss)

        if episode % 10 == 0:
            print(f"Episode: {episode}, Reward: {episode_reward:.2f}, "
                  f"Tasks Collected: {info['tasks_collected_total']}, "
                  f"Avg Loss: {avg_loss:.4f}, "
                  f"Epsilon: {agent.epsilon:.2f}")

        # Checkpoint every 100 episodes
        if episode % 100 == 0:
            agent.save_model(f"uav_crl_model_ep{episode}.pt")

    print("Training completed!")
    agent.save_model("uav_crl_model_final.pt")

    # Plot training curves
    plot_results(rewards_history, tasks_collected_history, loss_history, task_change_freq)

    return agent, env


def plot_results(rewards, tasks, losses, task_change_freq):
    """Plot per-episode rewards, tasks collected, and training loss, and save
    the figure to training_results.png.

    Vertical dashed red lines mark the episodes at which the task
    environment changed (every task_change_freq episodes).
    """
    fig, axes = plt.subplots(3, 1, figsize=(12, 18))

    # One (series, title, ylabel) triple per panel — replaces three
    # copy-pasted blocks with a single data-driven loop
    panels = [
        (rewards, 'Episode Rewards', 'Reward'),
        (tasks, 'Tasks Collected', 'Number of Tasks'),
        (losses, 'Training Loss', 'Loss'),
    ]
    for ax, (series, title, ylabel) in zip(axes, panels):
        ax.plot(series)
        ax.set_title(title)
        ax.set_xlabel('Episode')
        ax.set_ylabel(ylabel)
        # Mark task-environment change points
        for tc in range(task_change_freq, len(series), task_change_freq):
            ax.axvline(x=tc, color='r', linestyle='--', alpha=0.3)

    plt.tight_layout()
    plt.savefig('training_results.png')
    plt.close()


def visualize_trajectory(env, agent, episode_steps=200):
    """Roll out one episode with the current policy and save a 3-D plot of
    the UAV trajectory and user positions to uav_trajectory.png."""
    state = env.reset()

    # Record the UAV's path, starting from the reset position
    trajectory = [env.uav_pos.copy()]

    done = False
    step = 0

    while not done and step < episode_steps:
        action = agent.select_action(state)
        next_state, _, done, _ = env.step(action)
        trajectory.append(env.uav_pos.copy())
        state = next_state
        step += 1

    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(111, projection='3d')

    xs = [pos[0] for pos in trajectory]
    ys = [pos[1] for pos in trajectory]
    zs = [pos[2] for pos in trajectory]

    ax.plot(xs, ys, zs, 'b-', label='UAV Trajectory')

    # Start and end markers
    ax.scatter(trajectory[0][0], trajectory[0][1], trajectory[0][2], c='g', marker='o', s=100, label='Start')
    ax.scatter(trajectory[-1][0], trajectory[-1][1], trajectory[-1][2], c='r', marker='x', s=100, label='End')

    # Plot users; label the first served and first pending user separately.
    # Fixed: the original labeled only user index 0, and always as "Served"
    # regardless of its actual status.
    served_labeled = False
    pending_labeled = False
    for i, user in enumerate(env.current_active_users):
        if env.task_status[i] == 1:
            label = 'User (Served)' if not served_labeled else ""
            served_labeled = True
            color = 'orange'
        else:
            label = 'User (Pending)' if not pending_labeled else ""
            pending_labeled = True
            color = 'black'
        ax.scatter(user[0], user[1], 0, c=color, marker='^', s=80, label=label)

    ax.set_xlabel('X (m)')
    ax.set_ylabel('Y (m)')
    ax.set_zlabel('Height (m)')
    ax.set_title('UAV Trajectory')
    ax.legend()

    plt.savefig('uav_trajectory.png')
    plt.close()


def main():
    """Train the continual-RL agent, then render the final UAV trajectory."""
    trained_agent, environment = train_agent()
    visualize_trajectory(environment, trained_agent)


# Script entry point
if __name__ == "__main__":
    main()
