import copy
import os
import random
from collections import deque

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

# Configure fonts so matplotlib can render the CJK labels used below
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Select the GPU when one is available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Environment parameters
AREA_SIZE = 100  # side length of the square service area (m)
NUM_TOTAL_USERS = 10  # total number of candidate ground users
NUM_ACTIVE_USERS = 8  # users activated per task environment
UAV_SPEED = 5  # UAV speed (m/s); also the per-step displacement
MAX_ENERGY = 1000  # UAV energy budget per episode
COMM_RANGE = 10  # communication radius for task collection (m)
EPISODES_PER_TASK = 500  # episodes per task environment (K)
MAX_STEPS = 100  # maximum steps per episode
TOTAL_EPISODES = 3000  # total training episodes

# Experience replay buffer capacity and minibatch size
REPLAY_BUFFER_SIZE = 100000
BATCH_SIZE = 64

# RL hyperparameters
GAMMA = 0.99  # discount factor
EPSILON_START = 1.0
EPSILON_END = 0.01
EPSILON_DECAY = 0.995
LEARNING_RATE = 0.001
TAU = 0.001  # soft-update coefficient for the target network


# Experience replay buffer
class ReplayBuffer:
    """Fixed-capacity FIFO store of (state, action, reward, next_state, done) tuples."""

    def __init__(self, capacity):
        self._transitions = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Append one transition; the oldest entry is evicted when full."""
        self._transitions.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Return `batch_size` transitions sampled uniformly without replacement."""
        return random.sample(self._transitions, batch_size)

    def __len__(self):
        return len(self._transitions)


# Dueling DQN network (shared trunk + value stream + advantage stream)
class DuelingDQN(nn.Module):
    """Dueling Q-network: Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a')."""

    def __init__(self, state_dim, action_dim):
        super(DuelingDQN, self).__init__()

        # Shared feature extractor
        self.feature_layer = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.ReLU(),
        )

        # State-value head V(s)
        self.value_stream = nn.Sequential(
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 1),
        )

        # Action-advantage head A(s, a)
        self.advantage_stream = nn.Sequential(
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, action_dim),
        )

    def forward(self, state):
        shared = self.feature_layer(state)
        state_value = self.value_stream(shared)
        advantage = self.advantage_stream(shared)
        # Subtracting the mean advantage makes the V/A decomposition identifiable
        centered_advantage = advantage - advantage.mean(dim=1, keepdim=True)
        return state_value + centered_advantage


# Episodic memory for continual learning: one model snapshot per task
class EpisodicMemory:
    """Keeps model states keyed by task id, bounded by `capacity`.

    When full, storing a NEW task id evicts the entry with the smallest
    task id (task ids grow over time, so that is the oldest snapshot).
    """

    def __init__(self, capacity=10):
        self.capacity = capacity
        self.task_memories = {}

    def store_task_model(self, task_id, model_state):
        """Store (or overwrite) the model state for `task_id`."""
        is_new_task = task_id not in self.task_memories
        if is_new_task and len(self.task_memories) >= self.capacity:
            # Evict the oldest snapshot to make room
            del self.task_memories[min(self.task_memories)]
        self.task_memories[task_id] = model_state

    def retrieve_task_model(self, task_id):
        """Return the stored state for `task_id`, or None when absent."""
        return self.task_memories.get(task_id)


# MEC (mobile edge computing) environment
class MECEnvironment:
    """UAV task-collection environment on an AREA_SIZE x AREA_SIZE square.

    NUM_TOTAL_USERS user positions are sampled once at construction; each
    task environment activates a random subset of NUM_ACTIVE_USERS of them.
    The UAV starts at the area center, moves up/right/down/left or hovers,
    and collects a user's task when it comes within COMM_RANGE of that user.
    """

    def __init__(self):
        # Sample the fixed positions of all candidate users once
        self.all_user_positions = np.random.rand(NUM_TOTAL_USERS, 2) * AREA_SIZE
        self.active_users_indices = None
        self.active_user_positions = None
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2])  # start at the area center
        self.uav_energy = MAX_ENERGY
        self.collected_tasks = 0
        self.visited_users = set()

        # Discrete action set: up, right, down, left, hover
        self.actions = [
            np.array([0, UAV_SPEED]),  # up
            np.array([UAV_SPEED, 0]),  # right
            np.array([0, -UAV_SPEED]),  # down
            np.array([-UAV_SPEED, 0]),  # left
            np.array([0, 0])  # hover
        ]
        self.action_dim = len(self.actions)

        # State layout: UAV position (2) + active user positions
        # (2 * NUM_ACTIVE_USERS) + per-user visited flags (NUM_ACTIVE_USERS)
        self.state_dim = 2 + 2 * NUM_ACTIVE_USERS + NUM_ACTIVE_USERS

        # Activate an initial task environment
        self.switch_task_environment()

    def switch_task_environment(self):
        """Switch to a new task environment by activating a fresh user subset."""
        self.active_users_indices = np.random.choice(NUM_TOTAL_USERS, NUM_ACTIVE_USERS, replace=False)
        self.active_user_positions = self.all_user_positions[self.active_users_indices]
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2])  # reset the UAV position
        self.uav_energy = MAX_ENERGY
        self.collected_tasks = 0
        self.visited_users = set()

        print(f"环境切换: 激活用户索引 {self.active_users_indices}")

    def reset(self):
        """Reset UAV/episode state while keeping the currently active users."""
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2])
        self.uav_energy = MAX_ENERGY
        self.collected_tasks = 0
        self.visited_users = set()
        return self._get_state()

    def _get_state(self):
        """Build the flat state vector described in __init__."""
        state = np.concatenate([
            self.uav_position,  # UAV position
            self.active_user_positions.flatten(),  # active user positions
            np.array([i in self.visited_users for i in range(NUM_ACTIVE_USERS)], dtype=float)  # visited flags
        ])
        return state

    def step(self, action_idx):
        """Apply one action; return (next_state, reward, done, info)."""
        action = self.actions[action_idx]

        # Move the UAV, clamped to the area bounds
        self.uav_position = np.clip(self.uav_position + action, 0, AREA_SIZE)

        # Energy cost (moving costs more than hovering)
        energy_consumption = 2.0 if np.any(action != 0) else 0.5
        self.uav_energy -= energy_consumption

        # Collect tasks from any not-yet-visited users now within range
        collected_new = False
        for i, user_pos in enumerate(self.active_user_positions):
            if i not in self.visited_users:
                distance = np.linalg.norm(self.uav_position - user_pos)
                if distance <= COMM_RANGE:
                    self.visited_users.add(i)
                    self.collected_tasks += 1
                    collected_new = True

        # Reward shaping
        reward = 0

        # Collection bonus (a single +20 even if several users are reached
        # in the same step)
        if collected_new:
            reward += 20

        # Penalty proportional to the energy spent this step
        reward -= energy_consumption * 0.1

        # Proximity shaping toward the nearest unvisited user
        min_distance = float('inf')
        for i, user_pos in enumerate(self.active_user_positions):
            if i not in self.visited_users:
                distance = np.linalg.norm(self.uav_position - user_pos)
                min_distance = min(min_distance, distance)

        if min_distance != float('inf'):
            proximity_reward = 1.0 / (1.0 + min_distance * 0.1)
            reward += proximity_reward

        # Terminate on energy depletion or full coverage
        done = False
        if self.uav_energy <= 0 or len(self.visited_users) == NUM_ACTIVE_USERS:
            done = True

            # Terminal bonus scaled by the fraction of users visited
            coverage_ratio = len(self.visited_users) / NUM_ACTIVE_USERS
            reward += coverage_ratio * 50

        return self._get_state(), reward, done, {"collected_tasks": self.collected_tasks}


# Continual reinforcement learning agent
class CRLAgent:
    """Dueling-DQN agent with an episodic memory of per-task model snapshots.

    Standard epsilon-greedy DQN training with a soft-updated target network;
    additionally, the agent snapshots its policy network when the task
    environment switches, and restores the snapshot when a previously seen
    task returns.
    """

    def __init__(self, state_dim, action_dim):
        self.state_dim = state_dim
        self.action_dim = action_dim

        # Online (policy) network and soft-updated target network
        self.policy_net = DuelingDQN(state_dim, action_dim).to(device)
        self.target_net = DuelingDQN(state_dim, action_dim).to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=LEARNING_RATE)
        self.memory = ReplayBuffer(REPLAY_BUFFER_SIZE)
        self.episodic_memory = EpisodicMemory()

        self.epsilon = EPSILON_START
        self.steps_done = 0
        self.current_task_id = 0

    def select_action(self, state):
        """Pick an action with the epsilon-greedy policy."""
        if random.random() < self.epsilon:
            return random.randrange(self.action_dim)
        with torch.no_grad():
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(device)
            q_values = self.policy_net(state_tensor)
            return q_values.max(1)[1].item()

    def update_epsilon(self):
        """Decay the exploration rate, bounded below by EPSILON_END."""
        self.epsilon = max(EPSILON_END, self.epsilon * EPSILON_DECAY)

    def store_transition(self, state, action, reward, next_state, done):
        """Add one transition to the replay buffer."""
        self.memory.push(state, action, reward, next_state, done)

    def optimize_model(self):
        """Run one DQN optimization step on a replayed minibatch.

        Returns:
            The scalar TD loss, or 0.0 while the buffer holds fewer than
            BATCH_SIZE transitions.
        """
        if len(self.memory) < BATCH_SIZE:
            return 0.0

        transitions = self.memory.sample(BATCH_SIZE)
        batch = list(zip(*transitions))

        state_batch = torch.FloatTensor(np.array(batch[0])).to(device)
        action_batch = torch.LongTensor(np.array(batch[1])).unsqueeze(1).to(device)
        reward_batch = torch.FloatTensor(np.array(batch[2])).unsqueeze(1).to(device)
        next_state_batch = torch.FloatTensor(np.array(batch[3])).to(device)
        done_batch = torch.FloatTensor(np.array(batch[4])).unsqueeze(1).to(device)

        # Q(s, a) for the actions actually taken
        q_values = self.policy_net(state_batch).gather(1, action_batch)

        # TD target from the target network; terminal transitions bootstrap to 0
        with torch.no_grad():
            next_q_values = self.target_net(next_state_batch)
            next_q_values_max = next_q_values.max(1)[0].unsqueeze(1)
            expected_q_values = reward_batch + (1 - done_batch) * GAMMA * next_q_values_max

        loss = nn.MSELoss()(q_values, expected_q_values)

        self.optimizer.zero_grad()
        loss.backward()
        # Clip gradients element-wise to [-1, 1]. Unlike the manual
        # `param.grad.data.clamp_` loop, clip_grad_value_ safely skips any
        # parameter whose grad is None instead of raising AttributeError.
        nn.utils.clip_grad_value_(self.policy_net.parameters(), 1.0)
        self.optimizer.step()

        # Polyak (soft) update of the target network toward the policy network
        for target_param, policy_param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            target_param.data.copy_(TAU * policy_param.data + (1.0 - TAU) * target_param.data)

        return loss.item()

    def switch_task(self, task_id):
        """Snapshot the current task's model, then restore `task_id`'s if stored."""
        # Deep-copy the state dict: state_dict() returns references to the
        # live parameter tensors, so storing it directly would let later
        # training silently overwrite the saved snapshot.
        self.episodic_memory.store_task_model(
            self.current_task_id, copy.deepcopy(self.policy_net.state_dict())
        )

        # Restore this task's snapshot when one exists
        prev_model_state = self.episodic_memory.retrieve_task_model(task_id)
        if prev_model_state is not None:
            self.policy_net.load_state_dict(prev_model_state)
            self.target_net.load_state_dict(prev_model_state)
            print(f"已从记忆中恢复任务 {task_id} 的模型状态")
        else:
            print(f"任务 {task_id} 没有之前的记忆，使用当前模型继续训练")

        self.current_task_id = task_id


# Training loop
def train():
    """Run continual RL training across periodically switching task environments.

    Every EPISODES_PER_TASK episodes the environment activates a new user
    subset and the agent snapshots/restores per-task model state. Reward and
    task-count curves are refreshed live and saved to disk at the end.
    """
    env = MECEnvironment()
    agent = CRLAgent(env.state_dim, env.action_dim)

    all_rewards = []
    task_switch_points = []
    avg_rewards = []
    collected_tasks_history = []
    task_id = 0

    fig, axes = plt.subplots(2, 1, figsize=(12, 10))

    for episode in range(TOTAL_EPISODES):
        # Switch the task environment every EPISODES_PER_TASK episodes
        if episode > 0 and episode % EPISODES_PER_TASK == 0:
            env.switch_task_environment()
            task_id += 1
            agent.switch_task(task_id)
            task_switch_points.append(episode)

            # Render one rollout. NOTE(review): this runs on the NEW task
            # right after switching, before any training on it — confirm
            # that snapshot timing is intended.
            plot_trajectory(env, agent, episode)

        state = env.reset()
        total_reward = 0
        done = False
        steps = 0

        while not done and steps < MAX_STEPS:
            action = agent.select_action(state)
            next_state, reward, done, info = env.step(action)
            agent.store_transition(state, action, reward, next_state, done)

            loss = agent.optimize_model()

            state = next_state
            total_reward += reward
            steps += 1

        agent.update_epsilon()
        all_rewards.append(total_reward)
        collected_tasks_history.append(env.collected_tasks)

        # Running average over (up to) the last 100 episodes
        avg_reward = np.mean(all_rewards[-100:]) if len(all_rewards) >= 100 else np.mean(all_rewards)
        avg_rewards.append(avg_reward)

        if episode % 10 == 0:
            print(f"Episode {episode}/{TOTAL_EPISODES}, 奖励: {total_reward:.2f}, 平均奖励: {avg_reward:.2f}, "
                  f"收集任务数: {env.collected_tasks}/{NUM_ACTIVE_USERS}, Epsilon: {agent.epsilon:.4f}")

        # Periodically refresh the live training plots
        if episode % 50 == 0 and episode > 0:
            update_reward_plot(axes[0], all_rewards, avg_rewards, task_switch_points)
            update_tasks_plot(axes[1], collected_tasks_history, task_switch_points)
            plt.pause(0.01)

    plt.savefig("训练结果.png")
    print("训练完成，结果已保存")


# UAV trajectory rendering
def plot_trajectory(env, agent, episode):
    """Roll out one episode with the current policy and save the UAV path.

    NOTE(review): agent.select_action is still epsilon-greedy here, so the
    plotted path includes random exploration moves.
    """
    state = env.reset()
    done = False
    steps = 0

    trajectory = [env.uav_position.copy()]
    visited = set()

    while not done and steps < MAX_STEPS:
        action = agent.select_action(state)
        next_state, _, done, _ = env.step(action)

        trajectory.append(env.uav_position.copy())
        # Track which active users have been covered along the path
        for i, user_pos in enumerate(env.active_user_positions):
            distance = np.linalg.norm(env.uav_position - user_pos)
            if distance <= COMM_RANGE and i not in visited:
                visited.add(i)

        state = next_state
        steps += 1

    # Draw the rollout
    plt.figure(figsize=(8, 8))
    trajectory = np.array(trajectory)

    # Area boundary
    plt.plot([0, AREA_SIZE, AREA_SIZE, 0, 0], [0, 0, AREA_SIZE, AREA_SIZE, 0], 'k--')

    # All user positions: active in blue, inactive in faded green
    for i, pos in enumerate(env.all_user_positions):
        if i in env.active_users_indices:
            plt.plot(pos[0], pos[1], 'bo', markersize=8)  # active user
        else:
            plt.plot(pos[0], pos[1], 'go', markersize=8, alpha=0.3)  # inactive user

    # UAV path with start (^) and end (v) markers
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'r-', linewidth=2)
    plt.plot(trajectory[0, 0], trajectory[0, 1], 'r^', markersize=10)  # start
    plt.plot(trajectory[-1, 0], trajectory[-1, 1], 'rv', markersize=10)  # end

    # Communication-range discs around the users visited during this rollout
    for i in visited:
        user_pos = env.active_user_positions[i]
        circle = plt.Circle((user_pos[0], user_pos[1]), COMM_RANGE, color='b', alpha=0.1)
        plt.gca().add_patch(circle)

    plt.title(f"无人机飞行轨迹 - 任务环境 {episode // EPISODES_PER_TASK}")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.savefig(f"轨迹_任务{episode // EPISODES_PER_TASK}.png")
    plt.close()


# Redraw the reward curves on the given axes
def update_reward_plot(ax, rewards, avg_rewards, switch_points):
    """Plot raw and running-average rewards, marking every task switch."""
    ax.clear()
    ax.plot(rewards, 'b-', alpha=0.3, label='单轮奖励')
    ax.plot(avg_rewards, 'r-', label='平均奖励')

    # Dashed vertical line at each task-environment switch episode
    for switch_episode in switch_points:
        ax.axvline(x=switch_episode, color='g', linestyle='--')

    ax.set_title('训练奖励曲线')
    ax.set_xlabel('训练轮次')
    ax.set_ylabel('奖励')
    ax.legend()
    ax.grid(True)


# Redraw the collected-task-count curve on the given axes
def update_tasks_plot(ax, tasks, switch_points):
    """Plot tasks collected per episode, with switch markers and the max line."""
    ax.clear()
    ax.plot(tasks, 'b-', label='收集任务数量')

    # Dashed vertical line at each task-environment switch episode
    for switch_episode in switch_points:
        ax.axvline(x=switch_episode, color='g', linestyle='--')

    ax.set_title('任务收集数量')
    ax.set_xlabel('训练轮次')
    ax.set_ylabel('收集任务数')
    # Horizontal reference at the maximum achievable task count
    ax.axhline(y=NUM_ACTIVE_USERS, color='r', linestyle='--', label='最大任务数')
    ax.legend()
    ax.grid(True)


# Script entry point
if __name__ == "__main__":
    train()
