import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
import random
import argparse

# Fix all RNG seeds (torch, numpy, stdlib random) for reproducibility
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)


# Environment definition
class MECEnvironment:
    """Grid-world MEC environment: a UAV collects tasks from ground users.

    The UAV moves over a 100x100 m area in 9 discrete directions (stay + 8
    compass moves) and gathers one task from each active user it comes
    within the communication radius of. An episode ends when every active
    user has been served or after 200 steps.
    """

    # Action table: index 0 is "stay", 1..8 are the 8 compass directions.
    _DIRECTIONS = ((0, 0), (0, 1), (1, 1), (1, 0), (1, -1),
                   (0, -1), (-1, -1), (-1, 0), (-1, 1))

    def __init__(self):
        self.grid_size = 100                  # side length of the square area (m)
        self.user_positions = np.array([
            [10, 15], [30, 75], [45, 20], [60, 60],
            [25, 40], [70, 30], [80, 80], [50, 50],
            [90, 10], [20, 90]                # 10 fixed user locations
        ])
        self.active_users = None              # indices of this episode's active users
        self.uav_pos = np.array([0.0, 0.0])   # UAV start position (origin corner)
        self.comm_radius = 8                  # communication radius (m)
        self.velocity = 5                     # UAV speed (m/s)
        self.time_step = 0.1                  # duration of one step (s)
        self.steps = 0                        # step counter within the episode
        self.collected = None                 # per-user task-collected flags
        self.reset()

    def reset(self, user_mask=None):
        """Reset the episode and choose the active user set.

        Args:
            user_mask: optional array of active user indices; when omitted,
                8 of the 10 users are drawn uniformly at random.

        Returns:
            The initial state tensor (see ``_get_state``).
        """
        self.uav_pos = np.array([0.0, 0.0])
        self.collected = np.zeros(10, dtype=bool)
        self.steps = 0
        # Activate 8 of 10 users unless the caller pins a specific set.
        self.active_users = (np.random.choice(10, 8, replace=False)
                             if user_mask is None else user_mask)
        return self._get_state()

    def step(self, action):
        """Apply one action; return ``(state, reward, done, info)``."""
        dx, dy = self._DIRECTIONS[action]

        # Move the UAV, clipped to the area boundary.
        self.uav_pos = np.clip(self.uav_pos + np.array([dx, dy]) * self.velocity,
                               0, self.grid_size)
        self.steps += 1

        # Energy consumed this step (Joule).
        # NOTE(review): the distance here is velocity * time_step (0.5 m per
        # move) while the position update above travels a full `velocity`
        # (5 m) per move — confirm which kinematics is intended.
        travelled = np.hypot(dx, dy) * self.velocity * self.time_step
        energy = 0.1 * travelled

        # Serve every active, not-yet-served user now inside comm range.
        newly_collected = 0
        for uid in self.active_users:
            if self.collected[uid]:
                continue
            if np.linalg.norm(self.uav_pos - self.user_positions[uid]) <= self.comm_radius:
                self.collected[uid] = True
                newly_collected += 1

        reward = self._calculate_reward(newly_collected, energy)

        # Termination: all active tasks collected, or 200-step timeout.
        done = np.all(self.collected[self.active_users]) or (self.steps >= 200)

        return self._get_state(), reward, done, {
            "collected": newly_collected,
            "energy": energy,
            "position": self.uav_pos.copy()
        }

    def _calculate_reward(self, collected, energy):
        """Multi-objective reward: tasks (dominant), latency, energy, fairness."""
        reward = 15.0 * collected      # task-collection reward (dominant term)
        reward += -0.1                 # per-step latency penalty
        reward += -0.05 * energy       # energy penalty

        # Fairness bonus: if something was just collected and the nearest
        # remaining active user is still far away (> 20 m), add a small
        # bonus to encourage serving edge users.
        bonus = 0
        if collected > 0:
            remaining = [self.user_positions[i] for i in self.active_users
                         if not self.collected[i]]
            if remaining:
                nearest = min(np.linalg.norm(self.uav_pos - p) for p in remaining)
                if nearest > 20:
                    bonus = 0.2
        return reward + bonus

    def _user_distances(self):
        """Distances from the UAV to every active user not yet served."""
        return [np.linalg.norm(self.uav_pos - self.user_positions[uid])
                for uid in self.active_users if not self.collected[uid]]

    def _get_state(self):
        """State vector: [UAV x, y (2)] + [collected flags (10)] + [user xy (20)]."""
        features = [self.uav_pos[0] / self.grid_size,
                    self.uav_pos[1] / self.grid_size]

        # Per-user collected flag (1 = active and already served, else 0).
        features += [1.0 if (i in self.active_users and self.collected[i]) else 0.0
                     for i in range(10)]

        # Normalized user coordinates.
        for x, y in self.user_positions:
            features += [x / self.grid_size, y / self.grid_size]

        return torch.tensor(features, dtype=torch.float32)


# Continual reinforcement-learning agent
class CRLAgent(nn.Module):
    """Q-network with Elastic Weight Consolidation (EWC) bookkeeping."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        # Two-hidden-layer MLP mapping a state to per-action Q-values.
        self.net = nn.Sequential(
            nn.Linear(state_dim, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, action_dim),
        )

        # EWC state — populated when an old task is consolidated.
        self.fisher_matrix = None   # per-parameter importance weights
        self.optimal_params = None  # parameter snapshot from the old task
        self.ewc_lambda = 500       # strength of the forgetting penalty

    def forward(self, x):
        return self.net(x)

    def compute_ewc_loss(self):
        """Quadratic penalty pulling parameters toward the old-task snapshot.

        Returns 0 until both the Fisher matrix and the parameter snapshot
        have been recorded.
        """
        if self.fisher_matrix is None or self.optimal_params is None:
            return 0

        penalty = 0
        for name, param in self.named_parameters():
            drift = param - self.optimal_params[name]
            penalty = penalty + torch.sum(self.fisher_matrix[name] * drift.pow(2))
        return self.ewc_lambda * penalty


# Experience replay buffer
class ReplayBuffer:
    """Global FIFO replay buffer that also indexes experiences by task id."""

    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)  # global buffer (all tasks)
        self.task_buffers = {}                # task_id -> that task's own deque

    def add(self, experience, task_id):
        """Store a transition in both the global and the per-task buffer."""
        self.buffer.append(experience)
        self.task_buffers.setdefault(task_id, deque(maxlen=2000)).append(experience)

    def sample(self, batch_size, task_id=None):
        """Sample a batch, preferring the requested task's buffer.

        Falls back to the global buffer when the task buffer cannot fill a
        batch, and returns None when even the global buffer is too small.
        """
        if task_id is not None:
            per_task = self.task_buffers.get(task_id)
            if per_task is not None and len(per_task) >= batch_size:
                return random.sample(per_task, batch_size)
        if len(self.buffer) >= batch_size:
            return random.sample(self.buffer, batch_size)
        return None

    def __len__(self):
        return len(self.buffer)


# Training function
def train(args):
    """Train the continual-RL agent over a sequence of user-set tasks.

    Every ``args.task_switch_interval`` episodes the active user set is
    re-drawn (a new "task"); before switching, the current parameters and a
    simplified Fisher matrix are stored so EWC can penalize forgetting.

    Args:
        args: parsed CLI namespace (episodes, batch_size, gamma, lr,
            buffer_capacity, epsilon_*, target_update,
            task_switch_interval, gpu).

    Returns:
        Tuple ``(agent, episode_rewards, flight_paths,
        collected_tasks_history)``.
    """
    # Environment
    env = MECEnvironment()
    state_dim = 32   # UAV position (2) + user flags (10) + user positions (20)
    action_dim = 9   # 9 movement directions

    # Online agent and frozen target network
    device = torch.device("cuda" if torch.cuda.is_available() and args.gpu else "cpu")
    agent = CRLAgent(state_dim, action_dim).to(device)
    target_agent = CRLAgent(state_dim, action_dim).to(device)
    target_agent.load_state_dict(agent.state_dict())
    target_agent.eval()

    # Optimizer and replay buffer
    optimizer = optim.Adam(agent.parameters(), lr=args.lr)
    replay_buffer = ReplayBuffer(args.buffer_capacity)

    # Training bookkeeping
    episode_rewards = []
    flight_paths = []
    collected_tasks_history = []
    epsilon = args.epsilon_start
    current_task = 0
    task_users = {}  # task id -> active-user index array

    # Training loop
    for episode in range(args.episodes):
        # Switch to a new task every `task_switch_interval` episodes
        if episode % args.task_switch_interval == 0:
            current_task = episode // args.task_switch_interval
            user_mask = np.random.choice(10, 8, replace=False)
            task_users[current_task] = user_mask
            state = env.reset(user_mask)

            # Consolidate the previous task's parameters for EWC
            if current_task > 0:
                agent.optimal_params = {
                    n: p.clone().detach() for n, p in agent.named_parameters()
                }
                # Simplified (constant) Fisher-matrix approximation
                agent.fisher_matrix = {}
                for n, p in agent.named_parameters():
                    agent.fisher_matrix[n] = torch.ones_like(p) * 0.1
            print(f"任务切换: 任务{current_task} 用户: {user_mask}")
        else:
            state = env.reset(task_users[current_task])

        # Per-episode trajectory recording
        flight_path = [env.uav_pos.copy()]
        episode_reward = 0
        collected_tasks = 0
        done = False

        while not done:
            # ε-greedy action selection
            if random.random() < epsilon:
                action = random.randint(0, action_dim - 1)
            else:
                with torch.no_grad():
                    state_tensor = state.unsqueeze(0).to(device)
                    q_values = agent(state_tensor)
                    action = q_values.argmax().item()

            # Execute the action
            next_state, reward, done, info = env.step(action)
            collected_tasks += info["collected"]
            flight_path.append(info["position"])

            # Store the transition
            replay_buffer.add((state, action, reward, next_state, done), current_task)

            state = next_state
            episode_reward += reward

            # Learn from replayed experience
            if len(replay_buffer) > args.batch_size:
                optimize_model(agent, target_agent, optimizer, replay_buffer,
                               args.batch_size, args.gamma, device, current_task)

        # BUG FIX: the target-network sync used to sit inside the step loop,
        # so on every `target_update`-th episode the target net was copied
        # from the online net after every single step, leaving no stable
        # bootstrap target. Sync once per `target_update` episodes instead.
        if episode % args.target_update == 0:
            target_agent.load_state_dict(agent.state_dict())

        # Record results
        episode_rewards.append(episode_reward)
        flight_paths.append(np.array(flight_path))
        collected_tasks_history.append(collected_tasks)

        # Progress report
        print(f"Episode {episode + 1}/{args.episodes}: "
              f"奖励: {episode_reward:.2f}, "
              f"收集任务: {collected_tasks}, "
              f"步数: {env.steps}")

        # Decay the exploration rate
        epsilon = max(args.epsilon_end,
                      args.epsilon_decay * epsilon)

    return agent, episode_rewards, flight_paths, collected_tasks_history


# One optimization step on the agent from a replayed minibatch
def optimize_model(agent, target_agent, optimizer, replay_buffer,
                   batch_size, gamma, device, task_id):
    """Run a single DQN + EWC gradient step.

    Samples a minibatch (preferring the current task), computes the TD
    target against the frozen target network, adds the EWC forgetting
    penalty, and applies one gradient-clipped update.

    Returns the scalar total loss, or None when the buffer cannot yet
    provide a full batch.
    """
    batch = replay_buffer.sample(batch_size, task_id)
    if batch is None:
        return

    states, actions, rewards, next_states, dones = zip(*batch)

    # Assemble batch tensors on the training device.
    state_batch = torch.stack(states).to(device)
    action_batch = torch.tensor(actions, dtype=torch.int64).unsqueeze(1).to(device)
    reward_batch = torch.tensor(rewards, dtype=torch.float32).unsqueeze(1).to(device)
    next_state_batch = torch.stack(next_states).to(device)
    done_batch = torch.tensor(dones, dtype=torch.float32).unsqueeze(1).to(device)

    # Q(s, a) under the online network.
    q_selected = agent(state_batch).gather(1, action_batch)

    # Bootstrapped TD target from the frozen target network.
    with torch.no_grad():
        best_next_q = target_agent(next_state_batch).max(1)[0].unsqueeze(1)
        td_target = reward_batch + gamma * best_next_q * (1 - done_batch)

    # DQN regression loss plus the EWC forgetting penalty.
    total_loss = nn.MSELoss()(q_selected, td_target) + agent.compute_ewc_loss()

    # Gradient step with norm clipping for stability.
    optimizer.zero_grad()
    total_loss.backward()
    torch.nn.utils.clip_grad_norm_(agent.parameters(), max_norm=1.0)
    optimizer.step()

    return total_loss.item()


# Result visualization
def plot_results(episode_rewards, flight_paths, user_positions):
    """Plot reward curves, sample flight paths and the step-count histogram.

    The 2x2 figure is saved to ``mec_crl_results.png`` and then shown.
    """
    fig, axes = plt.subplots(2, 2, figsize=(18, 12))
    ax_reward, ax_smooth, ax_paths, ax_hist = axes.ravel()

    # Raw per-episode rewards
    ax_reward.plot(episode_rewards)
    ax_reward.set_title("训练奖励曲线")
    ax_reward.set_xlabel("Episode")
    ax_reward.set_ylabel("累计奖励")
    ax_reward.grid(True)

    # Trailing-window moving average of the rewards
    smooth_window = 50
    smoothed = [np.mean(episode_rewards[max(0, i - smooth_window):i + 1])
                for i in range(len(episode_rewards))]
    ax_smooth.plot(smoothed)
    ax_smooth.set_title(f"平滑奖励曲线 (窗口={smooth_window})")
    ax_smooth.set_xlabel("Episode")
    ax_smooth.set_ylabel("平均奖励")
    ax_smooth.grid(True)

    # Thinned-out sample of UAV trajectories (every 50th episode) to
    # keep the plot readable.
    for idx, path in enumerate(flight_paths):
        if idx % 50 == 0 and len(path) > 10:
            ax_paths.plot(path[:, 0], path[:, 1], alpha=0.5, linewidth=0.5)

    # Ground-user locations
    for idx, pos in enumerate(user_positions):
        ax_paths.scatter(pos[0], pos[1], s=100, marker='o', label=f'用户{idx + 1}')

    ax_paths.set_title("无人机飞行轨迹示例")
    ax_paths.set_xlabel("X坐标 (m)")
    ax_paths.set_ylabel("Y坐标 (m)")
    ax_paths.set_xlim(0, 100)
    ax_paths.set_ylim(0, 100)
    ax_paths.grid(True)
    ax_paths.legend()

    # Distribution of episode lengths (steps per episode)
    ax_hist.hist([len(path) for path in flight_paths], bins=20)
    ax_hist.set_title("任务完成步数分布")
    ax_hist.set_xlabel("步数")
    ax_hist.set_ylabel("频率")
    ax_hist.grid(True)

    fig.tight_layout()
    fig.savefig("mec_crl_results.png")
    plt.show()


# Entry point
def main():
    """Parse CLI options, train the agent, save the weights, plot results."""
    parser = argparse.ArgumentParser(description='MEC持续强化学习UAV轨迹优化')
    parser.add_argument('--episodes', type=int, default=2000, help='训练episode数量')
    parser.add_argument('--batch_size', type=int, default=128, help='批大小')
    parser.add_argument('--gamma', type=float, default=0.99, help='折扣因子')
    parser.add_argument('--lr', type=float, default=0.0001, help='学习率')
    parser.add_argument('--buffer_capacity', type=int, default=10000, help='回放缓冲区容量')
    parser.add_argument('--epsilon_start', type=float, default=1.0, help='初始ε值')
    parser.add_argument('--epsilon_end', type=float, default=0.01, help='最终ε值')
    parser.add_argument('--epsilon_decay', type=float, default=0.995, help='ε衰减率')
    parser.add_argument('--target_update', type=int, default=10, help='目标网络更新频率')
    parser.add_argument('--task_switch_interval', type=int, default=500, help='任务切换间隔')
    parser.add_argument('--gpu', action='store_true', help='使用GPU加速')
    opts = parser.parse_args()

    use_gpu = opts.gpu and torch.cuda.is_available()
    print("开始训练...")
    print(f"使用 {'GPU' if use_gpu else 'CPU'} 加速")

    # Train
    agent, rewards, paths, collected = train(opts)

    # Persist the trained weights
    torch.save(agent.state_dict(), "mec_crl_agent.pth")
    print("模型已保存到 mec_crl_agent.pth")

    # Visualize (a fresh environment only supplies the user layout)
    plot_results(rewards, paths, MECEnvironment().user_positions)

    # Summary over the last 100 episodes
    print(f"\n最终性能:")
    print(f"平均奖励: {np.mean(rewards[-100:]):.2f}")
    print(f"平均收集任务数: {np.mean(collected[-100:]):.2f}/8")
    print(f"平均步数: {np.mean([len(p) for p in paths[-100:]]):.2f}")


if __name__ == "__main__":
    main()