import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import random
import copy
import os
import time

# Fix all random seeds (torch / numpy / random / CUDA) for reproducibility
SEED = 42
torch.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)

# Use the GPU when available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# One replay record: a transition plus the id of the task it was collected under
Experience = namedtuple('Experience', ('state', 'action', 'reward', 'next_state', 'done', 'task_id'))


class ReplayBuffer:
    """Task-aware experience replay buffer.

    Transitions are tagged with a task id so that sampling can mix
    current-task experiences with rehearsal samples from older tasks
    (a simple continual-learning strategy).
    """

    def __init__(self, capacity):
        # Oldest experiences are evicted automatically once capacity is reached.
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done, task_id):
        """Append one transition tagged with its task id."""
        self.buffer.append(Experience(state, action, reward, next_state, done, task_id))

    def sample(self, batch_size, current_task_id=None):
        """Sample a batch of tensors, mixing current-task and old-task data.

        When ``current_task_id`` is given, up to ~80% of the batch comes
        from the current task and the rest is rehearsed from older tasks.
        Fix vs. the naive split: if one pool is too small, the shortfall
        is topped up from the other pool so the returned batch is as close
        to ``batch_size`` as the buffer contents allow, instead of
        silently returning an undersized batch.
        """
        if current_task_id is not None and len(self.buffer) > batch_size:
            current_experiences = [e for e in self.buffer if e.task_id == current_task_id]
            old_experiences = [e for e in self.buffer if e.task_id != current_task_id]

            # Current-task share of the batch (capped at 80%), with a floor
            # guaranteeing some current-task samples whenever they exist.
            current_ratio = min(0.8, len(current_experiences) / len(self.buffer))
            current_batch_size = max(int(batch_size * current_ratio),
                                     min(batch_size // 4, len(current_experiences)))
            current_batch_size = min(current_batch_size, len(current_experiences))
            old_batch_size = min(batch_size - current_batch_size, len(old_experiences))

            # Top up from whichever pool still has spare samples.
            shortfall = batch_size - current_batch_size - old_batch_size
            if shortfall > 0:
                extra_current = min(shortfall, len(current_experiences) - current_batch_size)
                current_batch_size += extra_current
                shortfall -= extra_current
                old_batch_size += min(shortfall, len(old_experiences) - old_batch_size)

            current_batch = random.sample(current_experiences, current_batch_size)
            old_batch = random.sample(old_experiences, old_batch_size) if old_experiences else []

            experiences = current_batch + old_batch
            random.shuffle(experiences)
        else:
            experiences = random.sample(self.buffer, min(batch_size, len(self.buffer)))

        # Transpose list-of-Experiences into an Experience of tuples, then tensors.
        batch = Experience(*zip(*experiences))
        states = torch.FloatTensor(np.array(batch.state)).to(device)
        actions = torch.LongTensor(np.array(batch.action)).to(device)
        rewards = torch.FloatTensor(np.array(batch.reward)).to(device)
        next_states = torch.FloatTensor(np.array(batch.next_state)).to(device)
        dones = torch.FloatTensor(np.array(batch.done)).to(device)

        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


# Network model - shared architecture used for both policy and value networks
class QNetwork(nn.Module):
    """Three-layer fully connected network mapping a state to per-action values."""

    def __init__(self, state_size, action_size, hidden_size=128):
        super(QNetwork, self).__init__()
        # Two hidden layers of equal width, then a linear output head.
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, action_size)

    def forward(self, state):
        # ReLU after each hidden layer; raw (unactivated) outputs.
        hidden = self.fc2(torch.relu(self.fc1(state)))
        return self.fc3(torch.relu(hidden))


# Elastic Weight Consolidation (EWC) implementation
class EWC:
    """Elastic Weight Consolidation regularizer.

    For each finished task it stores a diagonal Fisher-information estimate
    and a snapshot of the model parameters at that point.  The penalty term
    discourages later training from moving parameters that were important
    for earlier tasks.
    """

    def __init__(self, model, lambda_ewc=1000):
        self.model = model
        self.lambda_ewc = lambda_ewc
        self.fisher_dict = {}   # task_id -> {param_name: Fisher diagonal}
        self.optpar_dict = {}   # task_id -> {param_name: parameter snapshot}

    def compute_fisher(self, data_loader, task_id):
        """Estimate the Fisher diagonal from (states, actions) batches and
        snapshot the current parameters under ``task_id``."""
        fisher = {name: torch.zeros_like(param)
                  for name, param in self.model.named_parameters()}
        self.model.eval()

        num_batches = len(data_loader)
        for states, actions in data_loader:
            self.model.zero_grad()
            logits = self.model(states)

            # Negative log-likelihood of the actions actually taken.
            log_probs = torch.log_softmax(logits, dim=1)
            chosen = log_probs.gather(1, actions.unsqueeze(1)).squeeze()
            (-chosen.mean()).backward()

            # Squared gradients, averaged over batches, approximate the
            # Fisher diagonal.
            for name, param in self.model.named_parameters():
                if param.grad is not None:
                    fisher[name] += param.grad.pow(2).detach() / num_batches

        # Persist this task's Fisher estimate and parameter snapshot.
        self.fisher_dict[task_id] = fisher
        self.optpar_dict[task_id] = {name: param.clone().detach()
                                     for name, param in self.model.named_parameters()}

        self.model.train()

    def ewc_loss(self, current_task_id=None):
        """Return the Fisher-weighted quadratic penalty over all stored
        tasks (skipping ``current_task_id``); 0 when nothing is stored."""
        loss = 0
        if not self.fisher_dict:
            return loss

        for task_id, fisher in self.fisher_dict.items():
            if current_task_id is not None and task_id == current_task_id:
                continue  # never anchor against the task being learned

            snapshot = self.optpar_dict[task_id]
            for name, param in self.model.named_parameters():
                # Guard against parameter-name mismatches between runs.
                if name in fisher and name in snapshot:
                    # Deviation from the old optimum, weighted by importance.
                    loss += (fisher[name] * (param - snapshot[name]).pow(2)).sum()

        return self.lambda_ewc * loss


# UAV data-collection environment
class UAVEnvironment:
    """2D data-collection environment for a single UAV.

    The UAV starts at the center of a square area and must fly within
    collection range of each "active" user to collect its task.  The set
    of active users defines a *task*: it is drawn once at construction and
    changes only through :meth:`switch_environment` (the continual-learning
    task switch).  :meth:`reset` restarts an episode of the SAME task.

    Fix vs. the original: ``reset`` used to re-randomize the active users
    on every episode, silently changing the task each episode and defeating
    the task-id based replay/EWC machinery built around stable tasks.
    """

    def __init__(self, area_size=100, num_users=10, num_active_users=8):
        self.area_size = area_size
        self.num_users = num_users
        self.num_active_users = num_active_users

        # Fixed user positions, drawn once per environment.
        self.user_positions = np.random.rand(num_users, 2) * area_size

        # Action space: up, down, left, right and the four diagonals.
        self.actions = [(0, 1), (0, -1), (-1, 0), (1, 0), (1, 1), (1, -1), (-1, 1), (-1, -1)]
        self.action_size = len(self.actions)
        self.move_distance = 5  # distance covered per step

        # State: UAV position (2) + per-user position and pending flag (num_users * 3).
        self.state_size = 2 + num_users * 3

        # Reward parameters.
        self.reward_per_task = 100
        self.penalty_per_step = -1
        self.lambda_task = 1.0
        self.lambda_energy = 1.0

        # The active-user set defines the task: choose it here (not in reset).
        self._select_active_users()
        self.reset()

    def _select_active_users(self):
        """Randomly draw the active-user set that defines the current task."""
        self.active_users = np.random.choice(self.num_users, self.num_active_users, replace=False)
        self.active_status = np.zeros(self.num_users)
        self.active_status[self.active_users] = 1

    def reset(self):
        """Start a new episode of the CURRENT task; return the initial state."""
        # UAV starts at the center of the area.
        self.uav_position = np.array([self.area_size / 2, self.area_size / 2])

        # No tasks collected yet this episode.
        self.collected = np.zeros(self.num_users)
        self.steps = 0

        return self._get_state()

    def switch_environment(self):
        """Task switch: redraw the active users and restart the episode state.

        Returns:
            The previous active-user index array, for evaluation/bookkeeping.
        """
        old_active_users = self.active_users.copy()

        # New task = new active-user combination.
        self._select_active_users()

        # Restart the episode state for the new task.
        self.collected = np.zeros(self.num_users)
        self.uav_position = np.array([self.area_size / 2, self.area_size / 2])
        self.steps = 0

        return old_active_users

    def step(self, action):
        """Apply one movement action; return (next_state, reward, done, info)."""
        direction = self.actions[action]
        self.uav_position[0] += direction[0] * self.move_distance
        self.uav_position[1] += direction[1] * self.move_distance

        # Keep the UAV inside the area.
        self.uav_position = np.clip(self.uav_position, 0, self.area_size)

        # Count collections made on this step only.
        prev_collected = np.sum(self.collected)
        self._collect_tasks()
        newly_collected = np.sum(self.collected) - prev_collected

        # Task reward for new collections minus a per-step energy penalty.
        reward = self.lambda_task * newly_collected * self.reward_per_task + \
                 self.lambda_energy * self.penalty_per_step

        self.steps += 1

        # Episode ends once every active user's task is collected...
        done = np.sum(self.collected[self.active_users]) == self.num_active_users

        # ...or after a hard step cap to prevent endless episodes.
        if self.steps >= 200:
            done = True

        next_state = self._get_state()

        return next_state, reward, done, {"collected": np.sum(self.collected)}

    def _collect_tasks(self):
        """Mark as collected every pending active user within collection range."""
        collection_radius = 5  # collection range

        for i in range(self.num_users):
            if self.active_status[i] == 1 and self.collected[i] == 0:
                distance = np.linalg.norm(self.uav_position - self.user_positions[i])
                if distance <= collection_radius:
                    self.collected[i] = 1

    def _get_state(self):
        """Build the flat state vector (positions normalized to [0, 1])."""
        state = np.zeros(self.state_size)

        # UAV position.
        state[0:2] = self.uav_position / self.area_size

        # Per-user position plus a flag that is 1 only while the user is
        # active AND its task has not been collected yet.
        for i in range(self.num_users):
            idx = 2 + i * 3
            state[idx:idx + 2] = self.user_positions[i] / self.area_size
            state[idx + 2] = self.active_status[i] * (1 - self.collected[i])

        return state

    def render(self, save_path=None):
        """Plot users and the UAV; save to ``save_path`` or show interactively."""
        plt.figure(figsize=(8, 8))
        plt.xlim(0, self.area_size)
        plt.ylim(0, self.area_size)

        # Draw every user, color-coded by status.
        for i, pos in enumerate(self.user_positions):
            color = 'gray'  # inactive user
            marker = 'o'
            if i in self.active_users:
                if self.collected[i] == 1:
                    color = 'green'  # task collected
                    marker = 'o'
                else:
                    color = 'red'  # task pending
                    marker = 's'
            plt.scatter(pos[0], pos[1], c=color, marker=marker, s=100)
            plt.text(pos[0] + 1, pos[1] + 1, f"U{i}", fontsize=10)

        # Draw the UAV position.
        plt.scatter(self.uav_position[0], self.uav_position[1], c='blue', marker='*', s=200)

        plt.grid(True)
        plt.title(f"UAV Trajectory - Active Users: {self.active_users}")

        if save_path:
            plt.savefig(save_path)
            plt.close()
        else:
            plt.show()


# SAC-style agent (discrete-action variant) with EWC continual learning
class SACAgent:
    """Value/policy agent with task-aware replay and EWC regularization.

    ``qnetwork_local``/``qnetwork_target`` form a double-network value
    learner; ``policy_network`` is a separate actor whose greedy action
    selects both the behavior action and the bootstrap action.  Both
    networks carry an EWC penalty anchored at previously finished tasks.
    """

    def __init__(self, state_size, action_size, hidden_size=128, buffer_size=100000,
                 batch_size=64, gamma=0.99, tau=0.005, lr=3e-4, lambda_ewc=100):
        self.state_size = state_size
        self.action_size = action_size
        self.batch_size = batch_size
        self.gamma = gamma
        self.tau = tau
        self.lambda_ewc = lambda_ewc
        self.current_task_id = 0  # updated externally at each task switch

        # Q-networks: local learner plus a slowly tracking target copy.
        self.qnetwork_local = QNetwork(state_size, action_size, hidden_size).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size, hidden_size).to(device)
        self.qnetwork_target.load_state_dict(self.qnetwork_local.state_dict())

        # Policy network (actor).
        self.policy_network = QNetwork(state_size, action_size, hidden_size).to(device)

        # Optimizers.
        self.optimizer_q = optim.Adam(self.qnetwork_local.parameters(), lr=lr)
        self.optimizer_policy = optim.Adam(self.policy_network.parameters(), lr=lr)

        # Task-aware experience replay.
        self.memory = ReplayBuffer(buffer_size)

        # One EWC regularizer per trained network.
        self.ewc = EWC(self.policy_network, lambda_ewc=lambda_ewc)
        self.q_ewc = EWC(self.qnetwork_local, lambda_ewc=lambda_ewc)

        # Counter so learning happens only every 4th environment step.
        self.t_step = 0

    def step(self, state, action, reward, next_state, done):
        """Record a transition; learn every 4th call once the buffer warms up."""
        self.memory.push(state, action, reward, next_state, done, self.current_task_id)

        self.t_step = (self.t_step + 1) % 4
        if self.t_step == 0 and len(self.memory) > self.batch_size:
            self.learn()

    def act(self, state, eps=0.0):
        """Epsilon-greedy action from the policy network (greedy when eps=0)."""
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        self.policy_network.eval()
        with torch.no_grad():
            action_values = self.policy_network(state)
        self.policy_network.train()

        # Epsilon-greedy exploration.
        if random.random() > eps:
            return np.argmax(action_values.cpu().data.numpy())
        else:
            return random.choice(np.arange(self.action_size))

    def learn(self):
        """One gradient update of the value and policy networks from replay."""
        states, actions, rewards, next_states, dones = self.memory.sample(self.batch_size, self.current_task_id)

        # TD target: bootstrap with the target Q-network evaluated at the
        # policy's greedy next action (no gradients needed here).
        with torch.no_grad():
            next_actions = torch.argmax(self.policy_network(next_states), dim=1, keepdim=True)
            q_targets_next = self.qnetwork_target(next_states).gather(1, next_actions)
            q_targets = rewards.unsqueeze(1) + (self.gamma * q_targets_next * (1 - dones.unsqueeze(1)))

        # Current Q estimate for the actions actually taken.
        q_expected = self.qnetwork_local(states).gather(1, actions.unsqueeze(1))

        # TD error plus EWC penalty on the value network.
        q_loss = nn.functional.mse_loss(q_expected, q_targets)
        q_loss += self.q_ewc.ewc_loss(self.current_task_id)

        self.optimizer_q.zero_grad()
        q_loss.backward()
        self.optimizer_q.step()

        # Policy loss.  Fix: the original used -Q(s, argmax_a policy(s)), but
        # argmax is non-differentiable, so no gradient ever reached the policy
        # network and optimizer_policy was a no-op.  Instead maximize the
        # expected Q-value under the policy's softmax distribution, with Q
        # detached so only the policy parameters receive gradients.
        action_probs = torch.softmax(self.policy_network(states), dim=1)
        q_values = self.qnetwork_local(states).detach()
        policy_loss = -(action_probs * q_values).sum(dim=1).mean()

        # EWC penalty on the policy network.
        policy_loss += self.ewc.ewc_loss(self.current_task_id)

        self.optimizer_policy.zero_grad()
        policy_loss.backward()
        self.optimizer_policy.step()

        # Soft-update the target network toward the local network.
        self.soft_update(self.qnetwork_local, self.qnetwork_target, self.tau)

    def soft_update(self, local_model, target_model, tau):
        """Polyak averaging: θ_target = τ*θ_local + (1-τ)*θ_target."""
        for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
            target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)

    def compute_fisher_information(self):
        """Estimate and store Fisher information for both networks (EWC)."""
        # Build small (states, actions) batches from replayed experience.
        states, actions, _, _, _ = self.memory.sample(min(1000, len(self.memory)), self.current_task_id)
        data = [(states[i:i + self.batch_size], actions[i:i + self.batch_size])
                for i in range(0, len(states), self.batch_size)]

        # Compute and persist the Fisher matrices for the current task.
        self.ewc.compute_fisher(data, self.current_task_id)
        self.q_ewc.compute_fisher(data, self.current_task_id)

    def save_models(self, path):
        """Save all three network state dicts to ``path``."""
        torch.save({
            'policy': self.policy_network.state_dict(),
            'q_local': self.qnetwork_local.state_dict(),
            'q_target': self.qnetwork_target.state_dict(),
        }, path)

    def load_models(self, path):
        """Load all three network state dicts from ``path``.

        ``map_location=device`` lets checkpoints saved on GPU load on CPU.
        """
        checkpoint = torch.load(path, map_location=device)
        self.policy_network.load_state_dict(checkpoint['policy'])
        self.qnetwork_local.load_state_dict(checkpoint['q_local'])
        self.qnetwork_target.load_state_dict(checkpoint['q_target'])


# Training loop
def train(agent, env, n_episodes=1000, eps_start=1.0, eps_end=0.05, eps_decay=0.995,
          k_switch=300, max_t=200, output_dir="./results"):
    """Train ``agent`` on ``env``, switching tasks every ``k_switch`` episodes.

    Args:
        agent: object exposing act/step/compute_fisher_information/save_models.
        env: object exposing reset/step/switch_environment/render.
        n_episodes: total number of training episodes.
        eps_start, eps_end, eps_decay: epsilon-greedy exploration schedule.
        k_switch: number of episodes per task before the environment switches.
        max_t: per-episode step cap.
        output_dir: directory for models, plots, and training data.

    Returns:
        (scores, completion_rates): per-episode lists.
    """
    os.makedirs(output_dir, exist_ok=True)

    scores = []
    completion_rates = []
    eps = eps_start
    task_id = 0
    info = {"collected": 0}  # defensive default in case an episode runs zero steps

    for i_episode in range(1, n_episodes + 1):
        # Task-switch check.  Fix: this now runs BEFORE env.reset(), so the
        # saved "end of task" render shows the actual final state of the
        # previous task (the original rendered a freshly reset environment),
        # and the first episode of the new task starts from a post-switch
        # state instead of a stale pre-switch observation.
        if i_episode % k_switch == 1 and i_episode > 1:
            print(f"环境切换: 激活用户组合变更，任务ID从 {task_id} 变为 {task_id + 1}")

            # Consolidate the finished task for EWC before any further updates.
            agent.compute_fisher_information()

            # Snapshot the finished task's model and final trajectory.
            agent.save_models(f"{output_dir}/model_task_{task_id}.pt")
            env.render(save_path=f"{output_dir}/trajectory_task_{task_id}_end.png")

            # Switch to a new active-user set (new task).
            env.switch_environment()
            task_id += 1
            agent.current_task_id = task_id

        state = env.reset()
        score = 0

        # Run one episode.
        for t in range(max_t):
            action = agent.act(state, eps)
            next_state, reward, done, info = env.step(action)
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward

            if done:
                break

        # Decay epsilon.
        eps = max(eps_end, eps_decay * eps)

        # Record score and task-completion rate.
        scores.append(score)
        completion_rate = info['collected'] / env.num_active_users
        completion_rates.append(completion_rate)

        # Periodic progress logging.
        if i_episode % 10 == 0:
            print(
                f"Episode {i_episode}/{n_episodes} | 任务ID: {task_id} | Score: {score:.2f} | Completion: {completion_rate:.2f} | Epsilon: {eps:.2f}")

        # Periodically persist curves and raw data.
        if i_episode % 100 == 0:
            # Reward curve.
            plt.figure(figsize=(10, 5))
            plt.plot(np.arange(len(scores)), scores)
            plt.title('训练奖励曲线')
            plt.xlabel('Episode')
            plt.ylabel('奖励')
            plt.savefig(f"{output_dir}/rewards_curve.png")
            plt.close()

            # Completion-rate curve.
            plt.figure(figsize=(10, 5))
            plt.plot(np.arange(len(completion_rates)), completion_rates)
            plt.title('任务完成率曲线')
            plt.xlabel('Episode')
            plt.ylabel('完成率')
            plt.savefig(f"{output_dir}/completion_curve.png")
            plt.close()

            # Raw data.
            np.savez(f"{output_dir}/training_data.npz",
                     scores=np.array(scores),
                     completion_rates=np.array(completion_rates))

    # Training finished: save the final model.
    agent.save_models(f"{output_dir}/model_final.pt")

    return scores, completion_rates


# 主函数
if __name__ == "__main__":
    # Environment parameters
    area_size = 100
    num_users = 10
    num_active_users = 8

    # Create the environment
    env = UAVEnvironment(area_size, num_users, num_active_users)

    # Create the agent
    agent = SACAgent(
        state_size=env.state_size,
        action_size=env.action_size,
        hidden_size=128,
        buffer_size=100000,
        batch_size=64,
        gamma=0.99,
        tau=0.005,
        lr=3e-4,
        lambda_ewc=100
    )

    # Training parameters
    n_episodes = 1500  # total number of episodes
    k_switch = 300  # switch the environment every 300 episodes

    # Create a timestamped output directory so runs never overwrite each other
    output_dir = f"./results_ewc_{time.strftime('%Y%m%d_%H%M%S')}"
    os.makedirs(output_dir, exist_ok=True)

    # Start training
    print("开始训练...")
    scores, completion_rates = train(agent, env, n_episodes=n_episodes, k_switch=k_switch, output_dir=output_dir)

    # Training finished
    print(f"训练完成！结果保存在 {output_dir}")

    # Plot the final results (reward and completion-rate curves side by side)
    plt.figure(figsize=(15, 6))

    plt.subplot(1, 2, 1)
    plt.plot(np.arange(len(scores)), scores)
    plt.title('训练奖励曲线')
    plt.xlabel('Episode')
    plt.ylabel('奖励')

    plt.subplot(1, 2, 2)
    plt.plot(np.arange(len(completion_rates)), completion_rates)
    plt.title('任务完成率曲线')
    plt.xlabel('Episode')
    plt.ylabel('完成率')

    plt.tight_layout()
    plt.savefig(f"{output_dir}/final_results.png")

    # Evaluate the trained agent greedily (agent.act defaults to eps=0)
    print("测试训练好的智能体...")
    state = env.reset()
    env.render(save_path=f"{output_dir}/test_initial.png")

    done = False
    # env.step enforces a 200-step cap, so this loop always terminates even
    # if the policy never collects every task
    while not done:
        action = agent.act(state)
        state, _, done, _ = env.step(action)

    # Render the final test trajectory
    env.render(save_path=f"{output_dir}/test_final.png")
