import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import random
from collections import deque, namedtuple
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import os
import copy

# Reproducibility: seed all three RNG sources with the same value.
_SEED = 42
torch.manual_seed(_SEED)
np.random.seed(_SEED)
random.seed(_SEED)

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# ===================== Environment =====================
class UAVEnv:
    """UAV data-collection environment on a 2-D rectangular area.

    A single UAV starts at the area center and must fly within
    ``d_collect`` meters of every *active* ground user to collect its data
    before ``t_max`` steps elapse. User positions are fixed for the
    lifetime of the environment; which users are active defines a "task"
    and can be changed with :meth:`switch_active_users`.
    """

    def __init__(self, width=100, height=100, num_users=10, active_users=8,
                 v_max=5.0, d_collect=5.0, t_max=200, boundary_penalty=True):
        self.width = width                  # area extent along x (meters)
        self.height = height                # area extent along y (meters)
        self.num_users = num_users          # total ground users
        self.active_users = active_users    # users active per task
        self.v_max = v_max                  # max per-axis speed per step
        self.d_collect = d_collect          # collection radius
        self.t_max = t_max                  # episode step limit
        self.boundary_penalty = boundary_penalty  # terminate + penalize on leaving the area

        # Fixed user positions for the lifetime of this environment.
        self.user_positions = self._generate_user_positions()
        self.active_mask = None
        self.reset()

    def _generate_user_positions(self):
        # Uniform random positions inside [0, width] x [0, height].
        return np.random.rand(self.num_users, 2) * [self.width, self.height]

    def reset(self, resample=True):
        """Reset the UAV to the center and clear collection progress.

        By default a fresh random set of active users is drawn (the
        original behavior). Pass ``resample=False`` to keep the current
        active-user mask — used by :meth:`switch_active_users` so the task
        it draws is not immediately overwritten.

        Returns the initial state vector.
        """
        if resample or self.active_mask is None:
            active_indices = np.random.choice(self.num_users, self.active_users, replace=False)
            self.active_mask = np.zeros(self.num_users, dtype=np.int32)
            self.active_mask[active_indices] = 1

        # UAV starts at the center of the area.
        self.uav_position = np.array([self.width / 2, self.height / 2])
        self.step_count = 0
        self.collected = np.zeros(self.num_users, dtype=bool)

        return self._get_state()

    def _get_state(self):
        # State: [uav_x, uav_y] + flattened user positions + activity mask.
        state = np.concatenate([
            self.uav_position,
            self.user_positions.flatten(),
            self.active_mask
        ])
        return state

    def step(self, action):
        """Apply velocity ``action = (vx, vy)``.

        Returns ``(state, reward, done, info)``. Reward: +100 per newly
        collected user, -1 per time step, and a terminal -100 if the UAV
        tries to leave the area while ``boundary_penalty`` is enabled.
        """
        vx, vy = action
        vx = np.clip(vx, -self.v_max, self.v_max)
        vy = np.clip(vy, -self.v_max, self.v_max)

        new_position = self.uav_position + np.array([vx, vy])

        # Boundary handling: clip back inside, optionally flag the violation.
        out_of_bound = False
        if new_position[0] < 0 or new_position[0] > self.width or \
                new_position[1] < 0 or new_position[1] > self.height:
            if self.boundary_penalty:
                out_of_bound = True
            new_position[0] = np.clip(new_position[0], 0, self.width)
            new_position[1] = np.clip(new_position[1], 0, self.height)

        self.uav_position = new_position
        self.step_count += 1

        # Collect from any active, not-yet-collected user within range.
        collected_count = 0
        for i in range(self.num_users):
            if self.active_mask[i] and not self.collected[i]:
                distance = np.linalg.norm(self.uav_position - self.user_positions[i])
                if distance <= self.d_collect:
                    self.collected[i] = True
                    collected_count += 1

        reward = 100 * collected_count - 1  # +100 per collection, -1 per step

        # Termination: boundary violation overrides everything else.
        done = False
        if out_of_bound:
            reward = -100  # boundary-violation penalty
            done = True
        elif np.sum(self.collected) == self.active_users:
            done = True  # all active users collected
        elif self.step_count >= self.t_max:
            done = True  # timeout

        return self._get_state(), reward, done, {
            "collected": collected_count,
            "out_of_bound": out_of_bound
        }

    def switch_active_users(self):
        """Draw a new active-user set (task switch); positions stay fixed.

        BUG FIX: the original sampled a new mask and then called
        ``reset()``, which immediately resampled the mask again — the
        freshly drawn task was discarded. We now reset without resampling
        so the new task actually takes effect.
        """
        active_indices = np.random.choice(self.num_users, self.active_users, replace=False)
        self.active_mask = np.zeros(self.num_users, dtype=np.int32)
        self.active_mask[active_indices] = 1
        self.reset(resample=False)


# ===================== Neural networks =====================
class Actor(nn.Module):
    """Deterministic policy network: maps a state vector to an action in
    ``[-1, 1]^action_dim`` via two ReLU hidden layers and a tanh head."""

    def __init__(self, state_dim, action_dim, hidden_dim=256):
        super().__init__()
        # Build the hidden stack in a loop; layers are created in the
        # same order as before, so RNG-driven weight init is unchanged
        # and state_dict keys (net.0 .. net.5) are preserved.
        dims = [state_dim, hidden_dim, hidden_dim]
        layers = []
        for d_in, d_out in zip(dims[:-1], dims[1:]):
            layers += [nn.Linear(d_in, d_out), nn.ReLU()]
        layers += [nn.Linear(hidden_dim, action_dim), nn.Tanh()]
        self.net = nn.Sequential(*layers)

    def forward(self, state):
        """Return an action in [-1, 1] per component."""
        return self.net(state)


class Critic(nn.Module):
    """Twin Q-networks (clipped double-Q style): each head maps a
    concatenated (state, action) pair to a scalar value estimate."""

    @staticmethod
    def _build_q_head(state_dim, action_dim, hidden_dim):
        # One Q head: two ReLU hidden layers over the (s, a) concatenation.
        return nn.Sequential(
            nn.Linear(state_dim + action_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 1),
        )

    def __init__(self, state_dim, action_dim, hidden_dim=256):
        super().__init__()
        # q1 is built before q2 so RNG-based initialization matches the
        # original creation order; attribute names keep state_dict keys.
        self.q1 = self._build_q_head(state_dim, action_dim, hidden_dim)
        self.q2 = self._build_q_head(state_dim, action_dim, hidden_dim)

    def forward(self, state, action):
        """Return the pair (Q1(s, a), Q2(s, a))."""
        sa = torch.cat([state, action], dim=1)
        return self.q1(sa), self.q2(sa)


# ===================== Agent =====================
class CRLAgent:
    """Continual-RL agent: deterministic actor with clipped double-Q
    critics (DDPG/TD3-style) plus Elastic Weight Consolidation (EWC) to
    mitigate catastrophic forgetting across task switches.

    NOTE(review): ``alpha`` (a SAC-style entropy coefficient) is stored
    but never used anywhere in this class — confirm whether entropy
    regularization was intended.
    """

    def __init__(self, state_dim, action_dim, lr=3e-4, gamma=0.99, tau=0.005,
                 alpha=0.2, buffer_size=1000000, batch_size=256, ewc_lambda=1000,
                 exploration_noise=0.1):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = gamma          # discount factor
        self.tau = tau              # soft target-update rate
        self.alpha = alpha          # unused — see class docstring
        self.batch_size = batch_size
        self.ewc_lambda = ewc_lambda              # EWC penalty strength
        self.exploration_noise = exploration_noise  # std of Gaussian action noise

        # Networks: actor, twin critic, and a frozen-by-copy target critic.
        self.actor = Actor(state_dim, action_dim).to(device)
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)

        # Optimizers.
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=lr)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=lr)

        # Experience replay buffer.
        self.buffer = deque(maxlen=buffer_size)
        self.experience = namedtuple("Experience",
                                     ["state", "action", "reward", "next_state", "done"])

        # EWC state: diagonal Fisher estimate and anchor parameters.
        self.ewc_params = {
            "fisher": {},
            "params": {},
            "task_count": 0
        }

    def select_action(self, state, deterministic=False):
        """Return an action in ``[-1, 1]^action_dim`` for ``state``.

        BUG FIX: ``deterministic`` was previously ignored, so the
        deterministic policy never explored. Gaussian noise (clipped back
        into the valid range) is now added when ``deterministic=False``;
        pass ``deterministic=True`` for evaluation.
        """
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        with torch.no_grad():
            action = self.actor(state).cpu().numpy().flatten()
        if not deterministic:
            noise = np.random.normal(0.0, self.exploration_noise, size=action.shape)
            action = np.clip(action + noise, -1.0, 1.0)
        return action

    def store_experience(self, state, action, reward, next_state, done):
        """Append one transition to the replay buffer."""
        exp = self.experience(state, action, reward, next_state, done)
        self.buffer.append(exp)

    def update(self):
        """Run one critic and one actor gradient step on a sampled batch,
        then soft-update the target critic.

        Returns a dict with ``critic_loss`` and ``actor_loss`` (empty
        until the buffer holds at least one full batch).
        """
        if len(self.buffer) < self.batch_size:
            return {}

        # Sample a minibatch from the replay buffer.
        batch = random.sample(self.buffer, self.batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)

        states = torch.FloatTensor(np.array(states)).to(device)
        actions = torch.FloatTensor(np.array(actions)).to(device)
        rewards = torch.FloatTensor(np.array(rewards)).unsqueeze(1).to(device)
        next_states = torch.FloatTensor(np.array(next_states)).to(device)
        dones = torch.FloatTensor(np.array(dones)).unsqueeze(1).to(device)

        # ---- Critic update (clipped double-Q target) ----
        with torch.no_grad():
            next_actions = self.actor(next_states)
            target_q1, target_q2 = self.critic_target(next_states, next_actions)
            target_q = torch.min(target_q1, target_q2)
            target_value = rewards + (1 - dones) * self.gamma * target_q

        current_q1, current_q2 = self.critic(states, actions)
        critic_loss = F.mse_loss(current_q1, target_value) + F.mse_loss(current_q2, target_value)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # ---- Actor update (maximize the pessimistic min-Q) ----
        actor_actions = self.actor(states)
        actor_q1, actor_q2 = self.critic(states, actor_actions)
        actor_q = torch.min(actor_q1, actor_q2)
        actor_loss = -actor_q.mean()

        # EWC penalty: pull actor parameters toward the anchors of the
        # previously consolidated task, weighted by Fisher importance.
        if self.ewc_params["task_count"] > 0:
            ewc_loss = 0
            for name, param in self.actor.named_parameters():
                fisher = self.ewc_params["fisher"][name]
                old_param = self.ewc_params["params"][name]
                ewc_loss += (fisher * (param - old_param).pow(2)).sum()

            actor_loss += self.ewc_lambda * ewc_loss

        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()

        # Soft (Polyak) update of the target critic.
        for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss.item()
        }

    def compute_fisher_matrix(self, env, samples=1000):
        """Estimate the diagonal Fisher information of the actor and
        anchor the current parameters for the EWC penalty.

        Uses ``-0.5 * ||a||^2`` as a surrogate log-probability (a unit
        Gaussian centered at the deterministic action). NOTE: this calls
        ``env.reset()`` repeatedly to draw states, which perturbs the
        environment's episode state.
        """
        # Anchor the current (end-of-task) actor parameters.
        params_actor = {name: param.data.clone()
                        for name, param in self.actor.named_parameters()}

        # Accumulate squared gradients of the surrogate log-likelihood.
        # (The original also pre-zeroed .grad here, which was redundant
        # with actor.zero_grad() inside the loop.)
        fisher_actor = {name: torch.zeros_like(param)
                        for name, param in self.actor.named_parameters()}

        for _ in range(samples):
            state = torch.FloatTensor(env.reset()).unsqueeze(0).to(device)

            self.actor.zero_grad()
            action = self.actor(state)
            log_prob = -0.5 * action.pow(2).sum(dim=1)  # surrogate log-prob
            log_prob.mean().backward()

            for name, param in self.actor.named_parameters():
                if param.grad is not None:
                    fisher_actor[name] += param.grad.data.pow(2) / samples

        # Store the Fisher estimate and anchors for the EWC penalty.
        self.ewc_params["fisher"] = fisher_actor
        self.ewc_params["params"] = params_actor
        self.ewc_params["task_count"] += 1

    def save_model(self, path):
        """Persist actor/critic weights and EWC state to ``path``."""
        torch.save({
            'actor': self.actor.state_dict(),
            'critic': self.critic.state_dict(),
            'ewc_params': self.ewc_params
        }, path)

    def load_model(self, path):
        """Restore a checkpoint written by :meth:`save_model`.

        BUG FIX: ``map_location=device`` so checkpoints saved on GPU load
        on CPU-only machines (and vice versa).
        """
        checkpoint = torch.load(path, map_location=device)
        self.actor.load_state_dict(checkpoint['actor'])
        self.critic.load_state_dict(checkpoint['critic'])
        self.critic_target = copy.deepcopy(self.critic)
        self.ewc_params = checkpoint['ewc_params']


# ===================== Training and evaluation =====================
def train_agent(env, agent, episodes=1000, switch_interval=100,
                save_path="uav_model.pth", plot_dir="plots"):
    """Train ``agent`` on ``env`` with periodic task switches.

    Every ``switch_interval`` episodes the active-user set is switched;
    EWC statistics are consolidated on the task that just finished.
    Saves the final model to ``save_path`` and training plots to
    ``plot_dir``. Returns ``(episode_rewards, success_rates)``.
    """
    os.makedirs(plot_dir, exist_ok=True)

    # Training metrics.
    episode_rewards = []
    episode_lengths = []
    success_rates = []
    task_switch_points = []

    current_task = 0
    task_rewards = []

    for ep in range(episodes):
        # Task switch.
        if ep > 0 and ep % switch_interval == 0:
            # BUG FIX: consolidate EWC statistics on the task that was just
            # trained BEFORE switching; the original computed the Fisher
            # matrix after switch_active_users(), anchoring the penalty to
            # the new task instead of the finished one.
            agent.compute_fisher_matrix(env)
            env.switch_active_users()
            task_switch_points.append(ep)
            current_task += 1
            print(f"Switched to new task at episode {ep}")

            # Save a trajectory snapshot at the switch point.
            plot_episode_trajectory(env, agent, ep, plot_dir)

        state = env.reset()
        total_reward = 0
        steps = 0
        done = False
        collected_count = 0

        while not done:
            action = agent.select_action(state)
            next_state, reward, done, info = env.step(action)
            agent.store_experience(state, action, reward, next_state, done)

            # One gradient update per environment step.
            agent.update()

            state = next_state
            total_reward += reward
            steps += 1
            collected_count += info["collected"]

        # Record per-episode metrics.
        episode_rewards.append(total_reward)
        episode_lengths.append(steps)
        success_rates.append(1 if collected_count == env.active_users else 0)
        task_rewards.append(total_reward)

        # Periodic progress report.
        if (ep + 1) % 10 == 0:
            avg_reward = np.mean(episode_rewards[-10:])
            success_rate = np.mean(success_rates[-10:])
            print(f"Episode {ep + 1}/{episodes}, Reward: {total_reward:.1f}, "
                  f"Avg Reward: {avg_reward:.1f}, Steps: {steps}, "
                  f"Success: {success_rate:.2f}")

        # Per-task summary at the end of each task block.
        if (ep + 1) % switch_interval == 0 and ep > 0:
            task_avg_reward = np.mean(task_rewards)
            task_success_rate = np.mean(success_rates[-switch_interval:])
            print(f"Task {current_task} completed: "
                  f"Avg Reward: {task_avg_reward:.1f}, "
                  f"Success Rate: {task_success_rate:.2f}")
            task_rewards = []

    # Save the final model and the training curves.
    agent.save_model(save_path)
    plot_training_curves(episode_rewards, success_rates, task_switch_points, plot_dir)

    return episode_rewards, success_rates


def plot_episode_trajectory(env, agent, episode, plot_dir):
    """Roll out one evaluation episode with the current policy and save a
    plot of the UAV path plus user locations to ``plot_dir``."""
    state = env.reset()
    done = False
    trajectory = [env.uav_position.copy()]
    # (Removed an unused `active_positions` local from the original.)

    while not done:
        # Evaluate greedily: no exploration noise when plotting.
        action = agent.select_action(state, deterministic=True)
        state, _, done, _ = env.step(action)
        trajectory.append(env.uav_position.copy())

    trajectory = np.array(trajectory)

    plt.figure(figsize=(10, 8))
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', linewidth=1.5, label='UAV Path')
    plt.scatter(trajectory[0, 0], trajectory[0, 1], c='green', marker='o', s=100, label='Start')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], c='red', marker='x', s=100, label='End')

    # Users: active in red, inactive in gray, labeled by index.
    for i, pos in enumerate(env.user_positions):
        color = 'red' if env.active_mask[i] else 'gray'
        plt.scatter(pos[0], pos[1], c=color, s=80)
        plt.text(pos[0], pos[1], f"{i}", fontsize=9)

    plt.title(f"UAV Trajectory (Episode {episode})")
    plt.xlabel("X (m)")
    plt.ylabel("Y (m)")
    plt.grid(True)
    plt.legend()
    plt.axis([0, env.width, 0, env.height])
    plt.savefig(f"{plot_dir}/trajectory_ep_{episode}.png")
    plt.close()


def plot_training_curves(rewards, success_rates, switch_points, plot_dir):
    """Save reward and success-rate curves to ``plot_dir``, with dashed
    red vertical lines marking each task switch."""
    plt.figure(figsize=(12, 8))

    # Reward curve.
    plt.subplot(2, 1, 1)
    plt.plot(rewards)
    for sp in switch_points:
        plt.axvline(x=sp, color='r', linestyle='--', alpha=0.5)
    plt.title("Episode Rewards")
    plt.xlabel("Episode")
    plt.ylabel("Reward")
    plt.grid(True)

    # Success-rate curve with a moving-average overlay.
    plt.subplot(2, 1, 2)
    window = 20
    plt.plot(success_rates, alpha=0.3)
    # BUG FIX: only overlay the moving average when there is enough
    # history; with fewer than `window` points, np.convolve's 'valid'
    # output no longer matches the x-range and plt.plot raises a shape
    # mismatch.
    if len(success_rates) >= window:
        moving_avg = np.convolve(success_rates, np.ones(window) / window, mode='valid')
        plt.plot(range(window - 1, len(success_rates)), moving_avg, 'r-')
    for sp in switch_points:
        plt.axvline(x=sp, color='r', linestyle='--', alpha=0.5)
    plt.title(f"Success Rate (Moving Avg, window={window})")
    plt.xlabel("Episode")
    plt.ylabel("Success Rate")
    plt.ylim([0, 1.1])
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(f"{plot_dir}/training_curves.png")
    plt.close()


# ===================== Main program =====================
if __name__ == "__main__":
    # Environment configuration: 100 m x 100 m area, 10 users of which
    # 8 are active per task.
    env_params = dict(
        width=100, height=100,
        num_users=10, active_users=8,
        v_max=5.0, d_collect=5.0,
        t_max=200, boundary_penalty=True,
    )

    # Agent configuration. State = UAV position (2) + flattened user
    # positions (10 * 2) + activity mask (10); action = [vx, vy].
    agent_params = dict(
        state_dim=2 + 10 * 2 + 10, action_dim=2,
        lr=3e-4, gamma=0.99, tau=0.005, alpha=0.2,
        buffer_size=100000, batch_size=256,
        ewc_lambda=5000,  # EWC regularization strength
    )

    # Training schedule: switch the active-user task every 100 episodes.
    train_params = dict(
        episodes=1000, switch_interval=100,
        save_path="uav_model.pth", plot_dir="training_plots",
    )

    # Build the environment and agent, then run training.
    env = UAVEnv(**env_params)
    agent = CRLAgent(**agent_params)

    print("Starting training...")
    rewards, success_rates = train_agent(env, agent, **train_params)
    print("Training completed!")