import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
import matplotlib.pyplot as plt
from collections import deque, namedtuple
import copy
import math
import os
import time

# Seed every RNG source for reproducibility
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)

# One transition as stored in the replay buffer
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])


class ReplayBuffer:
    """Fixed-capacity FIFO store of transitions for experience replay."""

    def __init__(self, capacity):
        # deque(maxlen=...) evicts the oldest transition automatically.
        self.buffer = deque(maxlen=capacity)

    def push(self, state, action, reward, next_state, done):
        """Append one transition, evicting the oldest one when full."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Return a uniformly random mini-batch as stacked tensors.

        States are first stacked into a single ndarray: building a tensor
        directly from a Python list of ndarrays is very slow and raises a
        UserWarning in modern PyTorch.
        """
        batch = random.sample(self.buffer, batch_size)
        states, actions, rewards, next_states, dones = zip(*batch)
        states = torch.as_tensor(np.asarray(states), dtype=torch.float)
        actions = torch.as_tensor(actions, dtype=torch.long)
        rewards = torch.as_tensor(rewards, dtype=torch.float)
        next_states = torch.as_tensor(np.asarray(next_states), dtype=torch.float)
        dones = torch.as_tensor(dones, dtype=torch.float)
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


class QNetwork(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        hidden = 256  # wide hidden layers for extra capacity
        self.fc1 = nn.Linear(state_dim, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, action_dim)

    def forward(self, x):
        # Two ReLU layers followed by a linear read-out of Q-values.
        features = torch.relu(self.fc2(torch.relu(self.fc1(x))))
        return self.fc3(features)


class User:
    """A ground user at a fixed (x, y) position."""

    def __init__(self, x, y):
        self.x, self.y = x, y
        self.active = False  # belongs to the current task's active subset
        self.served = False  # already covered by the drone this episode


class DroneEnvironment:
    """2D area in which one drone serves ground users.

    The drone moves on a 100x100 area. A subset of users is "active"
    (the current task); the drone earns reward the first time it flies
    within ``service_radius`` of each active user in an episode.
    """

    def __init__(self, config):
        self.width = 100
        self.height = 100
        self.service_radius = config["service_radius"]

        # Users at fixed random positions; positions persist across tasks.
        self.all_users = []
        for _ in range(10):
            x = random.uniform(0, self.width)
            y = random.uniform(0, self.height)
            self.all_users.append(User(x, y))

        # Activate a subset of users (the initial task).
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users

        # Drone state.
        self.drone_x = random.uniform(0, self.width)
        self.drone_y = random.uniform(0, self.height)
        self.speed = 5
        self.max_steps = 200
        self.steps = 0

        # Trajectory log used by render().
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        # Discrete action space.
        self.actions = ["north", "south", "east", "west", "hover"]

    def reset(self):
        """Start a new episode: random drone position, all users unserved."""
        self.drone_x = random.uniform(0, self.width)
        self.drone_y = random.uniform(0, self.height)
        self.steps = 0

        for user in self.all_users:
            user.served = False

        # Clear the trajectory log.
        self.trajectory_x = [self.drone_x]
        self.trajectory_y = [self.drone_y]

        return self.get_state()

    def switch_task(self):
        """Re-sample the active user subset to simulate a task change."""
        self.active_users = random.sample(self.all_users, k=8)
        for user in self.all_users:
            user.active = user in self.active_users
            user.served = False  # reset service status for the new task

    def get_state(self):
        """State vector: normalized drone position + per-user active flags."""
        state = [self.drone_x / self.width, self.drone_y / self.height]
        state.extend([1 if user.active else 0 for user in self.all_users])
        return np.array(state)

    def step(self, action_idx):
        """Apply one action and return (state, reward, done, info).

        ``info["serviced_users"]`` reports the cumulative number of active
        users served so far this episode.
        """
        action = self.actions[action_idx]
        prev_x, prev_y = self.drone_x, self.drone_y

        # Move the drone, clamped to the area bounds.
        if action == "north":
            self.drone_y = min(self.height, self.drone_y + self.speed)
        elif action == "south":
            self.drone_y = max(0, self.drone_y - self.speed)
        elif action == "east":
            self.drone_x = min(self.width, self.drone_x + self.speed)
        elif action == "west":
            self.drone_x = max(0, self.drone_x - self.speed)
        # "hover": no movement.

        movement_cost = math.hypot(self.drone_x - prev_x, self.drone_y - prev_y)

        # Record trajectory for rendering.
        self.trajectory_x.append(self.drone_x)
        self.trajectory_y.append(self.drone_y)

        reward = 0
        newly_served = 0

        # Reward each active user the first time the drone covers it.
        for user in self.all_users:
            if user.active and not user.served:
                distance = math.hypot(self.drone_x - user.x, self.drone_y - user.y)
                if distance < self.service_radius:
                    reward += 20
                    newly_served += 1
                    user.served = True

        # Small penalty proportional to the distance flown this step.
        reward -= 0.05 * movement_cost

        # One-time bonus on the step that completes service of every active
        # user. (BUG FIX: the original compared the PER-STEP service count
        # to the task size, so the bonus was effectively unreachable.)
        total_served = sum(1 for u in self.active_users if u.served)
        if newly_served > 0 and total_served == len(self.active_users):
            reward += 50

        self.steps += 1
        done = self.steps >= self.max_steps

        return self.get_state(), reward, done, {"serviced_users": total_served}

    def render(self, episode=None, step=None):
        """Save a PNG of the current trajectory and return its filename."""
        plt.figure(figsize=(10, 10))
        plt.xlim(0, self.width)
        plt.ylim(0, self.height)

        # Drone path and current position.
        plt.plot(self.trajectory_x, self.trajectory_y, 'ro-', alpha=0.6, label='Drone Path')
        plt.scatter(self.drone_x, self.drone_y, c='red', s=100, marker='*', label='Drone')

        # Users: green = served, orange = active but unserved, gray = inactive.
        for user in self.all_users:
            if user.active:
                color = 'green' if user.served else 'orange'
                plt.scatter(user.x, user.y, c=color, s=50, edgecolors='black')

                # Highlight users currently inside the service radius.
                distance = math.hypot(self.drone_x - user.x, self.drone_y - user.y)
                if distance < self.service_radius:
                    circle = plt.Circle((user.x, user.y), self.service_radius, color='green', fill=False, alpha=0.3)
                    plt.gca().add_patch(circle)
            else:
                plt.scatter(user.x, user.y, c='gray', s=50, alpha=0.4)

        # Drone's service radius.
        circle = plt.Circle((self.drone_x, self.drone_y), self.service_radius, color='blue', fill=False, alpha=0.2)
        plt.gca().add_patch(circle)

        plt.legend()

        # Title with optional episode/step annotation.
        title = f'Drone Trajectory'
        if episode is not None:
            title += f' (Episode {episode}'
        if step is not None:
            title += f' - Step {step}'
        plt.title(title)

        plt.xlabel('X Position (m)')
        plt.ylabel('Y Position (m)')
        plt.grid(True)

        # BUG FIX: create the same directory that savefig() writes into
        # (the original made '../trajectories' but saved to 'trajectories/',
        # which crashed on the first render).
        os.makedirs('trajectories', exist_ok=True)

        # Unique filename per call.
        timestamp = int(time.time() * 1000)
        if episode is not None and step is not None:
            filename = f'trajectories/trajectory_ep_{episode}_step_{step}_{timestamp}.png'
        else:
            filename = f'trajectories/trajectory_{timestamp}.png'

        plt.savefig(filename)
        plt.close()
        return filename


class ContinualRLAgent:
    """DQN agent with Elastic Weight Consolidation (EWC) for continual learning.

    After each task, a Fisher information estimate and a parameter snapshot
    are stored; subsequent updates add a quadratic penalty that anchors the
    parameters important to earlier tasks.
    """

    def __init__(self, state_dim, action_dim, config):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.gamma = config["gamma"]
        self.lr = config["learning_rate"]
        self.epsilon = 1.0
        self.epsilon_decay = config["epsilon_decay"]
        self.epsilon_min = 0.01
        self.batch_size = config["batch_size"]
        self.ewc_lambda = config["ewc_lambda"]

        # Online network, target network (periodically synced), optimizer.
        self.q_network = QNetwork(state_dim, action_dim)
        self.target_network = QNetwork(state_dim, action_dim)
        self.target_network.load_state_dict(self.q_network.state_dict())
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=self.lr)

        # Experience replay.
        self.memory = ReplayBuffer(config["memory_capacity"])

        # Per-task EWC statistics.
        self.previous_tasks_fisher = {}  # task_id -> Fisher information per parameter
        self.previous_tasks_params = {}  # task_id -> parameter snapshot
        self.current_task_id = 0
        self.task_seen = 0

    def select_action(self, state, eval_mode=False):
        """Epsilon-greedy action selection (pure greedy when eval_mode)."""
        if not eval_mode and random.random() < self.epsilon:
            return random.randrange(self.action_dim)

        state_tensor = torch.tensor(state, dtype=torch.float).unsqueeze(0)
        with torch.no_grad():
            q_values = self.q_network(state_tensor)
        return q_values.argmax().item()

    def update_model(self):
        """One gradient step on TD loss + EWC penalty.

        Returns the scalar loss, or None if the buffer is not full enough.
        """
        if len(self.memory) < self.batch_size:
            return None

        states, actions, rewards, next_states, dones = self.memory.sample(self.batch_size)

        # Q(s, a) for the actions actually taken.
        q_values = self.q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)

        # Bootstrapped targets from the frozen target network.
        with torch.no_grad():
            next_q_values = self.target_network(next_states).max(1)[0]
            target_q_values = rewards + (1 - dones) * self.gamma * next_q_values

        td_loss = nn.MSELoss()(q_values, target_q_values)

        # EWC penalty: quadratic pull toward each previous task's optimum,
        # weighted by that task's Fisher information.
        ewc_loss = 0
        for task_id in range(self.task_seen):
            fisher_t = self.previous_tasks_fisher[task_id]
            params_t = self.previous_tasks_params[task_id]
            for name, param in self.q_network.named_parameters():
                if name in fisher_t:
                    ewc_loss += (fisher_t[name] * (param - params_t[name]).pow(2)).sum()

        loss = td_loss + self.ewc_lambda * ewc_loss

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return loss.item()

    def update_target_network(self):
        """Hard-copy the online network's weights into the target network."""
        self.target_network.load_state_dict(self.q_network.state_dict())

    def calculate_fisher_information(self, env, num_samples=100):
        """Estimate the diagonal Fisher information for EWC.

        Rolls out ``num_samples`` greedy episodes in ``env`` and accumulates
        squared gradients of the log-softmax policy over the Q-values.
        Returns (fisher_dict, param_dict) keyed by parameter name.
        """
        fisher_dict = {}
        param_dict = {}

        for name, param in self.q_network.named_parameters():
            fisher_dict[name] = torch.zeros_like(param)
            param_dict[name] = param.data.clone()

        self.q_network.eval()
        for _ in range(num_samples):
            state = env.reset()
            done = False

            while not done:
                action = self.select_action(state, eval_mode=True)
                next_state, reward, done, _ = env.step(action)

                # Log-probability of the chosen action under a softmax policy
                # derived from the Q-values.
                state_tensor = torch.tensor(state, dtype=torch.float).unsqueeze(0)
                q_values = self.q_network(state_tensor)
                log_prob = torch.log_softmax(q_values, dim=1)[0, action]

                self.optimizer.zero_grad()
                (-log_prob).backward()

                # Accumulate squared gradients (diagonal Fisher estimate).
                for name, param in self.q_network.named_parameters():
                    if param.grad is not None:
                        fisher_dict[name] += param.grad.pow(2).data

                state = next_state

        # NOTE(review): normalizing by episodes (not transitions) keeps the
        # original EWC scale; per-transition averaging would shrink the
        # Fisher values by roughly env.max_steps and require re-tuning
        # ewc_lambda.
        for name in fisher_dict:
            fisher_dict[name] /= num_samples

        self.q_network.train()
        return fisher_dict, param_dict

    def finish_task(self, env):
        """Snapshot Fisher information and parameters when a task ends."""
        fisher, params = self.calculate_fisher_information(env)
        self.previous_tasks_fisher[self.current_task_id] = fisher
        self.previous_tasks_params[self.current_task_id] = params

        self.task_seen += 1
        self.current_task_id += 1


def train():
    """Train the continual-learning agent and save reward plots."""
    # Hyperparameters.
    config = {
        "learning_rate": 0.0005,  # lowered learning rate
        "gamma": 0.97,  # raised discount factor
        "epsilon_decay": 0.998,  # slower exploration decay
        "batch_size": 128,  # larger batch size
        "memory_capacity": 10000,  # larger replay capacity
        "service_radius": 15,
        "ewc_lambda": 300  # moderate EWC regularization strength
    }

    # Environment.
    env = DroneEnvironment(config)

    # State and action dimensions.
    state_dim = 2 + 10  # drone position (2) + per-user active flags (10)
    action_dim = 5  # north, south, east, west, hover

    # Agent.
    agent = ContinualRLAgent(state_dim, action_dim, config)

    # Training schedule.
    num_episodes = 1500
    target_update = 10
    task_switch_interval = 250
    render_interval = 100

    # Bookkeeping.
    all_rewards = []
    avg_rewards = []
    task_switch_points = []

    # BUG FIX: plots are saved under 'results/', so create that directory
    # (the original created '../results' and savefig() then crashed).
    os.makedirs('results', exist_ok=True)

    for episode in range(num_episodes):
        # Switch the active-user pattern (task) every N episodes.
        if episode % task_switch_interval == 0 and episode > 0:
            print(f"\n===== 切换到新任务 (Episode {episode}) =====")
            agent.finish_task(env)  # snapshot Fisher info for the old task
            env.switch_task()
            task_switch_points.append(episode)

        state = env.reset()
        total_reward = 0
        done = False
        step_count = 0

        while not done:
            action = agent.select_action(state)
            next_state, reward, done, info = env.step(action)
            agent.memory.push(state, action, reward, next_state, done)
            agent.update_model()

            state = next_state
            total_reward += reward
            step_count += 1

        # Periodically sync the target network.
        if episode % target_update == 0:
            agent.update_target_network()

        # Decay exploration.
        agent.epsilon = max(agent.epsilon_min, agent.epsilon * agent.epsilon_decay)

        # Track rewards and a 100-episode running average.
        all_rewards.append(total_reward)
        avg_reward = np.mean(all_rewards[-100:]) if len(all_rewards) >= 100 else np.mean(all_rewards)
        avg_rewards.append(avg_reward)

        if (episode + 1) % 10 == 0:
            print(
                f"Episode {episode + 1}/{num_episodes}, Reward: {total_reward:.2f}, Avg Reward: {avg_reward:.2f}, Epsilon: {agent.epsilon:.4f}")

        # Save a trajectory snapshot periodically.
        if (episode + 1) % render_interval == 0 or episode == 0:
            env.render(episode=episode + 1, step=step_count)

    # Overall reward curve with task-switch markers.
    plt.figure(figsize=(12, 6))
    plt.plot(all_rewards, alpha=0.6, label='Episode Reward')
    plt.plot(avg_rewards, label='Average Reward (100 episodes)', linewidth=2)

    for switch_point in task_switch_points:
        plt.axvline(x=switch_point, color='r', linestyle='--', alpha=0.7)

    # Shade the first 20 episodes after each switch (adaptation phase).
    for switch_point in task_switch_points:
        plt.axvspan(switch_point, switch_point + 20, color='red', alpha=0.1)

    plt.xlabel('Episode')
    plt.ylabel('Reward')
    plt.title('Training Progress with Task Switching')
    plt.legend()
    plt.grid(True)
    plt.savefig('results/training_rewards.png')
    plt.close()

    # Per-task reward curves.
    plt.figure(figsize=(12, 6))
    for i, start_ep in enumerate([0] + task_switch_points):
        end_ep = task_switch_points[i] if i < len(task_switch_points) else num_episodes
        segment = all_rewards[start_ep:end_ep]
        plt.plot(range(start_ep, end_ep), segment, label=f'Task {i + 1}')

    plt.xlabel('Episode')
    plt.ylabel('Reward')
    plt.title('Reward per Task')
    plt.legend()
    plt.grid(True)
    plt.savefig('results/task_rewards.png')
    plt.close()

    # Final greedy evaluation.
    print("\n===== 最终评估 =====")
    test_episodes = 5
    for ep in range(test_episodes):
        state = env.reset()
        total_reward = 0
        done = False
        step_count = 0

        while not done:
            action = agent.select_action(state, eval_mode=True)
            next_state, reward, done, info = env.step(action)
            state = next_state
            total_reward += reward
            step_count += 1

        print(f"Test Episode {ep + 1}: Reward: {total_reward:.2f}, Serviced Users: {info['serviced_users']}/8")
        env.render(episode=f"test_{ep + 1}", step=step_count)


# Script entry point: run the full training experiment.
if __name__ == "__main__":
    train()