import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time

# Set random seeds for reproducibility
SEED = 42
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

# Check GPU availability
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Environment parameters
AREA_SIZE = 100  # 区域大小 100m x 100m
NUM_USERS = 10  # 用户数量
MAX_STEPS = 200  # 每个episode的最大步数
MAX_DISTANCE_COLLECT = 15  # UAV可收集任务的最大距离 (增大收集范围)

# UAV parameters
UAV_SPEED = 5.0  # UAV速度 (m/s)
UAV_ENERGY_PER_METER = 0.1  # 每米能耗
UAV_HOVER_ENERGY = 0.5  # 悬停能耗

# Task parameters
TASK_SIZE = [10, 50]  # 任务大小范围 (MB)
TASK_DELAY_WEIGHT = 0.6  # 延迟权重
ENERGY_WEIGHT = 0.4  # 能耗权重

# DDPG/TD3 hyperparameters
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99
TAU = 0.005
BUFFER_SIZE = 100000
BATCH_SIZE = 256
EXPLORATION_NOISE = 0.3  # 增大探索噪声


class Environment:
    """UAV data-collection environment on an AREA_SIZE x AREA_SIZE square.

    A single UAV moves in continuous 2-D space and collects tasks from
    NUM_USERS fixed ground users. A task is collected when the UAV comes
    within MAX_DISTANCE_COLLECT of its user. An episode ends when every
    task is collected or MAX_STEPS steps have elapsed.
    """

    def __init__(self):
        # Fixed user positions (sampled once; reproducible via the global seed).
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))

        # Fixed per-user task sizes in MB.
        self.task_sizes = np.random.uniform(TASK_SIZE[0], TASK_SIZE[1], size=NUM_USERS)

        # UAV starts at the area center; float dtype so fractional moves accumulate.
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Per-user "task already collected" flags.
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)

        # Step counter for the current episode.
        self.step_count = 0

        # Episode accumulators for delay and energy.
        self.total_delay = 0
        self.total_energy = 0

        # Position history for rendering.
        self.trajectory = [self.uav_position.copy()]

    def reset(self):
        """Start a new episode (user positions/task sizes stay fixed); return the initial state."""
        # UAV back to the area center.
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Clear collection flags, counters, accumulators, and the path history.
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.total_delay = 0
        self.total_energy = 0
        self.trajectory = [self.uav_position.copy()]

        return self._get_state()

    def step(self, action):
        """Advance the environment by one step.

        Args:
            action: 2-vector in [-1, 1]; scaled by UAV_SPEED to a displacement.

        Returns:
            (state, reward, done, info) where info reports collection,
            energy, and delay totals for the episode so far.
        """
        # Move the UAV, clamped to the area bounds.
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)

        # Record the new position for visualization.
        self.trajectory.append(self.uav_position.copy())

        # Movement energy is proportional to the actual (post-clamp) distance.
        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        energy_consumed = distance_moved * UAV_ENERGY_PER_METER

        # Collect every uncollected task that is now within range.
        newly_collected = 0
        collected_indices = []

        for i in range(NUM_USERS):
            if not self.collected_tasks[i]:
                dist_to_user = np.linalg.norm(self.uav_position - self.user_positions[i])
                if dist_to_user <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    collected_indices.append(i)

                    # Delay grows with both distance and task size.
                    delay = dist_to_user * self.task_sizes[i] / 10
                    self.total_delay += delay

                    # Hover energy spent while collecting.
                    energy_consumed += UAV_HOVER_ENERGY

        # Accumulate total energy.
        self.total_energy += energy_consumed

        # Advance the step counter.
        self.step_count += 1

        # Shaped reward for this transition.
        reward = self._calculate_reward(newly_collected, energy_consumed, collected_indices)

        # Episode ends on timeout or once all tasks are collected.
        done = (self.step_count >= MAX_STEPS) or np.all(self.collected_tasks)

        return self._get_state(), reward, done, {
            "collected": sum(self.collected_tasks),
            "energy": self.total_energy,
            "delay": self.total_delay,
            "newly_collected": newly_collected,
            "total_users": NUM_USERS
        }

    def _get_state(self):
        """Build the observation vector.

        Layout: normalized UAV xy (2), then per-user (normalized distance,
        collected flag) pairs, then normalized step count.
        """
        state = np.zeros(2 + NUM_USERS * 2 + 1)

        # UAV position scaled to [0, 1].
        state[0:2] = self.uav_position / AREA_SIZE

        # Distance to each user (normalized by the area diagonal) and its flag.
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            state[2 + i * 2] = dist / np.sqrt(2 * AREA_SIZE ** 2)
            state[2 + i * 2 + 1] = float(self.collected_tasks[i])

        # Episode progress in [0, 1].
        state[-1] = self.step_count / MAX_STEPS

        return state

    def _calculate_reward(self, newly_collected, energy_consumed, collected_indices):
        """Shaped reward: collection + progress + proximity + completion - energy - time.

        `collected_indices` is currently unused; kept for interface stability.
        """
        # Large bonus per newly collected task.
        collection_reward = newly_collected * 20

        # Moderate energy penalty (kept small relative to collection bonuses).
        energy_penalty = energy_consumed * 1.0

        # Progress bonus proportional to the fraction of tasks collected.
        collected_count = sum(self.collected_tasks)
        progress_reward = collected_count / NUM_USERS * 10

        # Shaping term that pulls the UAV toward uncollected tasks.
        proximity_reward = 0
        for i in range(NUM_USERS):
            if not self.collected_tasks[i]:
                dist = np.linalg.norm(self.uav_position - self.user_positions[i])
                # Reward rises as distance shrinks below twice the collection radius.
                if dist < MAX_DISTANCE_COLLECT * 2:
                    proximity_reward += (1 - dist / (MAX_DISTANCE_COLLECT * 2)) * 2

        # Big one-time bonus when every task has been collected.
        completion_reward = 100 if collected_count == NUM_USERS else 0

        # Growing time penalty discourages idle cruising...
        time_penalty = self.step_count * 0.1

        # ...but is softened on steps that collect something.
        if newly_collected > 0:
            time_penalty *= 0.5

        # Combine all terms.
        reward = collection_reward + progress_reward + proximity_reward + completion_reward - energy_penalty - time_penalty

        return reward

    def render(self, episode=0, clear_output=True):
        """Save a snapshot of the environment under results/.

        `clear_output` is unused but kept for API compatibility.
        """
        # Robustness fix: ensure the output directory exists even when
        # render() is called before train() has created it.
        os.makedirs("results", exist_ok=True)

        plt.figure(figsize=(10, 10))

        # Users: green = collected, red = pending.
        for i, pos in enumerate(self.user_positions):
            color = 'green' if self.collected_tasks[i] else 'red'
            plt.scatter(pos[0], pos[1], s=100, c=color)
            plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=12)

        # UAV position and the path flown so far.
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')

        # Collection radius around the UAV.
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]),
                            MAX_DISTANCE_COLLECT, color='blue', fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        plt.title(f"轮次 {episode}, 步数 {self.step_count}, 已收集 {sum(self.collected_tasks)}/{NUM_USERS}")
        plt.grid(True)

        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


# Actor网络 (策略网络)
class Actor(nn.Module):
    """Deterministic policy network.

    Maps a state vector to an action in [-max_action, max_action]^action_dim
    via a tanh-bounded MLP.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()

        self.max_action = max_action

        # Wide MLP (512-256-128) with a tanh head bounding the raw action to [-1, 1].
        self.net = nn.Sequential(
            nn.Linear(state_dim, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, action_dim),
            nn.Tanh(),
        )

        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights and zero biases for every linear layer.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.constant_(module.bias, 0.0)

    def forward(self, state):
        # Scale the tanh output to the action bound.
        return self.max_action * self.net(state)


# Critic网络 (价值网络)
class Critic(nn.Module):
    """Twin Q-networks (TD3 style).

    Two structurally identical estimators Q1 and Q2 over (state, action);
    taking the minimum of the two target values curbs overestimation bias.
    """

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()

        joint_dim = state_dim + action_dim

        # Two independent heads with identical architecture.
        self.q1_net = self._build_head(joint_dim)
        self.q2_net = self._build_head(joint_dim)

        self._init_weights()

    @staticmethod
    def _build_head(in_dim):
        # One Q head: 512-256-128 MLP ending in a scalar value.
        return nn.Sequential(
            nn.Linear(in_dim, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 1),
        )

    def _init_weights(self):
        # Xavier-uniform weights; small positive bias on every linear layer.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.constant_(module.bias, 0.01)

    def forward(self, state, action):
        """Return (Q1, Q2) for the concatenated state-action pair."""
        joint = torch.cat([state, action], dim=1)
        return self.q1_net(joint), self.q2_net(joint)

    def Q1(self, state, action):
        """Return only Q1 (used for the actor's policy-gradient loss)."""
        joint = torch.cat([state, action], dim=1)
        return self.q1_net(joint)


class ReplayBuffer:
    """FIFO experience replay with fixed capacity; oldest entries are evicted first."""

    def __init__(self, max_size=BUFFER_SIZE):
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        # Append one transition; the deque drops the oldest when full.
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Uniformly sample up to `batch_size` transitions as stacked arrays."""
        count = min(len(self.buffer), batch_size)
        batch = random.sample(self.buffer, count)
        states, actions, rewards, next_states, dones = zip(*batch)
        return (np.stack(states), np.stack(actions), np.stack(rewards),
                np.stack(next_states), np.stack(dones))

    def __len__(self):
        return len(self.buffer)


class TD3:
    """Twin Delayed DDPG (TD3).

    More stable than vanilla DDPG via three mechanisms: clipped double-Q
    learning (twin critics), target policy smoothing, and delayed
    actor/target-network updates.
    """

    def __init__(self, state_dim, action_dim, max_action):
        # Actor and its slowly-tracking target network.
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin critic and its target.
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()

        # TD3-specific hyperparameters, scaled by the action bound.
        self.policy_noise = 0.2 * max_action  # std of target-policy smoothing noise
        self.noise_clip = 0.5 * max_action    # clip range for that noise
        self.policy_freq = 2                  # actor/target update every N critic updates
        self.total_it = 0                     # number of completed training iterations

    def select_action(self, state, add_noise=True):
        """Return an action for `state`; Gaussian exploration noise added when training."""
        state_t = torch.FloatTensor(state.reshape(1, -1)).to(device)
        # Inference only: no autograd graph needed.
        with torch.no_grad():
            action = self.actor(state_t).cpu().numpy().flatten()

        if add_noise:
            noise = np.random.normal(0, self.max_action * EXPLORATION_NOISE, size=action.shape)
            action = action + noise

        return np.clip(action, -self.max_action, self.max_action)

    def train(self):
        """One TD3 iteration: critic update every call, actor/targets every policy_freq calls."""
        if len(self.memory) < BATCH_SIZE:
            return  # not enough experience yet

        # Count only iterations that actually perform an update, so the
        # delayed-policy cadence is measured in real critic updates
        # (previously no-op warm-up calls were counted too).
        self.total_it += 1

        # Sample a batch from the replay buffer.
        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        with torch.no_grad():
            # Target policy smoothing: clipped Gaussian noise on the target action.
            # randn_like allocates the noise directly on the correct device
            # (the legacy FloatTensor(shape).normal_() constructor is deprecated
            # and built an uninitialized CPU tensor before transferring it).
            noise = (torch.randn_like(action) * self.policy_noise).clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q target: minimum of the twin target critics.
            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q = reward + (1 - done) * GAMMA * target_q

        # Current Q estimates.
        current_q1, current_q2 = self.critic(state, action)

        # Critic regression loss against the shared target.
        critic_loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Delayed policy and target-network updates.
        if self.total_it % self.policy_freq == 0:
            # Deterministic policy gradient: maximize Q1 of the current policy.
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()

            # Polyak averaging of both target networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)


def train():
    """Train the TD3 agent on the UAV task-collection environment.

    Runs up to `max_episodes` episodes, training at every environment step.
    Saves the best actor (most tasks collected; ties broken by episode
    reward), periodic checkpoints, and training-curve plots under results/.

    Returns:
        (agent, env): the trained TD3 agent and the environment instance.
    """

    # Directory for checkpoints and plots.
    os.makedirs("results", exist_ok=True)

    # Environment (user positions / task sizes are fixed at construction).
    env = Environment()

    # State layout: UAV xy (2) + per-user (distance, flag) pairs + normalized step.
    state_dim = 2 + NUM_USERS * 2 + 1
    action_dim = 2
    max_action = 1

    # TD3 agent.
    agent = TD3(state_dim, action_dim, max_action)

    # Training schedule.
    max_episodes = 1000
    eval_freq = 50

    # Per-episode history for the plots below.
    rewards_history = []
    collection_history = []
    energy_history = []
    best_reward = -float('inf')
    best_collection = 0

    start_time = time.time()

    for episode in range(1, max_episodes + 1):
        state = env.reset()
        episode_reward = 0
        last_collection = 0

        for step in range(1, MAX_STEPS + 1):
            # Exploratory action (Gaussian noise added inside select_action).
            action = agent.select_action(state)

            # Apply it to the environment.
            next_state, reward, done, info = env.step(action)

            # Store the transition.
            agent.memory.add(state, action, reward, next_state, done)

            # One TD3 training iteration (no-op until the buffer has a full batch).
            agent.train()

            # Advance the state and accumulate reward.
            state = next_state
            episode_reward += reward
            last_collection = info["collected"]

            # Periodically snapshot the environment for visualization.
            if episode % eval_freq == 0 and step % 20 == 0:
                env.render(episode)

            if done:
                break

        # Record episode statistics.
        rewards_history.append(episode_reward)
        collection_history.append(last_collection)
        energy_history.append(info["energy"])

        # Track the best episode: more tasks collected wins; reward breaks ties.
        if last_collection > best_collection or (last_collection == best_collection and episode_reward > best_reward):
            best_reward = episode_reward
            best_collection = last_collection

            # Save the best actor weights.
            torch.save(agent.actor.state_dict(), "results/best_actor.pth")

        # Progress log.
        elapsed_time = time.time() - start_time
        print(f"轮次: {episode}/{max_episodes} | "
              f"收集任务: {last_collection}/{NUM_USERS} | "
              f"奖励: {episode_reward:.2f} | "
              f"能耗: {info['energy']:.2f} | "
              f"步数: {env.step_count} | "
              f"用时: {elapsed_time:.2f}秒")

        # Periodically plot training curves and checkpoint the actor.
        if episode % eval_freq == 0:
            plt.figure(figsize=(15, 5))

            # Cumulative reward per episode.
            plt.subplot(1, 3, 1)
            plt.plot(rewards_history)
            plt.title("累计奖励")
            plt.xlabel("训练轮次")
            plt.ylabel("奖励")

            # Tasks collected per episode.
            plt.subplot(1, 3, 2)
            plt.plot(collection_history)
            plt.title("收集任务数量")
            plt.xlabel("训练轮次")
            plt.ylabel("任务数量")
            plt.ylim(0, NUM_USERS + 1)

            # Energy consumed per episode.
            plt.subplot(1, 3, 3)
            plt.plot(energy_history)
            plt.title("总能耗")
            plt.xlabel("训练轮次")
            plt.ylabel("能耗")

            plt.tight_layout()
            plt.savefig(f"results/training_curves_episode_{episode}.png")
            plt.close()

            # Checkpoint the current actor.
            torch.save(agent.actor.state_dict(), f"results/actor_episode_{episode}.pth")

    print(f"训练完成! 最佳结果: 收集 {best_collection}/{NUM_USERS} 任务, 奖励 {best_reward:.2f}")
    return agent, env


def test_and_visualize(agent, env, model_path="results/best_actor.pth"):
    """Run the trained policy once (noise-free) and save visualizations.

    Loads actor weights from `model_path`, runs one evaluation episode,
    then writes a trajectory plot and per-step reward curves to results/.

    Args:
        agent: TD3 agent whose actor will be loaded and evaluated.
        env: Environment instance (reset at the start).
        model_path: path to the saved actor state_dict.
    """

    # Load the best saved actor and switch to eval mode.
    agent.actor.load_state_dict(torch.load(model_path))
    agent.actor.eval()

    # Fresh episode.
    state = env.reset()

    total_reward = 0
    step_rewards = []

    # trajectory[0] is the start position; entry i (i >= 1) is the position
    # *after* the i-th move, so trajectory[step] is where the UAV stood when
    # it collected tasks at that step.
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS)

    for step in range(1, MAX_STEPS + 1):
        # Deterministic action (no exploration noise).
        action = agent.select_action(state, add_noise=False)

        # Snapshot the flags to detect newly collected tasks after the step.
        collected_before = env.collected_tasks.copy()

        # Apply the action.
        next_state, reward, done, info = env.step(action)

        # Record the post-move position. (Bug fix: the original appended the
        # position *before* env.step, which duplicated the start point,
        # dropped the final position, and made the collection connector
        # lines point at the pre-move position.)
        trajectory.append(env.uav_position.copy())

        # Record the step at which each task was collected.
        for i in range(NUM_USERS):
            if env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step

        # Accumulate rewards.
        total_reward += reward
        step_rewards.append(reward)

        # Advance the state.
        state = next_state

        # Periodic snapshots of the environment.
        if step % 5 == 0 or done:
            env.render(step)

        if done:
            break

    # To numpy for plotting.
    trajectory = np.array(trajectory)

    # Full trajectory figure.
    plt.figure(figsize=(12, 10))

    # Users: green = collected, red = missed.
    for i, (x, y) in enumerate(env.user_positions):
        color = 'green' if env.collected_tasks[i] else 'red'
        plt.scatter(x, y, s=150, c=color, marker='o')

        # Annotate user id and collection step (or "not collected").
        if env.collected_tasks[i]:
            plt.annotate(f"用户{i + 1}\n(步骤{int(collection_times[i])})",
                         (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)
        else:
            plt.annotate(f"用户{i + 1}\n(未收集)",
                         (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)

    # UAV path.
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)

    # Start and end markers.
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    # Step-index labels every 10 positions along the path.
    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]),
                     fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Dashed line from the UAV's collection position to each collected user.
    for i in range(NUM_USERS):
        if env.collected_tasks[i]:
            step = int(collection_times[i])
            if step < len(trajectory):
                uav_pos = trajectory[step]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]],
                         'g--', alpha=0.5)

    plt.title(f"UAV任务收集轨迹 (收集 {sum(env.collected_tasks)}/{NUM_USERS} 任务, 总步数: {env.step_count})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.legend()
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig("results/final_uav_trajectory.png")
    plt.close()

    # Per-step and cumulative reward curves.
    plt.figure(figsize=(15, 5))

    plt.subplot(1, 2, 1)
    plt.plot(step_rewards)
    plt.title("每步奖励")
    plt.xlabel("步数")
    plt.ylabel("奖励")
    plt.grid(True)

    plt.subplot(1, 2, 2)
    plt.plot(np.cumsum(step_rewards))
    plt.title("累计奖励")
    plt.xlabel("步数")
    plt.ylabel("累计奖励")
    plt.grid(True)

    plt.tight_layout()
    plt.savefig("results/test_rewards.png")
    plt.close()

    # Console summary of the evaluation episode.
    print("\n测试结果:")
    print(f"收集任务: {sum(env.collected_tasks)}/{NUM_USERS} ({sum(env.collected_tasks) / NUM_USERS * 100:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

    # Per-task collection details.
    print("\n任务收集详情:")
    for i in range(NUM_USERS):
        status = f"步骤 {int(collection_times[i])}" if env.collected_tasks[i] else "未收集"
        print(f"用户 {i + 1}: {status}")


if __name__ == "__main__":
    # Train the agent from scratch, then evaluate the best saved policy.
    trained_agent, trained_env = train()
    test_and_visualize(trained_agent, trained_env)
