import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import random
from collections import deque
import copy
import time
import os

# Configure matplotlib for Chinese text rendering; fall back to the default
# font when SimHei is not installed on this machine.
try:
    font = FontProperties(fname=r"c:\windows\fonts\simhei.ttf", size=12)
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
except Exception:  # BUGFIX: bare `except:` would also swallow SystemExit/KeyboardInterrupt
    print("警告: 无法加载中文字体，将使用默认字体")

# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Environment parameters
AREA_SIZE = 100  # side length of the square service area (metres)
NUM_USERS = 10  # number of ground users / tasks
MAX_STEPS = 200  # reduced per-episode step cap to speed up training
COLLECTION_RADIUS = 7  # enlarged task-collection radius
MAX_SPEED = 3.0  # raised maximum UAV speed per step
ENERGY_COEF = 0.05  # lowered energy-penalty coefficient (scales speed^2)
DELAY_COEF = 0.8  # raised delay-penalty coefficient (scales pending tasks)

# TD3 hyper-parameters
ACTOR_LR = 0.001  # actor (policy) learning rate
CRITIC_LR = 0.002  # critic (Q-network) learning rate
GAMMA = 0.99  # discount factor
TAU = 0.005  # soft (Polyak) target-update rate
BUFFER_SIZE = 100000  # replay-buffer capacity
BATCH_SIZE = 256  # enlarged minibatch size
POLICY_NOISE = 0.2  # std of target-policy smoothing noise
NOISE_CLIP = 0.5  # clip range of the smoothing noise
POLICY_FREQ = 2  # delayed policy update: actor/targets move every N critic steps


class MECEnvironment:
    """UAV-assisted MEC task-collection environment.

    A single UAV flies over a square AREA_SIZE x AREA_SIZE region and
    collects one task from each of NUM_USERS ground users. User
    positions are drawn once in __init__ and stay fixed across episodes.
    """

    def __init__(self):
        # Random user layout, fixed for the whole training run.
        self.user_positions = np.random.rand(NUM_USERS, 2) * AREA_SIZE
        self.reset()

    def reset(self):
        """Begin a new episode and return the initial state vector."""
        # UAV starts at the centre of the area.
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2])
        # Task flags per user: 0 = pending, 1 = collected.
        self.task_status = np.zeros(NUM_USERS, dtype=int)
        self.collected_count = 0
        self.steps = 0
        # State: UAV position (2) + task flags (NUM_USERS).
        return self._get_state()

    def _get_state(self):
        # Flat observation: [x, y, flag_0, ..., flag_{NUM_USERS-1}].
        return np.concatenate([self.uav_position, self.task_status])

    def step(self, action):
        """Apply a velocity action.

        Args:
            action: 2-vector in [-1, 1], scaled to MAX_SPEED.

        Returns:
            (next_state, reward, done, collected_count)
        """
        # Interpret the action as a bounded velocity command.
        velocity = np.clip(action, -1, 1) * MAX_SPEED

        # Move the UAV, clamped so it stays inside the area.
        self.uav_position = np.clip(self.uav_position + velocity, 0, AREA_SIZE)

        # BUGFIX: count this step BEFORE the termination check. The
        # original incremented after computing `done`, so episodes ran
        # for MAX_STEPS + 1 steps instead of MAX_STEPS.
        self.steps += 1

        reward = 0.0
        collected_this_step = 0

        # Collect every still-pending task within COLLECTION_RADIUS.
        for i in range(NUM_USERS):
            if self.task_status[i] == 0:
                distance = np.linalg.norm(self.uav_position - self.user_positions[i])
                if distance < COLLECTION_RADIUS:
                    self.task_status[i] = 1
                    self.collected_count += 1
                    collected_this_step += 1
                    # Closer pickups earn a larger bonus (0..5).
                    proximity_reward = max(0, (COLLECTION_RADIUS - distance) / COLLECTION_RADIUS) * 5
                    reward += 15 + proximity_reward  # base collection reward

        # Extra bonus proportional to the number of fresh pickups.
        if collected_this_step > 0:
            reward += collected_this_step * 10

        # Delay penalty for tasks still outstanding.
        delay_penalty = DELAY_COEF * (NUM_USERS - self.collected_count)

        # Energy penalty proportional to speed squared.
        energy_penalty = ENERGY_COEF * np.sum(velocity ** 2)

        reward = reward - delay_penalty - energy_penalty

        # Time-scaled completion bonus when every task has been collected.
        if self.collected_count >= NUM_USERS:
            reward += 200 * (MAX_STEPS - self.steps) / MAX_STEPS

        # Terminate on full collection or on hitting the step cap.
        done = (self.collected_count >= NUM_USERS) or (self.steps >= MAX_STEPS)

        return self._get_state(), reward, done, self.collected_count


# Actor网络 (策略网络)
# Actor (policy) network
class Actor(nn.Module):
    """Deterministic policy: maps a state to an action in [-1, 1]^action_dim."""

    def __init__(self, state_dim, action_dim):
        super(Actor, self).__init__()
        # Hidden trunk 256 -> 128 -> 64, each followed by ReLU,
        # then a Tanh-bounded output head.
        hidden = [state_dim, 256, 128, 64]
        layers = []
        for width_in, width_out in zip(hidden[:-1], hidden[1:]):
            layers.append(nn.Linear(width_in, width_out))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(hidden[-1], action_dim))
        layers.append(nn.Tanh())
        self.net = nn.Sequential(*layers)

    def forward(self, state):
        """Return bounded actions for a batch of states."""
        return self.net(state)


# Critic网络 (Q值网络)
# Critic (twin Q-value) network
class Critic(nn.Module):
    """TD3 twin critic: two independent Q heads over (state, action)."""

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # First Q head.
        self.l1 = nn.Linear(state_dim + action_dim, 256)
        self.l2 = nn.Linear(256, 128)
        self.l3 = nn.Linear(128, 64)
        self.l4 = nn.Linear(64, 1)

        # Second Q head.
        self.l5 = nn.Linear(state_dim + action_dim, 256)
        self.l6 = nn.Linear(256, 128)
        self.l7 = nn.Linear(128, 64)
        self.l8 = nn.Linear(64, 1)

    def _head(self, sa, fc_a, fc_b, fc_c, fc_out):
        """Run one Q head over the concatenated state-action batch."""
        h = torch.relu(fc_a(sa))
        h = torch.relu(fc_b(h))
        h = torch.relu(fc_c(h))
        return fc_out(h)

    def forward(self, state, action):
        """Return (Q1, Q2) estimates for a batch of state-action pairs."""
        sa = torch.cat([state, action], 1)
        q1 = self._head(sa, self.l1, self.l2, self.l3, self.l4)
        q2 = self._head(sa, self.l5, self.l6, self.l7, self.l8)
        return q1, q2

    def Q1(self, state, action):
        """Return only the first head's Q value (used for the actor loss)."""
        sa = torch.cat([state, action], 1)
        return self._head(sa, self.l1, self.l2, self.l3, self.l4)


# TD3智能体
class TD3Agent:
    """TD3 (Twin Delayed DDPG) agent.

    Combines twin critics, target-policy smoothing, and delayed actor /
    target-network updates over a uniform experience-replay buffer.
    """

    def __init__(self, state_dim, action_dim):
        # Online networks; targets start as deep copies of them.
        self.actor = Actor(state_dim, action_dim).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        # Uniform replay buffer of (s, a, r, s', done) tuples.
        self.replay_buffer = deque(maxlen=BUFFER_SIZE)

        # Counts train() calls; drives the delayed (every POLICY_FREQ) updates.
        self.total_it = 0

        # Gaussian exploration noise added at action-selection time.
        self.noise = GaussianNoise(action_dim, sigma=0.2)

    def select_action(self, state, explore=True):
        """Return a clipped action in [-1, 1]; adds exploration noise when explore=True."""
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        action = self.actor(state).cpu().data.numpy().flatten()
        if explore:
            action = self.noise.get_action(action)
        return np.clip(action, -1, 1)

    def remember(self, state, action, reward, next_state, done):
        """Append one transition to the replay buffer."""
        self.replay_buffer.append((state, action, reward, next_state, done))

    def train(self):
        """Run one TD3 optimization step.

        Returns:
            (critic_loss, actor_loss) as floats. Both are 0.0 while the
            buffer holds fewer than BATCH_SIZE transitions; actor_loss is
            0.0 on steps where the delayed actor update is skipped.
        """
        self.total_it += 1

        if len(self.replay_buffer) < BATCH_SIZE:
            return 0.0, 0.0  # return floats rather than ints

        # Sample a uniform minibatch from the replay buffer.
        batch = random.sample(self.replay_buffer, BATCH_SIZE)
        states, actions, rewards, next_states, dones = zip(*batch)

        # Convert to numpy arrays first to avoid the slow tensor-creation warning.
        states = np.array(states)
        actions = np.array(actions)
        rewards = np.array(rewards)
        next_states = np.array(next_states)
        dones = np.array(dones)

        states = torch.FloatTensor(states).to(device)
        actions = torch.FloatTensor(actions).to(device)
        rewards = torch.FloatTensor(rewards).unsqueeze(1).to(device)
        next_states = torch.FloatTensor(next_states).to(device)
        dones = torch.FloatTensor(dones).unsqueeze(1).to(device)

        with torch.no_grad():
            # Target-policy smoothing: clipped Gaussian noise on the target action.
            noise = (
                    torch.randn_like(actions) * POLICY_NOISE
            ).clamp(-NOISE_CLIP, NOISE_CLIP).to(device)

            next_actions = (self.actor_target(next_states) + noise).clamp(-1, 1)

            # Clipped double-Q target: min over the twin target critics.
            target_Q1, target_Q2 = self.critic_target(next_states, next_actions)
            target_Q = torch.min(target_Q1, target_Q2)
            target_Q = rewards + (1 - dones) * GAMMA * target_Q

        # Critic update: MSE of both heads against the shared target.
        current_Q1, current_Q2 = self.critic(states, actions)
        critic_loss = nn.MSELoss()(current_Q1, target_Q) + nn.MSELoss()(current_Q2, target_Q)

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        actor_loss = 0.0
        # Delayed policy update: actor and targets move every POLICY_FREQ steps.
        if self.total_it % POLICY_FREQ == 0:
            # Actor update: ascend Q1 of the online critic.
            actor_loss_val = -self.critic.Q1(states, self.actor(states)).mean()

            self.actor_optimizer.zero_grad()
            actor_loss_val.backward()
            self.actor_optimizer.step()

            # Soft (Polyak) update of both target networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            actor_loss = actor_loss_val.item()

        return critic_loss.item(), actor_loss


# 高斯噪声
# Gaussian exploration noise
class GaussianNoise:
    """Additive zero-mean Gaussian noise, drawn fresh per call."""

    def __init__(self, action_dim, sigma=0.1):
        # Per-dimension standard deviation of the perturbation.
        self.action_dim = action_dim
        self.sigma = sigma

    def get_action(self, action):
        """Return *action* plus an N(0, sigma) sample per dimension."""
        perturbation = np.random.normal(0, self.sigma, self.action_dim)
        return action + perturbation


# 训练函数
def train_td3(episodes=1000):
    env = MECEnvironment()
    state_dim = 2 + NUM_USERS  # UAV位置 + 任务状态
    action_dim = 2  # 速度向量

    agent = TD3Agent(state_dim, action_dim)
    episode_rewards = []
    collected_tasks_history = []
    avg_rewards = []
    avg_collected = []

    # 创建保存目录
    os.makedirs("td3_results", exist_ok=True)

    best_score = -np.inf
    best_episode = 0

    start_time = time.time()

    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        collected_tasks = 0

        while True:
            action = agent.select_action(state)
            next_state, reward, done, collected = env.step(action)

            agent.remember(state, action, reward, next_state, done)
            critic_loss, actor_loss = agent.train()

            state = next_state
            total_reward += reward
            collected_tasks = collected

            if done:
                break

        episode_rewards.append(total_reward)
        collected_tasks_history.append(collected_tasks)

        # 保存最佳模型
        if collected_tasks > best_score:
            best_score = collected_tasks
            best_episode = episode
            torch.save(agent.actor.state_dict(), f"td3_results/best_actor_{best_score}.pth")
            torch.save(agent.critic.state_dict(), f"td3_results/best_critic_{best_score}.pth")

        # 每100轮计算平均
        if episode % 100 == 0 or episode == episodes - 1:
            avg_reward = np.mean(episode_rewards[-min(100, len(episode_rewards)):])
            avg_collect = np.mean(collected_tasks_history[-min(100, len(collected_tasks_history)):])
            avg_rewards.append(avg_reward)
            avg_collected.append(avg_collect)

            print(f"轮次: {episode + 1}/{episodes}, 平均奖励: {avg_reward:.2f}, "
                  f"平均收集任务: {avg_collect:.2f}/{NUM_USERS}, "
                  f"最佳收集: {best_score}/{NUM_USERS} (轮次 {best_episode + 1})")

        # 每轮打印
        print(f"轮次: {episode + 1}/{episodes}, 奖励: {total_reward:.2f}, "
              f"收集任务: {collected_tasks}/{NUM_USERS}, "
              f"步数: {env.steps}")

    training_time = time.time() - start_time
    print(f"训练完成! 总时间: {training_time:.2f}秒")

    # 保存最终模型
    torch.save(agent.actor.state_dict(), "td3_results/final_actor.pth")
    torch.save(agent.critic.state_dict(), "td3_results/final_critic.pth")

    return episode_rewards, collected_tasks_history, env.user_positions, avg_rewards, avg_collected


# 测试并绘制轨迹
def test_and_plot_trajectory(user_positions, model_path="td3_results/best_actor_10.pth"):
    env = MECEnvironment()
    env.user_positions = user_positions  # 使用训练时的用户位置

    state_dim = 2 + NUM_USERS
    action_dim = 2

    # 加载训练好的模型
    actor = Actor(state_dim, action_dim).to(device)
    if os.path.exists(model_path):
        actor.load_state_dict(torch.load(model_path))
    else:
        print(f"警告: 模型文件 {model_path} 不存在，使用随机模型")
    actor.eval()

    state = env.reset()
    trajectory = [env.uav_position.copy()]
    collected_history = [0]

    while True:
        with torch.no_grad():
            action = actor(torch.FloatTensor(state).unsqueeze(0).to(device)).cpu().data.numpy().flatten()

        next_state, _, done, collected = env.step(action)

        trajectory.append(env.uav_position.copy())
        collected_history.append(collected)
        state = next_state

        if done:
            break

    # 绘制轨迹
    plt.figure(figsize=(12, 10))
    plt.title(f"UAV飞行轨迹与服务用户 (收集{collected}/{NUM_USERS})", fontsize=16)

    # 绘制用户位置
    plt.scatter(user_positions[:, 0], user_positions[:, 1], c='red', s=100, label="用户位置")

    # 绘制轨迹
    trajectory = np.array(trajectory)
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', linewidth=1.5, label="UAV轨迹")
    plt.scatter(trajectory[0, 0], trajectory[0, 1], c='green', s=150, marker='s', label="起始点")
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], c='purple', s=150, marker='*', label="终点")

    # 添加收集点标记
    for i, user in enumerate(user_positions):
        plt.text(user[0] + 2, user[1] - 2, f'用户{i + 1}')

    # 标记收集点
    collected_positions = user_positions[:collected]
    plt.scatter(collected_positions[:, 0], collected_positions[:, 1],
                s=200, facecolors='none', edgecolors='g', linewidths=2,
                label=f"已收集({collected})")

    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.legend(loc='upper right')
    plt.grid(True)
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig("td3_results/uav_trajectory.png")
    plt.show()

    # 绘制收集过程
    plt.figure(figsize=(10, 6))
    plt.plot(collected_history, 'g-', linewidth=2)
    plt.title("任务收集过程")
    plt.xlabel("步数")
    plt.ylabel("已收集任务数量")
    plt.grid(True)
    plt.savefig("td3_results/task_collection_process.png")
    plt.show()

    return collected


# 主程序
if __name__ == "__main__":
    # 训练模型
    rewards, collected_history, user_positions, avg_rewards, avg_collected = train_td3(episodes=1000)

    # 绘制奖励曲线
    plt.figure(figsize=(12, 8))
    plt.subplot(2, 1, 1)
    plt.plot(rewards)
    plt.title("训练奖励曲线")
    plt.xlabel("训练轮次")
    plt.ylabel("累计奖励")
    plt.grid(True)

    plt.subplot(2, 1, 2)
    plt.plot(avg_rewards, 'r-', linewidth=2)
    plt.title("平均奖励曲线 (每100轮)")
    plt.xlabel("每100轮")
    plt.ylabel("平均奖励")
    plt.grid(True)
    plt.tight_layout()
    plt.savefig("td3_results/training_rewards.png")
    plt.show()

    # 绘制任务收集曲线
    plt.figure(figsize=(12, 8))
    plt.subplot(2, 1, 1)
    plt.plot(collected_history)
    plt.title("收集任务数量变化")
    plt.xlabel("训练轮次")
    plt.ylabel("收集任务数量")
    plt.grid(True)

    plt.subplot(2, 1, 2)
    plt.plot(avg_collected, 'r-', linewidth=2)
    plt.title("平均收集任务数量 (每100轮)")
    plt.xlabel("每100轮")
    plt.ylabel("平均收集数量")
    plt.grid(True)
    plt.tight_layout()
    plt.savefig("td3_results/collected_tasks.png")
    plt.show()

    # 测试并绘制轨迹
    final_collected = test_and_plot_trajectory(user_positions)
    print(f"最终收集任务数量: {final_collected}/{NUM_USERS}")

    # 性能报告
    final_avg_collected = np.mean(collected_history[-100:])
    max_collected = max(collected_history)
    success_rate = sum(np.array(collected_history) == NUM_USERS) / len(collected_history) * 100

    print("\n性能报告:")
    print(f"最终100轮平均收集任务: {final_avg_collected:.2f}/{NUM_USERS}")
    print(f"最大收集任务: {max_collected}/{NUM_USERS}")
    print(f"完全收集成功率: {success_rate:.2f}%")