import torch
import torch.nn as nn
from torch.distributions import Categorical
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import os
import time

# --- Hyperparameters (unchanged) ---
# Environment geometry / dynamics
AREA_SIZE = 100.0          # side length of the square operating area
NUM_TASKS = 5              # number of data-collection task points
MAX_STEPS = 200            # episode length cap
COLLECTION_RADIUS = 10.0   # distance within which a task counts as collected
UAV_SPEED = 5.0            # distance moved per step

# PPO optimisation
LEARNING_RATE = 3e-4
GAMMA = 0.99
GAE_LAMBDA = 0.95          # NOTE(review): defined but unused — the update below uses plain discounted returns
PPO_CLIP = 0.2
PPO_EPOCHS = 10
UPDATE_TIMESTEP = 4096     # rollout-buffer size that triggers a policy update

# Curriculum schedule
TOTAL_EPISODES = 5000
PHASE1_EPISODES = 1000     # fixed task layout until this episode
PHASE2_EPISODES = 3000     # jittered layout until this episode, random afterwards
PHASE2_NOISE_LEVEL = 15.0  # max per-axis jitter applied in phase 2
SAVE_FREQ = 200            # plot/checkpoint cadence (episodes)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")


# --- Environment (unchanged) ---
class CurriculumUAVEnv:
    """UAV data-collection environment with a three-phase curriculum.

    The task layout depends on the episode index passed to :meth:`reset`:
    phase 1 uses a fixed seeded layout, phase 2 jitters that layout, and
    phase 3 samples fully random layouts.
    """

    def __init__(self, fixed_seed=42):
        self.area_size = AREA_SIZE
        self.num_tasks = NUM_TASKS
        self.max_steps = MAX_STEPS
        self.action_space_dim = 9  # hover + 8 compass headings
        self.state_space_dim = self.num_tasks * 3  # (dx, dy, collected) per task
        self.action_vectors = self._create_action_vectors()
        self.fixed_seed = fixed_seed
        # Seeding the global NumPy RNG keeps the base layout reproducible.
        np.random.seed(self.fixed_seed)
        self.base_task_positions = np.random.uniform(0, self.area_size, size=(self.num_tasks, 2))

    def _create_action_vectors(self):
        """Return a (9, 2) array: row 0 is 'stay', rows 1-8 are UAV_SPEED-scaled headings."""
        headings = np.linspace(0, 2 * np.pi, 8, endpoint=False)
        moves = np.column_stack((np.cos(headings), np.sin(headings))) * UAV_SPEED
        return np.vstack((np.zeros((1, 2)), moves))

    def reset(self, episode):
        """Reset the UAV to the centre and lay out tasks per the curriculum phase."""
        self.uav_pos = np.array([self.area_size / 2, self.area_size / 2])
        self.collected_status = np.zeros(self.num_tasks, dtype=bool)
        self.current_step = 0
        if episode < PHASE1_EPISODES:
            # Phase 1: the fixed seeded layout.
            self.task_positions = self.base_task_positions.copy()
        elif episode < PHASE2_EPISODES:
            # Phase 2: jitter the base layout, clipped back inside the area.
            noise = np.random.uniform(-PHASE2_NOISE_LEVEL, PHASE2_NOISE_LEVEL, size=(self.num_tasks, 2))
            self.task_positions = np.clip(self.base_task_positions + noise, 0, self.area_size)
        else:
            # Phase 3: a fully random layout each episode.
            self.task_positions = np.random.uniform(0, self.area_size, size=(self.num_tasks, 2))
        return self._get_state()

    def _get_state(self):
        """Flat state vector: per task, its area-normalised offset and collected flag."""
        rel = (self.task_positions - self.uav_pos) / self.area_size
        return np.column_stack((rel, self.collected_status.astype(float))).ravel()

    def step(self, action):
        """Apply one movement action; returns (state, reward, done, info)."""
        self.uav_pos += self.action_vectors[action]
        self.uav_pos = np.clip(self.uav_pos, 0, self.area_size)
        self.current_step += 1
        reward = 0
        hits = 0
        for idx in range(self.num_tasks):
            if self.collected_status[idx]:
                continue
            if np.linalg.norm(self.uav_pos - self.task_positions[idx]) <= COLLECTION_RADIUS:
                self.collected_status[idx] = True
                hits += 1
        if hits > 0:
            reward += hits * 25  # per-task collection bonus
        reward -= 0.1  # small per-step time penalty
        done = False
        if np.all(self.collected_status):
            reward += 100  # completion bonus ends the episode
            done = True
        if self.current_step >= self.max_steps:
            done = True
        return self._get_state(), reward, done, {}


# --- [Core fix] Repairs the bug in the ActorCritic network ---
class ActorCritic(nn.Module):
    """Two-headed policy/value network with a shared Tanh MLP trunk."""

    def __init__(self, state_dim, action_dim):
        super(ActorCritic, self).__init__()
        trunk = nn.Sequential(
            nn.Linear(state_dim, 256), nn.Tanh(),
            nn.Linear(256, 256), nn.Tanh(),
        )
        # The trunk module object is shared, so both heads train the same
        # backbone parameters.
        self.actor = nn.Sequential(trunk, nn.Linear(256, action_dim))
        self.critic = nn.Sequential(trunk, nn.Linear(256, 1))

    def act(self, state):
        """Sample an action from the current policy.

        Returns the sampled action and the log-probability of that same
        sample, both detached from the graph (rollout-time use only).
        """
        dist = Categorical(logits=self.actor(state))
        picked = dist.sample()
        logprob = dist.log_prob(picked)
        return picked.detach(), logprob.detach()

    def evaluate(self, state, action):
        """Score state-action pairs: (log_prob, squeezed state value, entropy)."""
        dist = Categorical(logits=self.actor(state))
        logprob = dist.log_prob(action)
        entropy = dist.entropy()
        value = self.critic(state)
        return logprob, torch.squeeze(value), entropy


# --- PPO agent and rollout buffer (unchanged) ---
class RolloutBuffer:
    """Flat storage for one on-policy batch of transitions."""

    def __init__(self):
        self.clear()

    def clear(self):
        """Drop all stored transitions."""
        self.states = []
        self.actions = []
        self.logprobs = []
        self.rewards = []
        self.is_terminals = []


class PPOAgent:
    """Clipped-surrogate PPO; ``policy_old`` is a frozen copy used for rollouts."""

    def __init__(self, state_dim, action_dim):
        self.buffer = RolloutBuffer()
        self.policy = ActorCritic(state_dim, action_dim).to(device)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=LEARNING_RATE, eps=1e-5)
        self.policy_old = ActorCritic(state_dim, action_dim).to(device)
        self.policy_old.load_state_dict(self.policy.state_dict())
        self.MseLoss = nn.MSELoss()

    def select_action(self, state):
        """Sample from the rollout policy and record state/action/logprob."""
        with torch.no_grad():
            state_t = torch.FloatTensor(state).to(device)
            chosen, logprob = self.policy_old.act(state_t)
        self.buffer.states.append(state_t)
        self.buffer.actions.append(chosen)
        self.buffer.logprobs.append(logprob)
        return chosen.item()

    def select_deterministic_action(self, state):
        """Greedy (argmax-logit) action for evaluation; records nothing."""
        with torch.no_grad():
            state_t = torch.FloatTensor(state).to(device)
            greedy = torch.argmax(self.policy_old.actor(state_t)).item()
        return greedy

    def update(self):
        """Run PPO_EPOCHS of clipped-surrogate optimisation on the buffered batch."""
        # Monte-Carlo returns, computed back-to-front and reset at episode ends.
        # NOTE(review): a rollout truncated mid-episode bootstraps with 0 rather
        # than the critic value — standard for minimal PPO, slightly biased.
        rev_returns = []
        running = 0
        for r, terminal in zip(reversed(self.buffer.rewards), reversed(self.buffer.is_terminals)):
            running = r + GAMMA * (0 if terminal else running)
            rev_returns.append(running)
        rewards = torch.tensor(rev_returns[::-1], dtype=torch.float32).to(device)
        # Batch-normalise the returns for optimisation stability.
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-7)

        old_states = torch.squeeze(torch.stack(self.buffer.states, dim=0)).detach().to(device)
        old_actions = torch.squeeze(torch.stack(self.buffer.actions, dim=0)).detach().to(device)
        old_logprobs = torch.squeeze(torch.stack(self.buffer.logprobs, dim=0)).detach().to(device)

        for _ in range(PPO_EPOCHS):
            logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions)
            # Importance ratio of the current policy vs the rollout policy.
            ratios = torch.exp(logprobs - old_logprobs.detach())
            advantages = rewards - state_values.detach()
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1 - PPO_CLIP, 1 + PPO_CLIP) * advantages
            # Clipped policy loss + value loss - entropy bonus.
            loss = -torch.min(surr1, surr2) + 0.5 * self.MseLoss(state_values, rewards) - 0.01 * dist_entropy
            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()

        # Sync the rollout policy and start a fresh batch.
        self.policy_old.load_state_dict(self.policy.state_dict())
        self.buffer.clear()


# --- Plotting and main training loop (unchanged) ---
def plot_rewards(episode_rewards, i_episode):
    """Save a reward curve (raw + 100-episode moving average) to results/."""
    plt.figure(figsize=(12, 6))
    plt.plot(episode_rewards, label='每轮奖励', alpha=0.3)
    window = np.ones(100) / 100
    moving_avg = np.convolve(episode_rewards, window, mode='valid')
    # 'valid' convolution starts after 100 episodes, hence the +99 x-offset.
    plt.plot(np.arange(len(moving_avg)) + 99, moving_avg, color='red', linewidth=2, label='100轮移动平均')
    # Mark the curriculum-phase boundaries.
    plt.axvline(x=PHASE1_EPISODES, color='green', linestyle='--', label='阶段1结束')
    plt.axvline(x=PHASE2_EPISODES, color='orange', linestyle='--', label='阶段2结束')
    plt.title(f"Episode {i_episode} 奖励变化 (修正版)", fontsize=16)
    plt.xlabel("Episode")
    plt.ylabel("总奖励")
    plt.grid(True)
    plt.legend()
    plt.savefig(f"results/reward_curve_episode_{i_episode}.png")
    plt.close()


def test_and_visualize_trajectory(agent, env, i_episode):
    """Roll out the greedy policy once and save the trajectory plot to results/."""
    print(f"--- 生成第 {i_episode} 轮的轨迹图中... ---")
    state = env.reset(i_episode)
    initial_task_positions = env.task_positions.copy()
    trajectory = [env.uav_pos.copy()]
    for _ in range(env.max_steps):
        action = agent.select_deterministic_action(state)
        state, _, done, _ = env.step(action)
        trajectory.append(env.uav_pos.copy())
        if done:
            break
    path = np.array(trajectory)
    plt.figure(figsize=(10, 10))
    plt.scatter(initial_task_positions[:, 0], initial_task_positions[:, 1], c='red', s=100, marker='x', label='任务点')
    plt.plot(path[:, 0], path[:, 1], 'b-', label='UAV轨迹')
    plt.scatter(path[0, 0], path[0, 1], c='green', s=150, marker='o', label='起点')
    plt.scatter(path[-1, 0], path[-1, 1], c='purple', s=150, marker='*', label='终点')
    plt.title(f"Episode {i_episode} 学习策略轨迹 (修正版)", fontsize=16)
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.xlim(0, env.area_size)
    plt.ylim(0, env.area_size)
    plt.grid(True)
    plt.legend()
    plt.gca().set_aspect('equal', adjustable='box')
    plt.savefig(f"results/trajectory_episode_{i_episode}.png")
    plt.close()


if __name__ == "__main__":
    os.makedirs("results", exist_ok=True)
    env = CurriculumUAVEnv()
    agent = PPOAgent(state_dim=env.state_space_dim, action_dim=env.action_space_dim)
    episode_rewards = []
    time_step = 0
    print("开始训练 (PPO课程学习-最终修正版)...")
    start_time = time.time()
    for i_episode in range(1, TOTAL_EPISODES + 1):
        state = env.reset(i_episode)
        current_episode_reward = 0
        for _ in range(MAX_STEPS):
            time_step += 1
            action = agent.select_action(state)
            state, reward, done, _ = env.step(action)
            agent.buffer.rewards.append(reward)
            agent.buffer.is_terminals.append(done)
            current_episode_reward += reward
            # Update as soon as the rollout buffer fills — may fire mid-episode.
            if len(agent.buffer.states) >= UPDATE_TIMESTEP:
                agent.update()
            if done:
                break
        episode_rewards.append(current_episode_reward)
        if i_episode % 20 == 0:
            avg_reward = np.mean(episode_rewards[-100:])
            print(
                f"Episode {i_episode}/{TOTAL_EPISODES} | Timestep {time_step} | Avg Reward (last 100): {avg_reward:.2f} | Time: {time.time() - start_time:.1f}s")
        if i_episode % SAVE_FREQ == 0 or i_episode == TOTAL_EPISODES:
            plot_rewards(episode_rewards, i_episode)
            test_and_visualize_trajectory(agent, env, i_episode)
    print("训练完成！")
