import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time
import gymnasium as gym
from gymnasium import spaces
from stable_baselines3 import PPO
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.monitor import Monitor

'''PPO算法 - 使用Stable Baseline3 - 加入基站卸载决策'''
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

EPISODES_PER_TASK = 200

# 环境参数
AREA_SIZE = 200
NUM_USERS = 10
MAX_STEPS = 300
MAX_DISTANCE_COLLECT = 15

# UAV参数
UAV_HEIGHT = 30.0
UAV_COMPUTE_CAPACITY = 1e10
MIN_UAV_SPEED = 2.0  # 最小速度
MAX_UAV_SPEED = 20.0  # 最大速度
DEFAULT_UAV_SPEED = 10.0  # 默认速度

# 基站参数
BASE_STATION_POSITION = np.array([0.0, 0.0])
BASE_STATION_COMPUTE_CAPACITY = 2e11
BASE_STATION_HEIGHT = 10.0

# PPO参数
LEARNING_RATE = 3e-4
GAMMA = 0.99
REWARD_SCALE = 0.15

# GRU参数
SEQUENCE_LENGTH = 10
HIDDEN_SIZE = 128

# 通信参数
BANDWIDTH = 1e6
USER_TRANSMIT_POWER = 0.1
CHANNEL_GAIN_REF_DB = 30.0
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH
RICE_FACTOR = 5

# 任务参数
TASK_SIZE_BITS = [1e6, 2e6]
TASK_CPU_CYCLES = [5e8, 15e8]

# UAV飞行能耗模型参数
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81
AIR_DENSITY = 1.225
ROTOR_RADIUS = 0.4
NUM_ROTORS = 4
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-27

# 权重参数
DELAY_WEIGHT = 0.7
ENERGY_WEIGHT = 0.3
DELAY_SCALE = 100.0
ENERGY_SCALE = 0.001


class UAVTaskCollectionEnv(gym.Env):
    """Gymnasium environment for UAV-assisted mobile edge computing.

    A single UAV flies over a square area, collects computation tasks from
    ground users that come within MAX_DISTANCE_COLLECT metres, and for each
    collected task applies the current offloading-ratio action to decide how
    much of the task is relayed to the ground base station (MEC).

    Observation: a rolling window of SEQUENCE_LENGTH state frames (for the
    GRU policy), each frame holding the normalized UAV position, four
    features per user, episode progress, and the UAV-to-BS distance.
    Action: four continuous values in [-1, 1] interpreted by step() as
    [direction_x, direction_y, speed, offloading_ratio].
    """

    def __init__(self):
        super(UAVTaskCollectionEnv, self).__init__()

        # State dim: UAV xy (2) + 4 features per user + step progress (1)
        # + base-station distance (1).
        state_dim = 2 + NUM_USERS * 4 + 1 + 1
        self.observation_space = spaces.Box(
            low=0.0, high=1.0, shape=(SEQUENCE_LENGTH, state_dim), dtype=np.float32
        )

        # Action: [x_movement, y_movement, speed, offloading_ratio].
        self.action_space = spaces.Box(
            low=-1.0, high=1.0, shape=(4,), dtype=np.float32
        )

        # Scenario layout: user positions and task profiles are sampled once
        # here; reset() does NOT re-sample them, so they persist across episodes.
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)  # which users currently have a task
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        # Per-user bookkeeping, filled in at the moment a task is collected.
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.user_offloading_ratios = np.zeros(NUM_USERS)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1  # curriculum phase (1..3), see update_task_generating_users()

        # Cumulative per-episode reward components (diagnostics only).
        self.episode_reward_breakdown = {
            'collection_reward': 0.0,
            'proximity_reward': 0.0,
            'completion_reward': 0.0,
            'cost': 0.0,
            'step_penalty': 0.0
        }

    def _calculate_rice_channel_gain(self, distance_2d, height=UAV_HEIGHT):
        """Return an instantaneous Rician-faded channel power gain.

        Combines deterministic path loss (exponent PATH_LOSS_EXPONENT over
        the 3-D distance) with a random Rician fading sample (K-factor
        RICE_FACTOR); the result is stochastic per call.
        """
        distance_3d = np.sqrt(distance_2d ** 2 + height ** 2)
        if distance_3d < 1.0: distance_3d = 1.0  # clamp to avoid blow-up at very short range
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        K = RICE_FACTOR
        h_los = 1.0
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)  # unit-variance complex scatter term
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """Uplink delay (s) for the user -> UAV transfer, via Shannon capacity."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return self.task_sizes[user_index] / data_rate

    def _calculate_uav_to_bs_delay(self, task_size):
        """Relay delay (s) for transferring ``task_size`` bits from UAV to BS."""
        bs_distance_2d = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        height_diff = abs(UAV_HEIGHT - BASE_STATION_HEIGHT)
        channel_gain = self._calculate_rice_channel_gain(bs_distance_2d, height_diff)
        # NOTE(review): reuses USER_TRANSMIT_POWER for the UAV's transmitter —
        # confirm whether a separate UAV transmit power was intended.
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return task_size / data_rate

    def _calculate_completion_delay(self, user_index, offloading_ratio):
        """Total task-completion delay (s).

        For partial offloading the local (UAV) branch and the base-station
        branch run in parallel after the common user->UAV uplink, so the
        slower branch dominates.
        """
        total_task_size = self.task_sizes[user_index]
        total_cpu_cycles = self.task_cpu_cycles[user_index]

        # The user->UAV uplink is always paid in full, regardless of the split.
        user_to_uav_delay = self._calculate_offloading_delay(user_index,
                                                             np.linalg.norm(
                                                                 self.uav_position - self.user_positions[user_index]))

        if offloading_ratio == 0:
            # Fully local: compute the whole task on the UAV.
            local_computation_delay = total_cpu_cycles / UAV_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + local_computation_delay
        elif offloading_ratio == 1:
            # Fully offloaded: relay everything to the base station.
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(total_task_size)
            bs_computation_delay = total_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay
        else:
            # Partial: split bits/cycles proportionally between the branches.
            local_task_size = total_task_size * (1 - offloading_ratio)  # NOTE(review): currently unused
            bs_task_size = total_task_size * offloading_ratio
            local_cpu_cycles = total_cpu_cycles * (1 - offloading_ratio)
            bs_cpu_cycles = total_cpu_cycles * offloading_ratio

            local_computation_delay = local_cpu_cycles / UAV_COMPUTE_CAPACITY
            local_total_delay = user_to_uav_delay + local_computation_delay

            uav_to_bs_delay = self._calculate_uav_to_bs_delay(bs_task_size)
            bs_computation_delay = bs_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            bs_total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

            total_delay = max(local_total_delay, bs_total_delay)

        return total_delay

    def _calculate_flight_energy(self, distance_moved, actual_speed=None, time_delta=1.0):
        """Flight energy (J) consumed over ``time_delta`` seconds.

        Uses ``actual_speed`` when given, otherwise derives speed from the
        distance moved.  NOTE(review): the rotary-wing power expression below
        plugs P_INDUCED_COEFF (a power, in watts) into slots the textbook
        model reserves for induced/tip velocities — verify against the
        intended reference model.
        """
        if actual_speed is None:
            speed = distance_moved / time_delta
        else:
            speed = actual_speed

        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index, offloading_ratio):
        """UAV computation energy (J): kappa * f^2 * cycles for the local share."""
        local_cpu_cycles = self.task_cpu_cycles[user_index] * (1 - offloading_ratio)
        working_frequency = UAV_COMPUTE_CAPACITY
        energy = EFFECTIVE_SWITCHED_CAPACITANCE * (working_frequency ** 2) * local_cpu_cycles
        return energy

    def update_task_generating_users(self, phase):
        """Set the curriculum phase.

        Phase 1 activates all NUM_USERS users; phase 2 a random 9 of them;
        any other phase a random 8.
        """
        self.current_phase = phase
        if phase == 1:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 2:
            indices = np.random.choice(NUM_USERS, 9, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
        else:
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True

    def reset(self, seed=None, options=None):
        """Reset episode state; user layout and task profiles stay fixed."""
        super().reset(seed=seed)
        if seed is not None:
            # NOTE(review): reseeds the *global* numpy/random state, which
            # affects every other consumer of these RNGs, not just this env.
            np.random.seed(seed)
            random.seed(seed)

        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        self.user_completion_delays.fill(0)
        self.user_offloading_delays.fill(0)
        self.user_computation_delays.fill(0)
        self.user_computation_energies.fill(0)
        self.user_offloading_ratios.fill(0)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()

        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] = 0.0

        # Pre-fill the history so the first observation is already a full
        # SEQUENCE_LENGTH window.
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)

        return self._get_gru_state(), {}

    def step(self, action):
        """Apply a 4-dim action: [direction_x, direction_y, speed, offloading_ratio]."""
        # Normalize the movement direction; a near-zero vector means hover.
        direction_raw = action[:2]
        direction_norm = np.linalg.norm(direction_raw)
        if direction_norm > 1e-6:
            movement_direction = direction_raw / direction_norm
        else:
            movement_direction = np.array([0.0, 0.0])

        # Map speed from [-1, 1] to [MIN_UAV_SPEED, MAX_UAV_SPEED] and the
        # offloading ratio from [-1, 1] to [0, 1].
        speed = (action[2] + 1) / 2 * (MAX_UAV_SPEED - MIN_UAV_SPEED) + MIN_UAV_SPEED
        offloading_ratio = (action[3] + 1) / 2

        # Move the UAV and clip it to the service area.
        movement = movement_direction * speed
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())

        actual_movement = self.uav_position - prev_position
        distance_moved = np.linalg.norm(actual_movement)

        # Flight energy for this step (time_delta defaults to 1 s).
        if distance_moved > 0:
            actual_speed = speed if direction_norm > 1e-6 else 0
            flight_energy_step = self._calculate_flight_energy(distance_moved, actual_speed=actual_speed)
        else:
            flight_energy_step = self._calculate_flight_energy(0, actual_speed=0)

        self.total_flight_energy += flight_energy_step
        self.current_speed = speed if direction_norm > 1e-6 else 0

        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect every active, in-range, not-yet-collected task; this step's
        # offloading_ratio action applies to all tasks collected this step.
        newly_collected = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1

                    self.user_offloading_ratios[i] = offloading_ratio
                    self.user_offloading_delays[i] = self._calculate_offloading_delay(i, new_distances[i])
                    self.user_completion_delays[i] = self._calculate_completion_delay(i, offloading_ratio)
                    self.user_computation_delays[i] = self.user_completion_delays[i] - self.user_offloading_delays[i]
                    self.user_computation_energies[i] = self._calculate_computation_energy(i, offloading_ratio)

        self.step_count += 1

        # Aggregate delay/energy over every task collected so far.
        completed_indices = np.where(self.collected_tasks & self.task_generating_users)[0]

        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
        else:
            total_delay = 0.0
            total_comp_energy = 0.0
            avg_total_delay = 0.0

        total_energy = self.total_flight_energy + total_comp_energy

        reward_info = self.calculate_reward(newly_collected, total_energy, total_delay, new_distances,
                                            self.last_distances)
        reward = reward_info['total_reward']

        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] += reward_info[key]

        self.last_distances = new_distances

        # Episode ends when all required tasks are collected or time runs out.
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)
        terminated = done
        truncated = False

        self.observation_history.append(self._get_state())

        info = {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "episode_reward_breakdown": self.episode_reward_breakdown
        }

        return self._get_gru_state(), reward, terminated, truncated, info

    def _get_state(self):
        """Build one normalized state frame, including the BS distance."""
        state = np.zeros(2 + NUM_USERS * 4 + 1 + 1)
        state[0:2] = self.uav_position / AREA_SIZE

        # Per-user features: normalized distance, collected flag, task-active
        # flag, normalized CPU load.
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 4
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # normalize by the area diagonal
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
            state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]

        state[-2] = self.step_count / MAX_STEPS  # episode progress

        bs_distance = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        state[-1] = bs_distance / np.sqrt(2 * AREA_SIZE ** 2)

        return state.astype(np.float32)

    def _get_gru_state(self):
        """Return the stacked (SEQUENCE_LENGTH, state_dim) observation window."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history), dtype=np.float32)

    def calculate_reward(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Compute the shaped step reward and its components.

        Components: a per-collection bonus (larger earlier in the episode), a
        proximity shaping term toward the nearest uncollected user, a
        terminal completion bonus, a terminal delay/energy cost with
        phase-dependent weights, and a growing per-step penalty.  Every term
        is scaled by REWARD_SCALE; cost and step_penalty are returned negated.
        """
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users) if sum(self.task_generating_users) > 0 else 1
        completion_rate = collected_required / total_required

        # Earlier collections earn a larger bonus.
        time_factor = max(0, 1 - self.step_count / MAX_STEPS)
        collection_reward = newly_collected * 30.0 * (1 + time_factor * 0.5)

        # Shaping: reward reducing the distance to the closest uncollected user.
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            uncollected_distances_old = old_distances[uncollected_indices]
            uncollected_distances_new = new_distances[uncollected_indices]
            closest_idx = np.argmin(uncollected_distances_new)
            dist_diff = uncollected_distances_old[closest_idx] - uncollected_distances_new[closest_idx]

            if uncollected_distances_new[closest_idx] < 50:
                proximity_reward = dist_diff * 0.5  # stronger pull once within 50 m
            else:
                proximity_reward = dist_diff * 0.3

        # Per-step penalty grows linearly with episode progress.
        progress = self.step_count / MAX_STEPS
        step_penalty = 0.25 + 0.5 * progress

        # Terminal completion bonus, scaled by how quickly the episode ended.
        completion_reward = 0
        done = (self.step_count >= MAX_STEPS) or (collected_required == total_required)
        if done:
            step_efficiency = max(0.1, 1 - self.step_count / MAX_STEPS)
            base_completion = completion_rate * 200
            bonus = 500 if completion_rate == 1.0 else 0
            completion_reward = (base_completion + bonus) * step_efficiency

        # Terminal delay/energy cost; later phases weigh delay more heavily.
        cost = 0
        if done and collected_required > 0:
            if self.current_phase == 1:
                delay_weight, energy_weight = 0.6, 0.4
            elif self.current_phase == 2:
                delay_weight, energy_weight = 0.7, 0.3
            else:
                delay_weight, energy_weight = 0.8, 0.2

            delay_penalty = total_delay * delay_weight * 1200
            energy_penalty = total_energy * energy_weight * 0.015
            cost = (delay_penalty + energy_penalty) / collected_required

        total_reward = (collection_reward + proximity_reward + completion_reward - step_penalty - cost)

        return {
            'total_reward': total_reward * REWARD_SCALE,
            'collection_reward': collection_reward * REWARD_SCALE,
            'proximity_reward': proximity_reward * REWARD_SCALE,
            'completion_reward': completion_reward * REWARD_SCALE,
            'cost': -cost * REWARD_SCALE,
            'step_penalty': -step_penalty * REWARD_SCALE
        }

    def render(self, mode='rgb_array'):
        """Draw the scenario (BS, users, UAV trajectory, collection radius).

        NOTE(review): the figure is closed immediately at the end without
        show()/savefig(), so as written this method produces no visible or
        saved output; ``mode`` is also ignored and no array is returned.
        """
        plt.figure(figsize=(10, 10))

        # Draw the base station.
        plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                    s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
        plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                     textcoords="offset points", xytext=(0, -25),
                     ha='center', fontsize=10, fontweight='bold')

        # Draw users: green = collected, red = pending, gray = no task.
        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)

            if self.task_generating_users[i] and self.collected_tasks[i]:
                offload_ratio = self.user_offloading_ratios[i]
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy\nOffload:{offload_ratio:.2f}"
            else:
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy"

            plt.annotate(task_info, (pos[0], pos[1]), fontsize=8, ha='center', va='bottom')

        # UAV trajectory, current position, and collection radius.
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]), MAX_DISTANCE_COLLECT, color='blue',
                            fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        # Dashed UAV-to-BS link.
        plt.plot([self.uav_position[0], BASE_STATION_POSITION[0]],
                 [self.uav_position[1], BASE_STATION_POSITION[1]],
                 'orange', linestyle='--', alpha=0.5, linewidth=1)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        title = f"Step {self.step_count}, 收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        if hasattr(self, 'current_speed'):
            title += f", 当前速度: {self.current_speed:.1f} m/s"
        plt.title(title)
        plt.grid(True)
        plt.legend()
        plt.close()


# Custom GRU feature extractor
class GRUExtractor(BaseFeaturesExtractor):
    """Recurrent feature extractor for SB3 policies.

    Runs the (seq_len, obs_dim) observation window through a single-layer
    GRU and projects the last hidden state to ``features_dim`` features for
    the downstream actor/critic heads.
    """

    def __init__(self, observation_space: gym.Space, features_dim: int = 128):
        super(GRUExtractor, self).__init__(observation_space, features_dim)

        # Observation shape is (SEQUENCE_LENGTH, state_dim).
        self.seq_len, self.input_dim = observation_space.shape
        self.hidden_size = HIDDEN_SIZE

        # Single-layer GRU over the stacked observation history.
        self.gru = nn.GRU(
            input_size=self.input_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True
        )

        # Linear projection from the final hidden state to the feature vector.
        self.output_layer = nn.Linear(self.hidden_size, features_dim)

    def forward(self, observations: torch.Tensor) -> torch.Tensor:
        """Map a batch of observation sequences to feature vectors.

        :param observations: tensor of shape (batch, seq_len, input_dim)
        :return: tensor of shape (batch, features_dim)
        """
        # gru_out: (batch, seq_len, hidden_size).
        # (Fixed: removed an unused `batch_size` local that served no purpose.)
        gru_out, _ = self.gru(observations)

        # Keep only the final time step's hidden state.
        last_output = gru_out[:, -1, :]  # (batch, hidden_size)

        return self.output_layer(last_output)  # (batch, features_dim)


# Custom PPO policy
class CustomPPOPolicy(ActorCriticPolicy):
    """Actor-critic policy that routes observations through GRUExtractor.

    Identical to the stock ActorCriticPolicy except that the feature
    extractor is pinned to the recurrent GRU extractor with 128 features.
    """

    def __init__(self, observation_space, action_space, lr_schedule, **kwargs):
        # Pin the feature extractor; every other option is forwarded as-is.
        super(CustomPPOPolicy, self).__init__(
            observation_space,
            action_space,
            lr_schedule,
            features_extractor_class=GRUExtractor,
            features_extractor_kwargs={"features_dim": 128},
            **kwargs
        )


# Training callback
class CustomCallback(BaseCallback):
    """Curriculum + logging callback for the UAV environment.

    Counts finished episodes, advances the task-generation phase every
    ``EPISODES_PER_TASK`` episodes, records per-episode metrics, and
    periodically saves training-curve plots and renders the environment.
    """

    def __init__(self, env_wrapper, eval_freq=50, verbose=1):
        super(CustomCallback, self).__init__(verbose)
        self.env_wrapper = env_wrapper  # Monitor-wrapped (or raw) environment
        self.eval_freq = eval_freq  # plot/render every this many episodes
        self.episode_count = 0
        self.phase_episode_count = 0
        self.current_phase = 1
        self.episodes_per_phase = EPISODES_PER_TASK

        # Per-episode training metrics.
        self.rewards_history = []
        self.collection_history = []
        self.energy_history = []
        self.delay_history = []

    def _on_step(self) -> bool:
        # One done flag per sub-env; only a single env is used here.
        if self.locals.get('dones', [False])[0]:
            self.episode_count += 1
            self.phase_episode_count += 1

            # Pull episode information from the terminal-step info dict.
            if 'infos' in self.locals:
                info = self.locals['infos'][0]
                # BUG FIX: the previous code summed self.locals['episode_rewards'],
                # a key PPO never populates, so every logged reward was 0.
                # The Monitor wrapper injects info['episode'] = {'r': ..., 'l': ...}
                # on the terminal step; read the true episode return from there.
                ep_stats = info.get('episode')
                episode_reward = float(ep_stats['r']) if ep_stats else 0.0

                self.rewards_history.append(episode_reward)
                self.collection_history.append(info.get('collected_required', 0))
                self.energy_history.append(info.get('energy', 0))
                self.delay_history.append(info.get('delay', 0))

                # Progress printout every 10 episodes.
                if self.episode_count % 10 == 0:
                    collected = info.get('collected_required', 0)
                    total = info.get('total_required', 1)
                    print(f"Phase {self.current_phase}, Episode {self.phase_episode_count}/{self.episodes_per_phase}: "
                          f"Tasks {collected}/{total}, Reward: {episode_reward:.2f}, "
                          f"Energy: {info.get('energy', 0):.1f}, Delay: {info.get('delay', 0):.3f}")

            # Advance the curriculum phase once enough episodes finished.
            if self.phase_episode_count >= self.episodes_per_phase and self.current_phase < 3:
                print(f"\n=== 切换到阶段 {self.current_phase + 1} ===")
                self.current_phase += 1
                self.phase_episode_count = 0

                # env_wrapper may be a Monitor (has .env) or the raw env.
                if hasattr(self.env_wrapper, 'env'):
                    self.env_wrapper.env.update_task_generating_users(self.current_phase)
                else:
                    self.env_wrapper.update_task_generating_users(self.current_phase)

            # Periodic diagnostics.
            if self.episode_count % self.eval_freq == 0:
                self._generate_training_plots()

                if hasattr(self.env_wrapper, 'env'):
                    self.env_wrapper.env.render()
                else:
                    self.env_wrapper.render()

        return True

    def _generate_training_plots(self):
        """Save reward/collection/energy/delay curves under results/."""
        if not self.rewards_history:
            return

        # Robustness: ensure the output directory exists even when the
        # callback is used outside train_ppo().
        os.makedirs("results", exist_ok=True)

        plt.figure(figsize=(20, 5))

        # Reward curve (raw + 10-episode moving average).
        plt.subplot(1, 4, 1)
        plt.plot(self.rewards_history, alpha=0.6, label='Episode Reward')
        if len(self.rewards_history) >= 10:
            smoothed = [np.mean(self.rewards_history[max(0, i - 9):i + 1]) for i in range(len(self.rewards_history))]
            plt.plot(smoothed, label='Smoothed', color='red')
        plt.axvline(x=EPISODES_PER_TASK, color='green', linestyle='--', label='Phase 1->2')
        plt.axvline(x=2 * EPISODES_PER_TASK, color='purple', linestyle='--', label='Phase 2->3')
        plt.title("Training Reward")
        plt.xlabel("Episode")
        plt.ylabel("Reward")
        plt.legend()
        plt.grid(True)

        # Collected-tasks curve.
        plt.subplot(1, 4, 2)
        plt.plot(self.collection_history)
        plt.axvline(x=EPISODES_PER_TASK, color='green', linestyle='--')
        plt.axvline(x=2 * EPISODES_PER_TASK, color='purple', linestyle='--')
        plt.title("Collected Tasks")
        plt.xlabel("Episode")
        plt.ylabel("Number of Tasks")
        plt.grid(True)

        # Energy curve.
        plt.subplot(1, 4, 3)
        plt.plot(self.energy_history)
        plt.axvline(x=EPISODES_PER_TASK, color='green', linestyle='--')
        plt.axvline(x=2 * EPISODES_PER_TASK, color='purple', linestyle='--')
        plt.title("Total Energy")
        plt.xlabel("Episode")
        plt.ylabel("Energy")
        plt.grid(True)

        # Delay curve.
        plt.subplot(1, 4, 4)
        plt.plot(self.delay_history)
        plt.axvline(x=EPISODES_PER_TASK, color='green', linestyle='--')
        plt.axvline(x=2 * EPISODES_PER_TASK, color='purple', linestyle='--')
        plt.title("Avg Delay")
        plt.xlabel("Episode")
        plt.ylabel("Delay (s)")
        plt.grid(True)

        plt.tight_layout()
        plt.savefig(f"results/training_curves_episode_{self.episode_count}.png", dpi=150, bbox_inches='tight')
        plt.close()


def train_ppo():
    """Train the PPO agent on the UAV task-collection environment.

    Builds a single-env DummyVecEnv (Monitor-wrapped), trains PPO with the
    custom GRU policy for three curriculum phases worth of timesteps, saves
    the final model, and returns ``(model, vec_env)``.
    """
    print("开始PPO训练...")
    os.makedirs("results", exist_ok=True)

    # Environment factory: Monitor records episode returns/lengths.
    def build_env():
        return Monitor(UAVTaskCollectionEnv())

    vec_env = DummyVecEnv([build_env])

    # Actor/critic network options shared by both heads.
    policy_config = dict(
        log_std_init=-2,
        ortho_init=False,
        activation_fn=torch.nn.ReLU,
        net_arch=[256, 256],
    )

    model = PPO(
        CustomPPOPolicy,
        vec_env,
        learning_rate=LEARNING_RATE,
        n_steps=2048,           # rollout length per update
        batch_size=64,          # minibatch size
        n_epochs=10,            # optimization epochs per update
        gamma=GAMMA,            # discount factor
        gae_lambda=0.95,        # GAE parameter
        clip_range=0.2,         # PPO clipping parameter
        ent_coef=0.01,          # entropy coefficient
        vf_coef=0.5,            # value-loss coefficient
        max_grad_norm=0.5,      # gradient clipping
        use_sde=False,          # no state-dependent exploration
        sde_sample_freq=-1,
        target_kl=None,
        tensorboard_log="./ppo_uav_logs/",
        policy_kwargs=policy_config,
        verbose=1,
        device=device,
        seed=SEED,
    )

    # Curriculum/logging callback operates on the underlying (Monitor) env.
    callback = CustomCallback(vec_env.envs[0], eval_freq=50)

    total_timesteps = 3 * EPISODES_PER_TASK * MAX_STEPS
    print(f"总训练步数: {total_timesteps}")

    model.learn(
        total_timesteps=total_timesteps,
        callback=callback,
        tb_log_name="PPO",
        reset_num_timesteps=False,
    )

    # Persist the trained model.
    model.save("results/ppo_final_model")
    print("训练完成！模型已保存到 results/ppo_final_model")

    return model, vec_env


def test_ppo_model(model_path="results/ppo_final_model", phase=3, render_steps=True):
    """Run a trained PPO model for one evaluation episode.

    :param model_path: path of the saved SB3 model (without .zip suffix)
    :param phase: curriculum phase selecting which users generate tasks
    :param render_steps: print periodic progress lines when True
    :return: (env, last info dict, total episode reward)
    """
    print(f"开始测试PPO模型 (阶段 {phase})...")

    # Fix the RNG state so test runs are reproducible.
    torch.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)

    # Build the evaluation environment at the requested phase.
    env = UAVTaskCollectionEnv()
    env.update_task_generating_users(phase)

    # Load the trained agent.
    model = PPO.load(model_path, device=device)

    obs, _ = env.reset(seed=SEED)

    # Evaluation bookkeeping.
    total_reward = 0
    step_rewards = []
    trajectory = [env.uav_position.copy()]  # starting position
    collection_times = np.zeros(NUM_USERS)
    collection_order = []

    print(f"开始测试... (最大步数: {MAX_STEPS})")

    for step in range(MAX_STEPS):
        # Keep some stochasticity in the evaluated policy (intentional).
        action, _states = model.predict(obs, deterministic=False)

        # Snapshot collection flags so newly collected users can be detected.
        collected_before = env.collected_tasks.copy()

        obs, reward, terminated, truncated, info = env.step(action)

        # BUG FIX: the position was previously recorded *before* env.step(),
        # which duplicated the start point and dropped the final position.
        # Record the post-step position so the trajectory ends where the UAV
        # actually stopped.
        trajectory.append(env.uav_position.copy())

        # Record which users were collected during this step.
        for i in range(NUM_USERS):
            if env.task_generating_users[i] and env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step + 1
                collection_order.append(i)

        total_reward += reward
        step_rewards.append(reward)

        # Periodic console progress.
        if render_steps and (step % 20 == 0 or terminated or truncated):
            print(f"Step {step + 1}: Reward={reward:.3f}, "
                  f"Collected={info['collected_required']}/{info['total_required']}, "
                  f"Position=({env.uav_position[0]:.1f}, {env.uav_position[1]:.1f}), "
                  f"Speed={env.current_speed:.1f}")

        # Stop on episode end.
        if terminated or truncated:
            if info['collected_required'] == info['total_required']:
                print(f"✅ 成功完成所有任务! 用时 {step + 1} 步")
            else:
                print(f"⏰ 达到最大步数限制")
            break

    # Detailed report and plots.
    generate_test_report(env, info, total_reward, step_rewards, trajectory, collection_times, collection_order, phase)

    return env, info, total_reward


def generate_test_report(env, info, total_reward, step_rewards, trajectory, collection_times, collection_order, phase):
    """Print a per-episode evaluation summary and emit the trajectory/reward plots."""
    divider = '=' * 60
    print(f"\n{divider}")
    print(f"测试报告 - 阶段 {phase}")
    print(f"{divider}")

    # Headline statistics.
    collected_count = info['collected_required']
    total_count = info['total_required']
    success_rate = collected_count / total_count * 100 if total_count > 0 else 0

    print(f"任务完成情况: {collected_count}/{total_count} ({success_rate:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总步数: {env.step_count}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"平均延迟: {info['delay']:.4f} 秒")

    # Offloading-decision statistics over collected tasks.
    print(f"\n卸载决策统计:")
    collected_indices = [
        i for i in range(NUM_USERS)
        if env.task_generating_users[i] and env.collected_tasks[i]
    ]

    if collected_indices:
        offload_ratios = [env.user_offloading_ratios[i] for i in collected_indices]
        print(f"平均卸载比例: {np.mean(offload_ratios):.3f}")

        # Bucket tasks by offloading ratio: <0.1 local, [0.1, 0.9) mixed,
        # >=0.9 remote.
        local_tasks = mixed_tasks = remote_tasks = 0
        for ratio in offload_ratios:
            if ratio < 0.1:
                local_tasks += 1
            elif ratio < 0.9:
                mixed_tasks += 1
            else:
                remote_tasks += 1

        print(f"本地处理任务: {local_tasks}")
        print(f"混合处理任务: {mixed_tasks}")
        print(f"远程处理任务: {remote_tasks}")

    # Chronological collection timeline.
    print(f"\n任务收集时间线:")
    for rank, user_id in enumerate(collection_order, start=1):
        step = int(collection_times[user_id])
        offload_ratio = env.user_offloading_ratios[user_id]
        print(f"  {rank}. 用户 {user_id + 1}: 第 {step} 步收集 (卸载比例: {offload_ratio:.3f})")

    # Any tasks left uncollected.
    uncollected = [i + 1 for i in range(NUM_USERS)
                   if env.task_generating_users[i] and not env.collected_tasks[i]]
    if uncollected:
        print(f"\n未收集任务: 用户 {uncollected}")

    # Trajectory plot.
    generate_trajectory_visualization(env, trajectory, collection_times, phase)

    # Per-step reward plot.
    generate_reward_visualization(step_rewards, phase)

    print(f"{divider}")


def generate_trajectory_visualization(env, trajectory, collection_times, phase):
    """Render the UAV flight path with users, the base station and
    collection events, then save the figure under results/.

    Args:
        env: environment instance; reads user_positions,
            task_generating_users, collected_tasks (assumed numpy bool
            arrays — the title sums their elementwise AND; verify),
            user_offloading_ratios and step_count.
        trajectory: sequence of (x, y) UAV positions, one per step.
        collection_times: per-user step index at which each task was
            collected (<= 0 means not collected at a recorded step).
        phase: curriculum phase number, used in the title and filename.
    """
    trajectory = np.array(trajectory)

    # Fix: make sure the output directory exists before plt.savefig —
    # otherwise saving raises FileNotFoundError on a fresh checkout.
    os.makedirs("results", exist_ok=True)

    plt.figure(figsize=(12, 10))

    # Base station (MEC) marker
    plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                s=300, c='orange', marker='s', label='基站 (MEC)',
                edgecolors='black', linewidth=2, zorder=5)

    # Users: green = collected, red = task pending, gray = no task
    for i, (x, y) in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            if env.collected_tasks[i]:
                color = 'green'
                marker = 'o'
                size = 150
                # Annotate with the collection step and offloading ratio
                step_collected = int(collection_times[i]) if collection_times[i] > 0 else 0
                offload_ratio = env.user_offloading_ratios[i]
                label_text = f"用户{i + 1}\n步数:{step_collected}\n卸载:{offload_ratio:.2f}"
            else:
                color = 'red'
                marker = 'o'
                size = 150
                label_text = f"用户{i + 1}\n(未收集)"
        else:
            color = 'gray'
            marker = 'o'
            size = 100
            label_text = f"用户{i + 1}\n(无任务)"

        plt.scatter(x, y, s=size, c=color, marker=marker, zorder=4)
        plt.annotate(label_text, (x, y), xytext=(5, 5),
                     textcoords='offset points', fontsize=9,
                     bbox=dict(boxstyle='round,pad=0.3', facecolor='white', alpha=0.8))

    # UAV flight path
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-',
             linewidth=2, alpha=0.7, label='UAV轨迹', zorder=3)

    # Start and end markers
    plt.scatter(trajectory[0, 0], trajectory[0, 1],
                s=200, c='blue', marker='^', label='起点', zorder=6)
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1],
                s=200, c='purple', marker='*', label='终点', zorder=6)

    # Waypoint dots (~20 along the path), with step labels on ~10 of them
    for i in range(0, len(trajectory), max(1, len(trajectory) // 20)):
        plt.scatter(trajectory[i, 0], trajectory[i, 1],
                    s=30, c='lightblue', marker='o', alpha=0.6, zorder=2)
        if i % max(1, len(trajectory) // 10) == 0:  # label the step number
            plt.annotate(f'{i}', (trajectory[i, 0], trajectory[i, 1]),
                         fontsize=8, ha='center', va='center',
                         bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Dashed lines from the UAV position at collection time to each user
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and env.collected_tasks[i]:
            step_idx = min(int(collection_times[i]), len(trajectory) - 1)
            if step_idx > 0:
                uav_pos = trajectory[step_idx]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]],
                         'g--', alpha=0.5, linewidth=2, zorder=1)

    # Link from the final UAV position to the base station
    plt.plot([trajectory[-1, 0], BASE_STATION_POSITION[0]],
             [trajectory[-1, 1], BASE_STATION_POSITION[1]],
             'orange', linestyle='--', alpha=0.7, linewidth=2,
             label='UAV-基站连接', zorder=1)

    # Axes, title, legend
    plt.xlim(-10, AREA_SIZE + 10)
    plt.ylim(-10, AREA_SIZE + 10)
    plt.xlabel('X坐标 (m)', fontsize=12)
    plt.ylabel('Y坐标 (m)', fontsize=12)
    plt.title(f'UAV任务收集轨迹 - 阶段{phase}\n'
              f'完成: {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)} 任务, '
              f'步数: {env.step_count}', fontsize=14)
    plt.grid(True, alpha=0.3)
    plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    plt.tight_layout()

    # Persist the figure
    plt.savefig(f"results/test_trajectory_phase_{phase}.png",
                dpi=300, bbox_inches='tight')
    plt.close()

    print(f"轨迹可视化已保存: results/test_trajectory_phase_{phase}.png")


def generate_reward_visualization(step_rewards, phase):
    """Plot per-step and cumulative reward curves and save them under results/.

    Args:
        step_rewards: sequence of per-step rewards from one test episode.
        phase: curriculum phase number, used in titles and the filename.
    """
    # Fix: an empty reward sequence previously crashed on
    # cumulative_rewards[-1] (IndexError) and produced nan statistics.
    if len(step_rewards) == 0:
        print(f"无步奖励数据，跳过奖励曲线绘制 (阶段 {phase})")
        return

    # Make sure the output directory exists before plt.savefig.
    os.makedirs("results", exist_ok=True)

    plt.figure(figsize=(15, 5))

    # Per-step reward
    plt.subplot(1, 2, 1)
    plt.plot(step_rewards, linewidth=1, alpha=0.8)
    plt.title(f'步奖励曲线 - 阶段{phase}')
    plt.xlabel('步数')
    plt.ylabel('奖励')
    plt.grid(True, alpha=0.3)

    # Cumulative reward
    plt.subplot(1, 2, 2)
    cumulative_rewards = np.cumsum(step_rewards)
    plt.plot(cumulative_rewards, linewidth=2, color='red')
    plt.title(f'累积奖励曲线 - 阶段{phase}')
    plt.xlabel('步数')
    plt.ylabel('累积奖励')
    plt.grid(True, alpha=0.3)

    # Summary statistics beneath the plots
    plt.figtext(0.5, 0.02,
                f'总奖励: {cumulative_rewards[-1]:.2f} | '
                f'平均步奖励: {np.mean(step_rewards):.3f} | '
                f'奖励标准差: {np.std(step_rewards):.3f}',
                ha='center', fontsize=10)

    plt.tight_layout()
    plt.savefig(f"results/test_rewards_phase_{phase}.png",
                dpi=300, bbox_inches='tight')
    plt.close()

    print(f"奖励曲线已保存: results/test_rewards_phase_{phase}.png")


def comprehensive_test_ppo(model_path="results/ppo_final_model", num_tests=5):
    """Evaluate a trained PPO model over several episodes per curriculum phase.

    Runs `num_tests` episodes for each of phases 1-3, prints per-episode and
    per-phase statistics, and finishes with a comprehensive report.

    Args:
        model_path: path of the saved SB3 PPO model to load.
        num_tests: number of evaluation episodes per phase.

    Returns:
        dict mapping 'phase_1'..'phase_3' to lists of per-episode result dicts
        ('success_rate', 'total_reward', 'steps', 'energy', 'delay',
        'collected', 'total_tasks').
    """
    print(f"\n{'=' * 60}")
    print(f"开始综合PPO测试 ({num_tests} 次测试)")
    print(f"{'=' * 60}")

    results = {
        'phase_1': [],
        'phase_2': [],
        'phase_3': []
    }

    # Fix: load the policy once instead of re-reading the model file for
    # every single test episode; evaluation only calls predict(), so the
    # model can safely be shared across episodes and phases.
    model = PPO.load(model_path, device=device)

    for phase in range(1, 4):
        print(f"\n--- 阶段 {phase} 测试 ---")
        phase_results = []

        for test_num in range(num_tests):
            print(f"\n测试 {test_num + 1}/{num_tests}:")

            # Decorrelate episodes with distinct seeds
            test_seed = SEED + test_num * 13

            try:
                # Fresh environment configured for this phase
                env = UAVTaskCollectionEnv()
                env.update_task_generating_users(phase)

                obs, _ = env.reset(seed=test_seed)

                total_reward = 0

                # Roll out one episode (stochastic policy, as in training)
                for step in range(MAX_STEPS):
                    action, _states = model.predict(obs, deterministic=False)
                    obs, reward, terminated, truncated, info = env.step(action)
                    total_reward += reward

                    if terminated or truncated:
                        break

                # Fix: guard the zero-required-tasks case instead of dividing
                # by zero (consistent with generate_test_report).
                total_required = info['total_required']
                success_rate = (info['collected_required'] / total_required * 100
                                if total_required > 0 else 0)
                result = {
                    'success_rate': success_rate,
                    'total_reward': total_reward,
                    'steps': env.step_count,
                    'energy': info['energy'],
                    'delay': info['delay'],
                    'collected': info['collected_required'],
                    'total_tasks': total_required
                }

                phase_results.append(result)
                print(f"  成功率: {success_rate:.1f}%, 奖励: {total_reward:.2f}, 步数: {env.step_count}")

            except Exception as e:
                print(f"  测试失败: {e}")
                continue

        results[f'phase_{phase}'] = phase_results

        # Per-phase averages
        if phase_results:
            avg_success = np.mean([r['success_rate'] for r in phase_results])
            avg_reward = np.mean([r['total_reward'] for r in phase_results])
            avg_steps = np.mean([r['steps'] for r in phase_results])
            print(f"\n阶段 {phase} 平均结果:")
            print(f"  平均成功率: {avg_success:.1f}%")
            print(f"  平均奖励: {avg_reward:.2f}")
            print(f"  平均步数: {avg_steps:.1f}")

    # Aggregate report across all phases
    generate_comprehensive_report(results)
    return results


def generate_comprehensive_report(results):
    """Summarize comprehensive-test runs across the three curriculum phases.

    Args:
        results: dict mapping 'phase_1'..'phase_3' to lists of per-episode
            result dicts with keys 'success_rate', 'total_reward', 'steps',
            'energy' and 'delay'. Missing or empty phases are skipped.
    """
    rule = '=' * 60
    print(f"\n{rule}")
    print("综合测试报告")
    print(f"{rule}")

    for phase_no in range(1, 4):
        runs = results.get(f'phase_{phase_no}')
        if not runs:
            # No successful episodes recorded for this phase
            continue

        def col(field):
            # Column extractor over this phase's episode records
            return [run[field] for run in runs]

        print(f"\n阶段 {phase_no}:")
        print(f"  测试次数: {len(runs)}")
        print(f"  平均成功率: {np.mean(col('success_rate')):.1f}% "
              f"(标准差: {np.std(col('success_rate')):.1f}%)")
        print(f"  平均总奖励: {np.mean(col('total_reward')):.2f} "
              f"(标准差: {np.std(col('total_reward')):.2f})")
        print(f"  平均完成步数: {np.mean(col('steps')):.1f} "
              f"(标准差: {np.std(col('steps')):.1f})")
        print(f"  平均能耗: {np.mean(col('energy')):.2f}")
        print(f"  平均延迟: {np.mean(col('delay')):.4f} 秒")

        # Highlight the best run (first one on ties, like np.argmax)
        best = max(runs, key=lambda run: run['success_rate'])
        print(f"  最佳结果: 成功率 {best['success_rate']:.1f}%, "
              f"奖励 {best['total_reward']:.2f}, 步数 {best['steps']}")

    print(f"{rule}")


if __name__ == "__main__":
    # Driver: train the PPO agent, then evaluate it per phase and overall.
    print("开始PPO智能体训练和测试...")

    # --- Training ---
    print("\n🚀 开始训练...")
    model, env = train_ppo()

    print("\n✅ 训练完成！开始测试各阶段性能...")

    banner = '=' * 40

    # --- Per-phase evaluation (failures in one phase don't stop the rest) ---
    for phase in (1, 2, 3):
        print(f"\n{banner}")
        print(f"测试阶段 {phase}")
        print(f"{banner}")

        try:
            test_ppo_model("results/ppo_final_model", phase=phase)
        except Exception as err:
            print(f"阶段 {phase} 测试失败: {err}")

    # --- Multi-episode comprehensive evaluation ---
    print(f"\n{banner}")
    print("开始综合性能测试")
    print(f"{banner}")

    try:
        comprehensive_test_ppo("results/ppo_final_model", num_tests=3)
    except Exception as err:
        print(f"综合测试失败: {err}")

    print("\n🎉 所有测试完成！")
    print("结果文件保存在 'results/' 目录下")
    print("训练日志保存在 'ppo_uav_logs/' 目录下")
