import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time

SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font so plot labels render
plt.rcParams['axes.unicode_minus'] = False

EPISODES_PER_TASK = 600

# Scenario geometry and task-collection rules.
AREA_SIZE = 200  # side length of the square service area
NUM_USERS = 10
MAX_STEPS = 300  # per-episode step limit
MAX_DISTANCE_COLLECT = 15  # 2-D UAV-user distance within which a task is collected

UAV_HEIGHT = 30.0  # fixed flight altitude
UAV_COMPUTE_CAPACITY = 1e10  # UAV CPU frequency (cycles/s)
MIN_UAV_SPEED = 2.0
MAX_UAV_SPEED = 20.0
DEFAULT_UAV_SPEED = 10.0

BASE_STATION_POSITION = np.array([0.0, 0.0])
BASE_STATION_COMPUTE_CAPACITY = 2e11  # MEC server CPU frequency (cycles/s)
BASE_STATION_HEIGHT = 10.0

# Tuned hyperparameters
ACTOR_LR = 1e-4  # lowered learning rate
CRITIC_LR = 1e-3  # lowered learning rate
GAMMA = 0.98  # slightly reduced discount factor
REWARD_SCALE = 1.0  # extra reward scaling removed

PPO_CLIP = 0.15  # reduced clip ratio
PPO_EPOCHS = 6  # more update epochs per batch
BATCH_SIZE = 128  # larger batch size
BUFFER_SIZE = 2048

EWC_LAMBDA = 0.5  # reduced EWC regularization weight
FISHER_SAMPLE_SIZE = 1000

SEQUENCE_LENGTH = 10  # observation-history length fed to the GRU networks
HIDDEN_SIZE = 128

# Communication model: Shannon-rate links with a thermal noise floor.
BANDWIDTH = 1e6
USER_TRANSMIT_POWER = 0.1
CHANNEL_GAIN_REF_DB = 30.0
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH
RICE_FACTOR = 5

# Per-task demand ranges: payload size (bits) and CPU work (cycles).
TASK_SIZE_BITS = [1e6, 2e6]
TASK_CPU_CYCLES = [5e8, 15e8]

# Rotary-wing flight-power model coefficients.
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81
AIR_DENSITY = 1.225
ROTOR_RADIUS = 0.4
NUM_ROTORS = 4
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-27  # chip-dependent switched-capacitance coefficient

# Delay/energy cost weights -- not referenced by the reshaped reward in this
# chunk; presumably used further down the file -- TODO confirm before removing.
DELAY_WEIGHT = 0.7
ENERGY_WEIGHT = 0.3
DELAY_SCALE = 100.0
ENERGY_SCALE = 0.001

# Reward-normalization helper (running mean / EMA variance)
class RewardNormalizer:
    """Online reward statistics used to standardize rewards.

    Tracks a running mean (with the effective sample size capped at 1000)
    and an exponentially-weighted variance of observed rewards.
    """

    def __init__(self, alpha=0.99):
        # alpha: EMA smoothing factor for the variance estimate.
        self.alpha = alpha
        self.mean = 0.0
        self.var = 1.0
        self.count = 0

    def update(self, reward):
        """Fold a single reward sample into the running statistics."""
        self.count += 1
        delta = reward - self.mean
        # Cap the divisor so very old history stops dominating the mean.
        effective_n = self.count if self.count < 1000 else 1000
        self.mean = self.mean + delta / effective_n
        self.var = self.alpha * self.var + (1 - self.alpha) * delta * delta

    def normalize(self, reward):
        """Return the z-scored reward; pass through unchanged if variance is zero."""
        if not self.var > 0:
            return reward
        return (reward - self.mean) / (np.sqrt(self.var) + 1e-8)


class Environment:
    """UAV-assisted mobile-edge-computing data-collection environment.

    One UAV starts at the origin, flies over an AREA_SIZE x AREA_SIZE square,
    collects tasks from users that come within MAX_DISTANCE_COLLECT, and
    chooses a per-task offloading ratio (UAV-local compute vs. relay to the
    base station). Observations are stacked into SEQUENCE_LENGTH-long
    histories for the recurrent actor/critic.
    """

    def __init__(self):
        # Static scenario: user layout and per-user task demands drawn once.
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)  # which users have a task this phase
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        # Per-user bookkeeping, filled in when a task is collected in step().
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.total_flight_energy = 0

        self.user_offloading_ratios = np.zeros(NUM_USERS)

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1

        # Accumulated reward components for the current episode (debugging aid).
        self.episode_reward_breakdown = {
            'collection_reward': 0.0,
            'proximity_reward': 0.0,
            'completion_reward': 0.0,
            'cost': 0.0,
            'step_penalty': 0.0
        }

    def _calculate_rice_channel_gain(self, distance_2d, height=UAV_HEIGHT):
        """Instantaneous Rician-fading power gain for a link at 2-D distance.

        Combines distance-based path loss with a sampled Rice-K fading term,
        so the result is random per call.
        """
        distance_3d = np.sqrt(distance_2d ** 2 + height ** 2)
        if distance_3d < 1.0: distance_3d = 1.0  # clamp to avoid path-loss blow-up at tiny range
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        K = RICE_FACTOR
        h_los = 1.0
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)  # unit-power scattered component
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """Time to upload the user's whole task payload to the UAV (Shannon rate)."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return self.task_sizes[user_index] / data_rate

    def _calculate_uav_to_bs_delay(self, task_size):
        """Time to relay `task_size` bits from the UAV to the base station.

        NOTE(review): reuses USER_TRANSMIT_POWER for the UAV transmitter and
        uses only the height difference as vertical separation -- confirm
        these modelling choices are intended.
        """
        bs_distance_2d = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        height_diff = abs(UAV_HEIGHT - BASE_STATION_HEIGHT)
        channel_gain = self._calculate_rice_channel_gain(bs_distance_2d, height_diff)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return task_size / data_rate

    def _calculate_completion_delay(self, user_index, offloading_ratio):
        """End-to-end completion delay of one task for the given offloading split.

        ratio == 0: everything computed on the UAV;
        ratio == 1: everything relayed to and computed at the base station;
        otherwise both branches run in parallel and the slower one dominates.
        """
        total_task_size = self.task_sizes[user_index]
        total_cpu_cycles = self.task_cpu_cycles[user_index]

        user_to_uav_delay = self._calculate_offloading_delay(user_index,
                                                             np.linalg.norm(
                                                                 self.uav_position - self.user_positions[user_index]))

        if offloading_ratio == 0:
            # Fully local: compute on the UAV after uploading.
            local_computation_delay = total_cpu_cycles / UAV_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + local_computation_delay

        elif offloading_ratio == 1:
            # Fully offloaded: relay to the base station, compute there.
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(total_task_size)
            bs_computation_delay = total_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

        else:
            # Partial offloading: split payload and CPU work by the ratio.
            local_task_size = total_task_size * (1 - offloading_ratio)  # computed but not read below
            bs_task_size = total_task_size * offloading_ratio

            local_cpu_cycles = total_cpu_cycles * (1 - offloading_ratio)
            bs_cpu_cycles = total_cpu_cycles * offloading_ratio

            local_computation_delay = local_cpu_cycles / UAV_COMPUTE_CAPACITY
            local_total_delay = user_to_uav_delay + local_computation_delay

            uav_to_bs_delay = self._calculate_uav_to_bs_delay(bs_task_size)
            bs_computation_delay = bs_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            bs_total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay

            # Branches proceed in parallel; the slower one sets the delay.
            total_delay = max(local_total_delay, bs_total_delay)

        return total_delay

    def _calculate_flight_energy(self, distance_moved, actual_speed=None, time_delta=1.0):
        """Flight energy (power * time_delta) at the given forward speed.

        Rotary-wing power model with induced, profile, and parasite terms;
        speed 0 yields the hover power P_INDUCED_COEFF + P_PROFILE_COEFF.
        """
        if actual_speed is None:
            speed = distance_moved / time_delta
        else:
            speed = actual_speed

        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index, offloading_ratio):
        """UAV-side computation energy E = kappa * f^2 * cycles for the local share."""
        local_cpu_cycles = self.task_cpu_cycles[user_index] * (1 - offloading_ratio)
        working_frequency = UAV_COMPUTE_CAPACITY
        energy = EFFECTIVE_SWITCHED_CAPACITANCE * (working_frequency ** 2) * local_cpu_cycles
        return energy

    def update_task_generating_users(self, phase):
        """Select which users generate tasks in the given curriculum phase.

        Phase 1: all users; phase 2: a random 9 of them; any later phase: a
        random 8 of them.
        """
        self.current_phase = phase
        if phase == 1:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 2:
            indices = np.random.choice(NUM_USERS, 9, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
        else:
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Reset the per-episode state and return the initial stacked observation."""
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        self.user_completion_delays.fill(0)
        self.user_offloading_delays.fill(0)
        self.user_computation_delays.fill(0)
        self.user_computation_energies.fill(0)
        self.user_offloading_ratios.fill(0)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()

        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] = 0.0

        # Pre-fill the history so the recurrent nets always see a full sequence.
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)
        return self._get_gru_state()

    def step(self, action):
        """Advance the environment one timestep.

        action: length-4 vector, components in [-1, 1]:
          [0:2] movement direction (normalized below),
          [2]   speed, mapped affinely to [MIN_UAV_SPEED, MAX_UAV_SPEED],
          [3]   offloading ratio, mapped to [0, 1].
        Returns (stacked_observation, reward, done, info_dict).
        """
        direction_raw = action[:2]

        direction_norm = np.linalg.norm(direction_raw)
        if direction_norm > 1e-6:
            movement_direction = direction_raw / direction_norm
        else:
            movement_direction = np.array([0.0, 0.0])  # near-zero direction -> hover in place

        speed = (action[2] + 1) / 2 * (MAX_UAV_SPEED - MIN_UAV_SPEED) + MIN_UAV_SPEED

        offloading_ratio = (action[3] + 1) / 2

        movement = movement_direction * speed

        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())

        actual_movement = self.uav_position - prev_position
        distance_moved = np.linalg.norm(actual_movement)  # may be < speed after clipping at borders

        if distance_moved > 0:
            actual_speed = speed if direction_norm > 1e-6 else 0
            flight_energy_step = self._calculate_flight_energy(distance_moved, actual_speed=actual_speed)
        else:
            flight_energy_step = self._calculate_flight_energy(0, actual_speed=0)  # hovering power

        self.total_flight_energy += flight_energy_step

        self.current_speed = speed if direction_norm > 1e-6 else 0

        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        # Collect every pending task whose user is now within range; this
        # step's offloading ratio applies to all tasks collected this step.
        newly_collected = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1

                    self.user_offloading_ratios[i] = offloading_ratio
                    self.user_offloading_delays[i] = self._calculate_offloading_delay(i, new_distances[i])
                    self.user_completion_delays[i] = self._calculate_completion_delay(i, offloading_ratio)
                    self.user_computation_delays[i] = self.user_completion_delays[i] - self.user_offloading_delays[i]
                    self.user_computation_energies[i] = self._calculate_computation_energy(i, offloading_ratio)

        self.step_count += 1

        # Aggregate delay/energy statistics over all tasks collected so far.
        completed_indices = np.where(self.collected_tasks & self.task_generating_users)[0]

        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
            avg_offloading_delay = np.mean(self.user_offloading_delays[completed_indices])
            avg_computation_delay = np.mean(self.user_computation_delays[completed_indices])
        else:
            total_delay = 0.0
            total_comp_energy = 0.0
            avg_total_delay, avg_offloading_delay, avg_computation_delay = 0.0, 0.0, 0.0

        total_energy = self.total_flight_energy + total_comp_energy

        reward_info = self.calculate_reward(newly_collected, total_energy, total_delay, new_distances,
                                            self.last_distances)
        reward = reward_info['total_reward']
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] += reward_info[key]

        self.last_distances = new_distances

        # Episode ends at the step limit or once every required task is collected.
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        self.observation_history.append(self._get_state())

        return self._get_gru_state(), reward, done, {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "flight_energy": self.total_flight_energy,
            "comp_energy": total_comp_energy,
            "delay_breakdown": {
                "avg_offloading_delay": avg_offloading_delay,
                "avg_computation_delay": avg_computation_delay,
                "avg_total_delay": avg_total_delay,
                "total_delay": total_delay,
            },
            "episode_reward_breakdown": self.episode_reward_breakdown
        }

    def _get_state(self):
        """Flat observation: UAV position, 4 features per user, progress, BS distance."""
        state = np.zeros(2 + NUM_USERS * 4 + 1 + 1)
        state[0:2] = self.uav_position / AREA_SIZE

        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 4
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)  # normalized by the area diagonal
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
            state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]

        state[-2] = self.step_count / MAX_STEPS

        bs_distance = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        state[-1] = bs_distance / np.sqrt(2 * AREA_SIZE ** 2)

        return state

    def _get_gru_state(self):
        """Return the observation history as a (SEQUENCE_LENGTH, state_dim) array."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    # Redesigned reward function
    def calculate_reward(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Compose the per-step reward; returns a dict with the total and its breakdown."""
        # Reward constants, kept here for easy tuning
        REWARD_COLLECTION = 200.0  # reward per successfully collected task
        REWARD_FINAL_COMPLETION = 500.0  # extra bonus for completing all tasks
        PROXIMITY_SCALAR = 1.5  # scaling factor of the approach-shaping reward
        STEP_PENALTY = -0.1  # time penalty applied every step
        ENERGY_PENALTY_SCALAR = -0.0001  # penalty factor on the per-step energy use

        # 1. Core reward: successful task collection
        collection_reward = newly_collected * REWARD_COLLECTION

        # 2. Shaping reward: encourage approaching the nearest uncollected target
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            # Locate the closest still-uncollected user
            closest_user_idx = uncollected_indices[np.argmin(new_distances[uncollected_indices])]

            # Change in distance to that closest user since the last step
            distance_improvement = old_distances[closest_user_idx] - new_distances[closest_user_idx]
            proximity_reward = distance_improvement * PROXIMITY_SCALAR

        # 3. Cost penalty: based on the current step's flight energy.
        # We need this step's energy rather than the accumulated total_energy.
        # Simplified here: energy is estimated from the distance moved; adjust
        # to the actual physical model if needed.
        # NOTE(review): the conditional below parses as
        # norm((uav - trajectory[-2]) if len > 1 else [0, 0]) -- confirm intent.
        distance_moved = np.linalg.norm(
            self.uav_position - self.trajectory[-2] if len(self.trajectory) > 1 else np.array([0, 0]))
        # The exact per-step energy could be computed in step() and passed in;
        # an estimate is used here instead.
        current_step_flight_energy = self._calculate_flight_energy(distance_moved, self.current_speed)
        energy_penalty = current_step_flight_energy * ENERGY_PENALTY_SCALAR

        # 4. Terminal reward/penalty: only applied when the episode ends
        final_reward = 0.0
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required == total_required)

        if done:
            completion_ratio = collected_required / total_required if total_required > 0 else 0
            if completion_ratio == 1.0:
                # All tasks done: large bonus plus an efficiency bonus for unused steps
                efficiency_bonus = (MAX_STEPS - self.step_count) / MAX_STEPS * REWARD_FINAL_COMPLETION
                final_reward = REWARD_FINAL_COMPLETION + efficiency_bonus
            else:
                # Unfinished: penalty proportional to the missing completion ratio
                final_reward = -200 * (1 - completion_ratio)

        # 5. Total reward
        total_reward = collection_reward + proximity_reward + STEP_PENALTY + energy_penalty + final_reward

        # Return the breakdown for debugging
        return {
            'total_reward': total_reward,
            'collection_reward': collection_reward,
            'proximity_reward': proximity_reward,
            'completion_reward': final_reward,  # non-zero only when done
            'cost': energy_penalty,
            'step_penalty': STEP_PENALTY
        }

    def render(self, episode=0, clear_output=True):
        """Plot the scenario and save it to results/step_<episode>_<step>.png.

        NOTE(review): assumes a 'results/' directory already exists --
        plt.savefig raises otherwise. `clear_output` is accepted but unused
        in this body.
        """
        plt.figure(figsize=(10, 10))

        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        if hasattr(self, 'current_speed'):
            title += f", 当前速度: {self.current_speed:.1f} m/s"
        plt.title(title)

        plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                    s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
        plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                     textcoords="offset points", xytext=(0, -25),
                     ha='center', fontsize=10, fontweight='bold')

        # Users: green = collected, red = pending, gray = not generating tasks
        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)

            if self.task_generating_users[i] and self.collected_tasks[i]:
                offload_ratio = self.user_offloading_ratios[i]
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy\nOffload:{offload_ratio:.2f}"
            else:
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy"

            plt.annotate(task_info, (pos[0], pos[1]), fontsize=8, ha='center', va='bottom')

        # UAV trajectory, current position, and collection radius
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]), MAX_DISTANCE_COLLECT, color='blue',
                            fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.plot([self.uav_position[0], BASE_STATION_POSITION[0]],
                 [self.uav_position[1], BASE_STATION_POSITION[1]],
                 'orange', linestyle='--', alpha=0.5, linewidth=1)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        plt.grid(True)
        plt.legend()
        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


class GRUActor(nn.Module):
    """Recurrent Gaussian policy network.

    A GRU encodes the observation sequence; an MLP head with LayerNorm
    produces a tanh-squashed action mean and a clamped log-std.
    """

    def __init__(self, state_dim, action_dim):
        super(GRUActor, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size,
                          num_layers=1, batch_first=True)
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.mean = nn.Linear(128, action_dim)
        self.log_std = nn.Linear(128, action_dim)
        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)
        self.hidden = None
        self._init_weights()

    def _init_weights(self):
        """Orthogonal initialization for Linear/GRU weights; zero biases."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
                nn.init.constant_(module.bias, 0.0)
            elif isinstance(module, nn.GRU):
                for pname, tensor in module.named_parameters():
                    if 'weight' in pname:
                        nn.init.orthogonal_(tensor)
                    elif 'bias' in pname:
                        nn.init.constant_(tensor, 0.0)

    def forward(self, state, reset_hidden=False):
        """Map a (batch, seq, state_dim) tensor to the policy (mean, log_std)."""
        batch = state.size(0)
        if reset_hidden or self.hidden is None or self.hidden.size(1) != batch:
            self.reset_hidden(batch)

        encoded, self.hidden = self.gru(state, self.hidden)
        features = encoded[:, -1]  # last timestep's GRU output
        features = self.ln1(torch.relu(self.layer1(features)))
        features = self.ln2(torch.relu(self.layer2(features)))
        action_mean = torch.tanh(self.mean(features))
        action_log_std = torch.clamp(self.log_std(features), -20, 2)
        return action_mean, action_log_std

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for the given batch size."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class GRUCritic(nn.Module):
    """Recurrent state-value estimator: GRU encoder plus a LayerNorm MLP head."""

    def __init__(self, state_dim):
        super(GRUCritic, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size,
                          num_layers=1, batch_first=True)
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.value = nn.Linear(128, 1)
        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)
        self.hidden = None
        self._init_weights()

    def _init_weights(self):
        """Orthogonal initialization for Linear/GRU weights; zero biases."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
                nn.init.constant_(module.bias, 0.0)
            elif isinstance(module, nn.GRU):
                for pname, tensor in module.named_parameters():
                    if 'weight' in pname:
                        nn.init.orthogonal_(tensor)
                    elif 'bias' in pname:
                        nn.init.constant_(tensor, 0.0)

    def forward(self, state, reset_hidden=False):
        """Map a (batch, seq, state_dim) tensor to a (batch, 1) value estimate."""
        batch = state.size(0)
        if reset_hidden or self.hidden is None or self.hidden.size(1) != batch:
            self.reset_hidden(batch)

        encoded, self.hidden = self.gru(state, self.hidden)
        features = encoded[:, -1]  # last timestep's GRU output
        features = self.ln1(torch.relu(self.layer1(features)))
        features = self.ln2(torch.relu(self.layer2(features)))
        return self.value(features)

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for the given batch size."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class PPOBuffer:
    """On-policy rollout storage with GAE(lambda) advantage estimation."""

    def __init__(self, max_size=BUFFER_SIZE):
        self.max_size = max_size
        self.clear()

    def clear(self):
        """Drop every stored transition and derived quantity."""
        self.states, self.actions, self.rewards = [], [], []
        self.values, self.log_probs, self.dones = [], [], []
        self.advantages, self.returns = [], []

    def add(self, state, action, reward, value, log_prob, done):
        """Append a single transition to the buffer."""
        self.states.append(state)
        self.actions.append(action)
        self.rewards.append(reward)
        self.values.append(value)
        self.log_probs.append(log_prob)
        self.dones.append(done)

    def compute_advantages(self, last_value, gamma=GAMMA, lam=0.95):
        """Backward GAE recursion, bootstrapping the tail with `last_value`."""
        rews = np.array(self.rewards + [last_value])
        vals = np.array(self.values + [last_value])
        terminal = np.array(self.dones + [0])

        gae = 0
        result = np.zeros_like(rews[:-1])
        for t in range(len(rews) - 2, -1, -1):
            nonterminal = 1 - terminal[t]
            delta = rews[t] + gamma * vals[t + 1] * nonterminal - vals[t]
            gae = delta + gamma * lam * nonterminal * gae
            result[t] = gae

        self.advantages = result.tolist()
        self.returns = (result + np.array(self.values)).tolist()

    def get_batch(self):
        """Yield shuffled minibatches (states, actions, advantages, returns, log_probs)."""
        order = np.arange(len(self.states))
        np.random.shuffle(order)

        for begin in range(0, len(order), BATCH_SIZE):
            chunk = order[begin:begin + BATCH_SIZE]
            yield (
                np.array([self.states[j] for j in chunk]),
                np.array([self.actions[j] for j in chunk]),
                np.array([self.advantages[j] for j in chunk]),
                np.array([self.returns[j] for j in chunk]),
                np.array([self.log_probs[j] for j in chunk]),
            )

    def __len__(self):
        return len(self.states)


class EWC:
    """Elastic Weight Consolidation regularizer for continual learning.

    After a task finishes, a snapshot of the model parameters and a diagonal
    Fisher-information estimate are stored; the EWC loss then penalizes
    movement of important parameters while training on later tasks.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        self.importance = {}  # per-parameter Fisher diagonal from the last stored task
        self.old_params = {}  # parameter snapshot from the last stored task
        self.fisher_diagonal = {}  # NOTE(review): never read or written in this class -- apparently unused

    def _calculate_fisher_info(self, buffer):
        """Estimate the diagonal Fisher information from states in `buffer`.

        Uses a squared-output surrogate loss (not the true log-likelihood),
        so this is an approximation of the Fisher diagonal. Has side effects:
        puts the model in train mode and runs backward passes.
        """
        fisher = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                fisher[name] = torch.zeros_like(param).to(device)
        self.model.train()
        samples_count = min(self.fisher_sample_size, len(buffer))
        if samples_count <= 0: return fisher

        for i in range(samples_count):
            if i < len(buffer.states):
                state = torch.FloatTensor(buffer.states[i]).unsqueeze(0).to(device)
                self.model.zero_grad()
                if isinstance(self.model, GRUActor):
                    self.model.reset_hidden(1)
                    mean, log_std = self.model(state)
                    loss = mean.pow(2).mean()  # surrogate objective for the actor
                else:
                    self.model.reset_hidden(1)
                    value = self.model(state)
                    loss = value.pow(2).mean()  # surrogate objective for the critic
                loss.backward()
                # Accumulate squared gradients -> diagonal Fisher estimate.
                for name, param in self.model.named_parameters():
                    if param.requires_grad and param.grad is not None:
                        fisher[name] += param.grad.pow(2) / samples_count
        return fisher

    def store_task_parameters(self, task_id, buffer):
        """Snapshot current parameters and recompute Fisher info after `task_id`."""
        print(f"Storing parameters for task {task_id} and computing Fisher information matrix")
        self.old_params = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.old_params[name] = param.data.clone()
        self.importance = self._calculate_fisher_info(buffer)
        print(f"Stored {len(self.old_params)} parameters and computed Fisher matrices")

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Quadratic penalty lam * sum_i F_i (theta_i - theta*_i)^2; 0 before any snapshot."""
        loss = 0
        if not self.old_params or not self.importance: return loss
        for name, param in self.model.named_parameters():
            if name in self.old_params and name in self.importance and param.requires_grad:
                loss += torch.sum(self.importance[name] * (param - self.old_params[name]).pow(2))
        return lam * loss


class PPO:
    """PPO agent with recurrent (GRU) actor/critic networks.

    Extends vanilla PPO with:
      * EWC regularization on both networks for continual learning across
        sequential task phases,
      * running reward normalization,
      * decayed Gaussian exploration noise plus exponential action smoothing.
    """

    def __init__(self, state_dim, action_dim):
        self.state_dim = state_dim
        self.action_dim = action_dim

        # Networks and optimizers
        self.actor = GRUActor(state_dim, action_dim).to(device)
        self.critic = GRUCritic(state_dim).to(device)
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        # Learning-rate schedulers (stepped once per train() call)
        self.lr_scheduler_actor = optim.lr_scheduler.StepLR(
            self.actor_optimizer, step_size=1000, gamma=0.95)
        self.lr_scheduler_critic = optim.lr_scheduler.StepLR(
            self.critic_optimizer, step_size=1000, gamma=0.95)

        # Rollout buffer and task bookkeeping
        self.buffer = PPOBuffer()
        self.current_task = 1

        # EWC continual-learning regularizers, one per network
        self.ewc_actor = EWC(self.actor)
        self.ewc_critic = EWC(self.critic)

        # Running reward normalizer
        self.reward_normalizer = RewardNormalizer()

        # Exploration: Gaussian noise added to sampled actions, decayed per call
        self.exploration_noise = 0.3  # initial exploration level
        self.noise_decay = 0.995  # slow multiplicative decay
        self.min_noise = 0.02
        self.training_mode = True

        # Exponential smoothing between consecutive actions
        self.action_smoothing = 0.1  # weight given to the previous action
        self.last_action = np.zeros(action_dim)

        # Training counters
        self.episode_count = 0
        self.update_count = 0

    def select_action(self, state, deterministic=False):
        """Sample (or take the mean of) an action for *state*.

        Args:
            state: observation array; a 2-D input is promoted to a batch of 1.
            deterministic: if True, return the policy mean without sampling
                or exploration noise.

        Returns:
            (action, log_prob, value): smoothed numpy action clipped to
            [-1, 1], the scalar log-probability (0 when deterministic) and
            the critic's scalar value estimate.
        """
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)
        state = torch.FloatTensor(state).to(device)

        with torch.no_grad():
            self.actor.reset_hidden(1)
            mean, log_std = self.actor(state)
            std = torch.exp(log_std)

            if deterministic:
                action = mean
                log_prob = None
            else:
                dist = torch.distributions.Normal(mean, std)
                action = dist.sample()
                log_prob = dist.log_prob(action).sum(dim=-1)

                # Extra exploration noise while training, decayed each call
                if self.training_mode and self.exploration_noise > self.min_noise:
                    action = action + torch.randn_like(action) * self.exploration_noise
                    self.exploration_noise = max(
                        self.min_noise, self.exploration_noise * self.noise_decay)

            # Clip to the valid range, then smooth against the previous action.
            # last_action is always initialized in __init__, so the old
            # hasattr() guard was dead code and is removed.
            action_np = torch.clamp(action, -1, 1).cpu().numpy().flatten()
            smoothed_action = (self.action_smoothing * self.last_action +
                               (1 - self.action_smoothing) * action_np)
            self.last_action = smoothed_action

            # Value estimate for the advantage computation
            self.critic.reset_hidden(1)
            value = self.critic(state)

        return smoothed_action, 0 if deterministic else log_prob.cpu().item(), value.cpu().item()

    def switch_task(self, task_id):
        """Switch to a new task phase.

        Takes the EWC snapshot of the task just finished, clears the rollout
        buffer and recurrent state, resets action smoothing and boosts
        exploration for the new task.
        """
        print(f"\nSwitching to task {task_id}")

        # Store EWC parameters for the previous task
        if self.current_task > 0 and len(self.buffer) > 0:
            self.ewc_actor.store_task_parameters(self.current_task, self.buffer)
            self.ewc_critic.store_task_parameters(self.current_task, self.buffer)

        # Clear rollout data and recurrent state
        self.buffer.clear()
        self.current_task = task_id
        self.actor.reset_hidden()
        self.critic.reset_hidden()

        # Reset the action-smoothing history
        self.last_action = np.zeros(self.action_dim)

        # Boost exploration for the new task
        self.exploration_noise = max(0.1, self.exploration_noise * 1.2)

        print(f"Reset for task {task_id}, exploration noise: {self.exploration_noise:.4f}")

    def train(self):
        """Run one PPO update over the buffered rollout.

        Normalizes rewards, computes GAE advantages, then performs up to
        PPO_EPOCHS passes of clipped-surrogate actor updates and MSE critic
        updates (both with EWC penalties after the first task), stopping
        early when the approximate KL divergence grows too large.

        Returns:
            dict: training statistics, or {} when the buffer holds fewer
            than BATCH_SIZE transitions.
        """
        if len(self.buffer) < BATCH_SIZE:
            return {}

        # Update the running reward statistics, then normalize in place
        rewards = np.array(self.buffer.rewards)
        for reward in rewards:
            self.reward_normalizer.update(reward)
        self.buffer.rewards = [self.reward_normalizer.normalize(r) for r in rewards]

        # Bootstrap value for GAE from the last buffered state
        last_state = torch.FloatTensor(self.buffer.states[-1]).unsqueeze(0).to(device)
        with torch.no_grad():
            self.critic.reset_hidden(1)
            last_value = self.critic(last_state).item()

        self.buffer.compute_advantages(last_value, gamma=GAMMA, lam=0.95)

        # Per-update statistics
        actor_losses = []
        critic_losses = []
        actor_ewc_losses = []
        critic_ewc_losses = []
        entropy_losses = []
        kl_divergences = []

        for epoch in range(PPO_EPOCHS):
            for batch in self.buffer.get_batch():
                states, actions, advantages, returns, old_log_probs = batch

                states = torch.FloatTensor(states).to(device)
                actions = torch.FloatTensor(actions).to(device)
                advantages = torch.FloatTensor(advantages).to(device)
                returns = torch.FloatTensor(returns).to(device)
                old_log_probs = torch.FloatTensor(old_log_probs).to(device)

                # Conservative advantage normalization
                if len(advantages) > 1 and advantages.std() > 1e-6:
                    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-6)

                # Fresh recurrent state for each batch
                batch_size = states.size(0)
                self.actor.reset_hidden(batch_size)
                self.critic.reset_hidden(batch_size)

                # Actor forward pass
                mean, log_std = self.actor(states)
                std = torch.exp(log_std)
                dist = torch.distributions.Normal(mean, std)
                new_log_probs = dist.log_prob(actions).sum(dim=-1)

                # Clipped PPO surrogate objective
                ratio = torch.exp(new_log_probs - old_log_probs)
                surr1 = ratio * advantages
                surr2 = torch.clamp(ratio, 1 - PPO_CLIP, 1 + PPO_CLIP) * advantages
                actor_loss = -torch.min(surr1, surr2).mean()

                # Entropy bonus with a coefficient annealed over updates
                entropy = dist.entropy().sum(dim=-1).mean()
                entropy_coeff = 0.01 * max(0.1, 1 - self.update_count / 5000)
                entropy_loss = -entropy_coeff * entropy
                actor_loss += entropy_loss

                # EWC regularization (only after the first task)
                actor_ewc_loss = 0
                if self.current_task > 1:
                    actor_ewc_loss = self.ewc_actor.calculate_ewc_loss()
                    actor_loss += actor_ewc_loss
                    actor_ewc_losses.append(actor_ewc_loss.item())

                # Approximate KL(old || new) from the log-prob ratio.
                # BUGFIX: the previous code computed kl_divergence between a
                # distribution and its own detached copy, which is identically
                # zero, so the KL-based early stopping below never triggered.
                with torch.no_grad():
                    kl_div = (old_log_probs - new_log_probs).mean()
                    kl_divergences.append(kl_div.item())

                # Actor gradient step with clipping
                self.actor_optimizer.zero_grad()
                actor_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 0.5)
                self.actor_optimizer.step()

                # Critic update; squeeze only the trailing value dim so a
                # batch of size 1 is not collapsed to a 0-d tensor
                values = self.critic(states).squeeze(-1)
                critic_loss = nn.MSELoss()(values, returns)

                # EWC regularization for the critic
                critic_ewc_loss = 0
                if self.current_task > 1:
                    critic_ewc_loss = self.ewc_critic.calculate_ewc_loss()
                    critic_loss += critic_ewc_loss
                    critic_ewc_losses.append(critic_ewc_loss.item())

                # Critic gradient step with clipping
                self.critic_optimizer.zero_grad()
                critic_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 0.5)
                self.critic_optimizer.step()

                # Record losses
                actor_losses.append(actor_loss.item())
                critic_losses.append(critic_loss.item())
                entropy_losses.append(entropy_loss.item())

            # Early stopping when the policy has moved too far from the data
            if kl_divergences and np.mean(kl_divergences) > 0.02:
                print(f"Early stopping at epoch {epoch} due to high KL divergence")
                break

        # Learning-rate schedules advance once per update
        self.lr_scheduler_actor.step()
        self.lr_scheduler_critic.step()

        # On-policy data has been consumed
        self.buffer.clear()
        self.update_count += 1

        # Return training statistics
        return {
            "actor_loss": np.mean(actor_losses) if actor_losses else 0.0,
            "critic_loss": np.mean(critic_losses) if critic_losses else 0.0,
            "actor_ewc_loss": np.mean(actor_ewc_losses) if actor_ewc_losses else 0.0,
            "critic_ewc_loss": np.mean(critic_ewc_losses) if critic_ewc_losses else 0.0,
            "entropy_loss": np.mean(entropy_losses) if entropy_losses else 0.0,
            "kl_divergence": np.mean(kl_divergences) if kl_divergences else 0.0,
            "exploration_noise": self.exploration_noise,
            "learning_rate_actor": self.lr_scheduler_actor.get_last_lr()[0],
            "learning_rate_critic": self.lr_scheduler_critic.get_last_lr()[0]
        }

    def set_training_mode(self, training=True):
        """Toggle training/eval mode on the agent and both networks."""
        self.training_mode = training
        self.actor.train(training)
        self.critic.train(training)

    def save_models(self, filepath_prefix):
        """Save networks, optimizers and counters to ``<prefix>.pt``."""
        torch.save({
            'actor_state_dict': self.actor.state_dict(),
            'critic_state_dict': self.critic.state_dict(),
            'actor_optimizer': self.actor_optimizer.state_dict(),
            'critic_optimizer': self.critic_optimizer.state_dict(),
            'exploration_noise': self.exploration_noise,
            'episode_count': self.episode_count,
            'update_count': self.update_count
        }, f"{filepath_prefix}.pt")

    def load_models(self, filepath):
        """Restore a checkpoint written by save_models()."""
        # map_location keeps GPU-written checkpoints loadable on CPU hosts
        checkpoint = torch.load(filepath, map_location=device)
        self.actor.load_state_dict(checkpoint['actor_state_dict'])
        self.critic.load_state_dict(checkpoint['critic_state_dict'])
        self.actor_optimizer.load_state_dict(checkpoint['actor_optimizer'])
        self.critic_optimizer.load_state_dict(checkpoint['critic_optimizer'])
        self.exploration_noise = checkpoint.get('exploration_noise', 0.1)
        self.episode_count = checkpoint.get('episode_count', 0)
        self.update_count = checkpoint.get('update_count', 0)


def train():
    """Three-phase continual-learning training loop.

    For each phase (1..3) the environment's task-generating user set is
    switched, the agent takes an EWC snapshot of the previous task inside
    ``agent.switch_task``, and ``episodes_per_task`` PPO episodes are run.
    Training curves, per-phase models and checkpoints are written under
    ``results/``.

    Returns:
        tuple: ``(agent, env)`` — the trained PPO agent and the environment.
    """
    os.makedirs("results", exist_ok=True)
    env = Environment()

    # State size: UAV position (2) + 4 features per user + 2 scalars.
    # NOTE(review): layout inferred from the arithmetic only — confirm against
    # Environment's observation builder.
    state_dim = 2 + NUM_USERS * 4 + 1 + 1
    action_dim = 4

    agent = PPO(state_dim, action_dim)
    # NOTE(review): schedule is hard-coded here; the module-level
    # EPISODES_PER_TASK constant (600) is not used — confirm which is intended.
    total_episodes = 1500
    episodes_per_task = 500
    eval_freq = 200  # lower evaluation frequency to reduce visualization overhead

    rewards_history = []
    collection_history = []
    energy_history = []
    delay_history = []
    best_reward = -float('inf')
    best_collection = 0
    losses = {"critic": [], "actor": []}

    # Exponentially-smoothed running statistics for logging
    running_reward = 0.0
    running_collection = 0.0
    smoothing_factor = 0.99

    start_time = time.time()
    for phase in range(1, 4):
        env.update_task_generating_users(phase)
        agent.switch_task(phase)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode
            state = env.reset()
            agent.actor.reset_hidden()
            agent.critic.reset_hidden()
            episode_reward = 0
            last_collection = 0

            # Roll out one episode
            for step in range(1, MAX_STEPS + 1):
                action, log_prob, value = agent.select_action(state)
                next_state, reward, done, info = env.step(action)

                agent.buffer.add(state, action, reward, value, log_prob, done)

                state = next_state
                episode_reward += reward
                last_collection = info["collected_required"]

                if done:
                    break

            # Train once enough transitions are buffered (or episode ended)
            if len(agent.buffer) >= BATCH_SIZE or done:
                loss_info = agent.train()
                if loss_info:
                    losses["critic"].append(loss_info["critic_loss"])
                    losses["actor"].append(loss_info["actor_loss"])

            # Record per-episode history
            rewards_history.append(episode_reward)
            collection_history.append(last_collection)
            energy_history.append(info["energy"])
            delay_history.append(info["delay"])

            # Update exponentially-smoothed running statistics
            if global_episode == 1:
                running_reward = episode_reward
                running_collection = last_collection
            else:
                running_reward = smoothing_factor * running_reward + (1 - smoothing_factor) * episode_reward
                running_collection = smoothing_factor * running_collection + (1 - smoothing_factor) * last_collection

            # Save the best model (collection ratio first, reward as tie-break)
            current_required = info["total_required"]
            collection_ratio = last_collection / current_required if current_required > 0 else 0
            if collection_ratio > best_collection or (
                    collection_ratio == best_collection and episode_reward > best_reward):
                best_reward = episode_reward
                best_collection = collection_ratio
                torch.save(agent.actor.state_dict(), f"results/best_actor_phase_{phase}.pth")

            # Gather statistics for the log line
            elapsed_time = time.time() - start_time
            collected_required = info.get("collected_required", 0)
            total_required = info.get("total_required", 1)

            avg_offloading_ratio = 0.0
            completed_tasks_with_offload = []
            for i in range(NUM_USERS):
                if env.task_generating_users[i] and env.collected_tasks[i]:
                    completed_tasks_with_offload.append(env.user_offloading_ratios[i])

            if completed_tasks_with_offload:
                avg_offloading_ratio = np.mean(completed_tasks_with_offload)

            # Pull loss info from the most recent update, if one happened yet
            avg_actor_loss = loss_info.get("actor_loss", 0.0) if 'loss_info' in locals() and loss_info else 0.0
            avg_critic_loss = loss_info.get("critic_loss", 0.0) if 'loss_info' in locals() and loss_info else 0.0
            avg_actor_ewc_loss = loss_info.get("actor_ewc_loss", 0.0) if 'loss_info' in locals() and loss_info else 0.0
            avg_critic_ewc_loss = loss_info.get("critic_ewc_loss",
                                                0.0) if 'loss_info' in locals() and loss_info else 0.0

            # Build the reward-breakdown string
            rb = info["episode_reward_breakdown"]
            reward_str = (f" Col:{rb['collection_reward']:.1f} "
                          f"Pro:{rb['proximity_reward']:.1f} "
                          f"Comp:{rb['completion_reward']:.1f} "
                          f"Cost:{rb['cost']:.1f} "
                          f"Step:{rb['step_penalty']:.1f}")

            # Build the energy-breakdown string
            energy_str = f"[E(Flight:{info['flight_energy']:.1f} Comp:{info['comp_energy']:.1f})]"

            # Build the delay-breakdown string
            db = info['delay_breakdown']
            delay_str = f"[D(Tot:{db['total_delay']:.2f}s AvgComp:{db['avg_computation_delay']:.3f}s AvgOff:{db['avg_offloading_delay']:.3f}s)]"

            # Print the training log line
            print(
                f"P:{phase} Ep {episode:3d}/{episodes_per_task} "
                f"Tasks:{collected_required:2d}/{total_required:2d} "
                f"Steps:{env.step_count:3d} "
                f"Speed:{env.current_speed:.1f} m/s "
                f"AvgOffload: {avg_offloading_ratio:.2f} "
                f"Loss(A/C/EWC_A/EWC_C) {avg_actor_loss:.3f}/{avg_critic_loss:.3f}/{avg_actor_ewc_loss:.3f}/{avg_critic_ewc_loss:.3f} | "
                f"Total Rwd: {episode_reward:.2f} (Running: {running_reward:.2f}) "
                f"[{reward_str}] | "
                f"Total E: {info.get('energy', 0):.1f} "
                f"{energy_str} | "
                f"Avg D: {info.get('delay', 0):.3f}s "
                f"{delay_str} | "
                f"Noise: {agent.exploration_noise:.3f} | "
                f"Time: {elapsed_time:.1f}s"
            )

            # Periodic evaluation and checkpointing
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                # Render a trajectory plot
                if global_episode % (eval_freq * 2) == 0:  # render less often
                    env.render(global_episode)

                # Plot training curves
                plt.figure(figsize=(15, 5))

                plt.subplot(1, 3, 1)
                plt.plot(rewards_history, alpha=0.3, color='blue', label='Raw')
                if len(rewards_history) >= 20:
                    smoothed = [np.mean(rewards_history[max(0, i - 19):i + 1]) for i in range(len(rewards_history))]
                    plt.plot(smoothed, color='red', label='Smoothed (20)')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
                plt.title("Reward")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                plt.subplot(1, 3, 2)
                plt.plot(collection_history, alpha=0.7, color='green')
                if len(collection_history) >= 20:
                    smoothed_collection = [np.mean(collection_history[max(0, i - 19):i + 1]) for i in
                                           range(len(collection_history))]
                    plt.plot(smoothed_collection, color='darkgreen', label='Smoothed (20)')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.title("Collected Tasks")
                plt.xlabel("Episode")
                plt.ylabel("Number of Tasks")
                plt.legend()
                plt.grid(True)

                plt.subplot(1, 3, 3)
                if len(losses["actor"]) > 0:
                    plt.plot(losses["actor"], alpha=0.7, color='orange', label='Actor Loss')
                    plt.plot(losses["critic"], alpha=0.7, color='red', label='Critic Loss')
                    plt.title("Training Losses")
                    plt.xlabel("Update")
                    plt.ylabel("Loss")
                    plt.legend()
                    plt.grid(True)
                    plt.yscale('log')

                plt.tight_layout()
                plt.savefig(f"results/training_curves_episode_{global_episode}.png", dpi=150)
                plt.close()

                # Save a full checkpoint
                torch.save({
                    'actor_state_dict': agent.actor.state_dict(),
                    'critic_state_dict': agent.critic.state_dict(),
                    'actor_optimizer': agent.actor_optimizer.state_dict(),
                    'critic_optimizer': agent.critic_optimizer.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history': rewards_history,
                    'collection_history': collection_history,
                    'energy_history': energy_history,
                    'delay_history': delay_history,
                    'best_reward': best_reward,
                    'best_collection': best_collection,
                    'running_reward': running_reward,
                    'running_collection': running_collection
                }, f"results/checkpoint_episode_{global_episode}.pt")

        # Save per-phase models
        torch.save(agent.actor.state_dict(), f"results/actor_phase_{phase}.pth")
        torch.save(agent.critic.state_dict(), f"results/critic_phase_{phase}.pth")

        print(f"\n=== 阶段 {phase} 完成 ===")
        print(f"最佳收集率: {best_collection * 100:.1f}%")
        print(f"最佳奖励: {best_reward:.2f}")
        print(f"运行平均奖励: {running_reward:.2f}")
        print(f"运行平均收集数: {running_collection:.2f}")

    total_time = time.time() - start_time
    print(f"\n训练完成! 总时间: {total_time:.1f}秒")
    print(f"最终结果: {best_collection * 100:.1f}% 任务完成率, 奖励: {best_reward:.2f}")
    return agent, env


def test_and_visualize(agent, env, model_path="results/actor_phase_3.pth", phase=3):
    """Evaluate a trained actor deterministically and render result figures.

    Loads actor weights from *model_path*, runs one greedy episode on a
    freshly randomized environment for *phase*, and saves a trajectory plot
    plus a performance-analysis figure under ``results/``.

    Returns:
        dict: completion rate, total reward, energy, steps, average delay,
        trajectory array and collection order.
    """
    # Re-seed so the randomized test layout is reproducible
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)

    agent.actor.load_state_dict(torch.load(model_path))
    agent.actor.eval()
    agent.set_training_mode(False)

    env.update_task_generating_users(phase)

    # Generate a fresh random environment layout
    env.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
    env.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
    env.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)

    state = env.reset()

    total_reward = 0
    step_rewards = []
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS)
    collection_order = []

    print(f"\n开始测试阶段 {phase}...")
    for step in range(1, MAX_STEPS + 1):
        action, _, _ = agent.select_action(state, deterministic=True)

        trajectory.append(env.uav_position.copy())
        collected_before = env.collected_tasks.copy()

        next_state, reward, done, info = env.step(action)

        # Record the step at which each task was first collected
        for i in range(NUM_USERS):
            if env.task_generating_users[i] and env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step
                collection_order.append(i)

        total_reward += reward
        step_rewards.append(reward)
        state = next_state

        # Report progress every 50 steps
        if step % 50 == 0:
            collected_required = sum(env.collected_tasks & env.task_generating_users)
            total_required = sum(env.task_generating_users)
            print(f"步数 {step}: 收集 {collected_required}/{total_required} 任务, 当前奖励: {reward:.2f}")

        collected_required = sum(env.collected_tasks & env.task_generating_users)
        total_required = sum(env.task_generating_users)
        if collected_required == total_required:
            print(f"提前完成所有任务在步数 {step}")
            break

        if done:
            break

    # Build the detailed trajectory figure
    trajectory = np.array(trajectory)
    plt.figure(figsize=(15, 12))

    # Draw the base station
    plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                s=300, c='orange', marker='s', label='基站 (MEC)',
                edgecolors='black', linewidth=2, zorder=5)

    # Draw users, grouped by collection status
    collected_users = []
    uncollected_users = []
    inactive_users = []

    for i, pos in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            if env.collected_tasks[i]:
                collected_users.append(i)
                plt.scatter(pos[0], pos[1], s=200, c='green', marker='o',
                            edgecolors='darkgreen', linewidth=2, zorder=4)
                # Annotate the collection order
                order_idx = collection_order.index(i) + 1 if i in collection_order else '?'
                plt.annotate(f'#{order_idx}', (pos[0], pos[1]),
                             xytext=(5, 15), textcoords='offset points',
                             fontsize=10, fontweight='bold', color='white',
                             bbox=dict(boxstyle="round,pad=0.3", facecolor='darkgreen', alpha=0.7))
            else:
                uncollected_users.append(i)
                plt.scatter(pos[0], pos[1], s=200, c='red', marker='o',
                            edgecolors='darkred', linewidth=2, zorder=4)
        else:
            inactive_users.append(i)
            plt.scatter(pos[0], pos[1], s=150, c='lightgray', marker='o',
                        edgecolors='gray', linewidth=1, alpha=0.5, zorder=3)

    # Draw the UAV trajectory
    if len(trajectory) > 1:
        # Fade the line alpha to indicate time progression
        for i in range(len(trajectory) - 1):
            alpha = 0.3 + 0.7 * (i / len(trajectory))  # gradient transparency
            plt.plot(trajectory[i:i + 2, 0], trajectory[i:i + 2, 1],
                     'b-', alpha=alpha, linewidth=2)

    # Draw the UAV final position
    plt.scatter(env.uav_position[0], env.uav_position[1],
                s=300, c='blue', marker='*', label='UAV',
                edgecolors='darkblue', linewidth=2, zorder=6)

    # Draw the collection range circle
    circle = plt.Circle((env.uav_position[0], env.uav_position[1]),
                        MAX_DISTANCE_COLLECT, color='blue', fill=False,
                        alpha=0.5, linestyle='--', linewidth=2)
    plt.gca().add_patch(circle)

    # Annotate per-user task info
    for i, pos in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            task_info = (f"U{i + 1}\n"
                         f"大小:{env.task_sizes[i] / 1e6:.1f}Mb\n"
                         f"计算:{env.task_cpu_cycles[i] / 1e9:.1f}Gcy")

            if env.collected_tasks[i]:
                offload_ratio = env.user_offloading_ratios[i]
                task_info += f"\n卸载率:{offload_ratio:.2f}"
                task_info += f"\n完成延迟:{env.user_completion_delays[i]:.3f}s"
                color = 'darkgreen'
            else:
                color = 'darkred'

            plt.annotate(task_info, (pos[0], pos[1]),
                         xytext=(0, -40), textcoords='offset points',
                         fontsize=8, ha='center', va='top',
                         bbox=dict(boxstyle="round,pad=0.3", facecolor='white',
                                   edgecolor=color, alpha=0.8))

    # Figure cosmetics
    plt.xlim(-10, AREA_SIZE + 10)
    plt.ylim(-10, AREA_SIZE + 10)
    plt.grid(True, alpha=0.3)
    plt.xlabel('X 坐标 (m)', fontsize=12)
    plt.ylabel('Y 坐标 (m)', fontsize=12)

    # Summary statistics for the title
    collected_required = sum(env.collected_tasks & env.task_generating_users)
    total_required = sum(env.task_generating_users)
    completion_rate = collected_required / total_required if total_required > 0 else 0

    title = (f"测试结果 - 阶段 {phase}\n"
             f"完成任务: {collected_required}/{total_required} ({completion_rate * 100:.1f}%)\n"
             f"总步数: {env.step_count}, 总奖励: {total_reward:.2f}\n"
             f"总能耗: {info['energy']:.1f}J, 平均延迟: {info['delay']:.3f}s")

    plt.title(title, fontsize=14, pad=20)

    # Legend
    legend_elements = [
        plt.Line2D([0], [0], marker='s', color='w', markerfacecolor='orange',
                   markersize=12, label='基站 (MEC)', markeredgecolor='black'),
        plt.Line2D([0], [0], marker='*', color='w', markerfacecolor='blue',
                   markersize=15, label='UAV', markeredgecolor='darkblue'),
        plt.Line2D([0], [0], marker='o', color='w', markerfacecolor='green',
                   markersize=10, label='已收集任务', markeredgecolor='darkgreen'),
        plt.Line2D([0], [0], marker='o', color='w', markerfacecolor='red',
                   markersize=10, label='未收集任务', markeredgecolor='darkred'),
        plt.Line2D([0], [0], marker='o', color='w', markerfacecolor='lightgray',
                   markersize=8, label='非活跃用户', markeredgecolor='gray'),
        plt.Line2D([0], [0], color='blue', linestyle='-', alpha=0.7,
                   linewidth=2, label='UAV轨迹'),
        plt.Line2D([0], [0], color='blue', linestyle='--', alpha=0.5,
                   linewidth=2, label='收集范围')
    ]

    plt.legend(handles=legend_elements, loc='upper left', bbox_to_anchor=(1.02, 1))
    plt.tight_layout()
    plt.savefig(f"results/test_trajectory_phase_{phase}.png", dpi=300, bbox_inches='tight')
    plt.show()

    # Build the performance-analysis figure
    plt.figure(figsize=(15, 10))

    # Step-reward curve
    plt.subplot(2, 3, 1)
    plt.plot(step_rewards, 'b-', alpha=0.7)
    plt.title('步奖励变化')
    plt.xlabel('步数')
    plt.ylabel('奖励')
    plt.grid(True, alpha=0.3)

    # Task collection-time bars
    plt.subplot(2, 3, 2)
    if len(collection_order) > 0:
        collection_steps = [collection_times[i] for i in collection_order]
        plt.bar(range(len(collection_order)), collection_steps,
                color=['green' if i < len(collection_order) else 'red'
                       for i in range(len(collection_order))])
        plt.title('任务收集时间')
        plt.xlabel('收集顺序')
        plt.ylabel('步数')
        plt.xticks(range(len(collection_order)),
                   [f'U{i + 1}' for i in collection_order], rotation=45)
    plt.grid(True, alpha=0.3)

    # Energy breakdown pie chart
    plt.subplot(2, 3, 3)
    flight_energy = info['flight_energy']
    comp_energy = info['comp_energy']
    energy_data = [flight_energy, comp_energy]
    energy_labels = ['飞行能耗', '计算能耗']
    colors = ['skyblue', 'lightcoral']
    plt.pie(energy_data, labels=energy_labels, colors=colors, autopct='%1.1f%%')
    plt.title(f'能耗分布 (总计: {info["energy"]:.1f}J)')

    # Delay breakdown (stacked bars per completed task)
    plt.subplot(2, 3, 4)
    if collected_required > 0:
        completed_indices = np.where(env.collected_tasks & env.task_generating_users)[0]
        offloading_delays = env.user_offloading_delays[completed_indices]
        computation_delays = env.user_computation_delays[completed_indices]

        plt.bar(range(len(completed_indices)), offloading_delays,
                label='卸载延迟', alpha=0.8, color='orange')
        plt.bar(range(len(completed_indices)), computation_delays,
                bottom=offloading_delays, label='计算延迟', alpha=0.8, color='purple')
        plt.title('各任务延迟分解')
        plt.xlabel('已完成任务')
        plt.ylabel('延迟 (s)')
        plt.xticks(range(len(completed_indices)),
                   [f'U{i + 1}' for i in completed_indices], rotation=45)
        plt.legend()
    plt.grid(True, alpha=0.3)

    # Offloading-ratio histogram
    plt.subplot(2, 3, 5)
    if collected_required > 0:
        completed_indices = np.where(env.collected_tasks & env.task_generating_users)[0]
        offload_ratios = env.user_offloading_ratios[completed_indices]
        plt.hist(offload_ratios, bins=10, alpha=0.7, color='green', edgecolor='black')
        plt.title(f'卸载比率分布\n平均: {np.mean(offload_ratios):.2f}')
        plt.xlabel('卸载比率')
        plt.ylabel('任务数量')
    plt.grid(True, alpha=0.3)

    # Final UAV-to-user distance analysis
    plt.subplot(2, 3, 6)
    final_distances = [np.linalg.norm(env.uav_position - pos)
                       for pos in env.user_positions]
    user_categories = ['已收集', '未收集', '非活跃']
    category_distances = [
        [final_distances[i] for i in collected_users],
        [final_distances[i] for i in uncollected_users],
        [final_distances[i] for i in inactive_users]
    ]

    positions = [1, 2, 3]
    box_plot = plt.boxplot([d for d in category_distances if d],
                           positions=positions[:len([d for d in category_distances if d])],
                           labels=[cat for cat, d in zip(user_categories, category_distances) if d])
    plt.title('UAV与用户最终距离分布')
    plt.ylabel('距离 (m)')
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig(f"results/test_analysis_phase_{phase}.png", dpi=300, bbox_inches='tight')
    plt.show()

    # Print detailed results
    print(f"\n=== 测试结果详情 ===")
    print(f"阶段: {phase}")
    print(f"完成任务: {collected_required}/{total_required} ({completion_rate * 100:.1f}%)")
    print(f"总步数: {env.step_count}")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.1f}J")
    print(f"  - 飞行能耗: {info['flight_energy']:.1f}J ({info['flight_energy'] / info['energy'] * 100:.1f}%)")
    print(f"  - 计算能耗: {info['comp_energy']:.1f}J ({info['comp_energy'] / info['energy'] * 100:.1f}%)")
    print(f"平均延迟: {info['delay']:.3f}s")

    if collected_required > 0:
        completed_indices = np.where(env.collected_tasks & env.task_generating_users)[0]
        avg_offload_ratio = np.mean(env.user_offloading_ratios[completed_indices])
        print(f"平均卸载比率: {avg_offload_ratio:.2f}")

        print(f"\n收集顺序:")
        for idx, user_id in enumerate(collection_order):
            print(f"  {idx + 1}. 用户{user_id + 1} (步数 {collection_times[user_id]})")

    return {
        'completion_rate': completion_rate,
        'total_reward': total_reward,
        'total_energy': info['energy'],
        'total_steps': env.step_count,
        'average_delay': info['delay'],
        'trajectory': trajectory,
        'collection_order': collection_order
    }


def compare_phases():
    """比较不同阶段的性能

    Loads each phase's saved actor checkpoint (if present), evaluates it via
    test_and_visualize, then renders a 6-panel bar-chart comparison and
    prints a summary table.  Saves the figure to
    results/phases_comparison.png.
    """
    print("\n=== 阶段性能比较 ===")

    # Fresh agent/environment pair; per-phase weights are loaded inside
    # test_and_visualize from the saved .pth files.
    # NOTE(review): state layout assumed to be UAV pos + 4 features per user
    # + 2 scalars — confirm against the Environment state builder.
    state_dim = 2 + NUM_USERS * 4 + 1 + 1
    action_dim = 4
    agent = PPO(state_dim, action_dim)
    env = Environment()

    # Evaluate every phase whose checkpoint exists on disk.
    results = {}
    for phase in range(1, 4):
        print(f"\n测试阶段 {phase}...")
        model_path = f"results/actor_phase_{phase}.pth"
        if os.path.exists(model_path):
            results[phase] = test_and_visualize(agent, env, model_path, phase)
        else:
            print(f"找不到阶段 {phase} 的模型文件: {model_path}")

    if not results:
        return

    phases = list(results.keys())
    completion_rates = [results[p]['completion_rate'] * 100 for p in phases]
    total_rewards = [results[p]['total_reward'] for p in phases]
    total_energies = [results[p]['total_energy'] for p in phases]
    total_steps = [results[p]['total_steps'] for p in phases]
    avg_delays = [results[p]['average_delay'] for p in phases]

    plt.figure(figsize=(15, 10))

    def _annotated_bar(index, values, title, ylabel, color, fmt,
                       alpha=0.7, offset=None, ylim=None):
        """Draw one subplot: a bar chart with a value label above each bar.

        index:  1-based position in the 2x3 subplot grid.
        fmt:    callable formatting a bar value into its text label.
        offset: vertical gap between bar top and label; defaults to 1% of
                the tallest bar so labels sit just above the bars.
        """
        plt.subplot(2, 3, index)
        plt.bar(phases, values, color=color, alpha=alpha)
        plt.title(title)
        plt.xlabel('阶段')
        plt.ylabel(ylabel)
        if ylim is not None:
            plt.ylim(*ylim)
        if offset is None:
            offset = max(values) * 0.01
        for ph, val in zip(phases, values):
            plt.text(ph, val + offset, fmt(val), ha='center', va='bottom')
        plt.grid(True, alpha=0.3)

    # 完成率比较 (fixed +1 label offset and 0-100% axis, per-phase colors)
    _annotated_bar(1, completion_rates, '任务完成率比较', '完成率 (%)',
                   ['green', 'orange', 'red'], lambda v: f'{v:.1f}%',
                   alpha=None, offset=1, ylim=(0, 100))
    # 奖励比较
    _annotated_bar(2, total_rewards, '总奖励比较', '总奖励',
                   'blue', lambda v: f'{v:.1f}')
    # 能耗比较
    _annotated_bar(3, total_energies, '总能耗比较', '总能耗 (J)',
                   'red', lambda v: f'{v:.0f}J')
    # 步数比较
    _annotated_bar(4, total_steps, '总步数比较', '总步数',
                   'purple', lambda v: f'{v}')
    # 延迟比较
    _annotated_bar(5, avg_delays, '平均延迟比较', '平均延迟 (s)',
                   'orange', lambda v: f'{v:.3f}s')

    # 效率分析: completion percentage per 1000 steps (完成率/步数).
    efficiencies = [cr / ts * 1000 for cr, ts in zip(completion_rates, total_steps)]
    _annotated_bar(6, efficiencies, '任务收集效率', '完成率/千步 (%/1000步)',
                   'green', lambda v: f'{v:.2f}')

    plt.tight_layout()
    plt.savefig("results/phases_comparison.png", dpi=300, bbox_inches='tight')
    plt.show()

    # 打印比较表格 — efficiency column reuses the same values as panel 6
    # instead of re-deriving the formula inline.
    print(f"\n{'阶段':<6} {'完成率':<8} {'奖励':<8} {'能耗(J)':<8} {'步数':<6} {'延迟(s)':<8} {'效率':<8}")
    print("-" * 60)
    for phase, eff in zip(phases, efficiencies):
        r = results[phase]
        print(f"{phase:<6} {r['completion_rate'] * 100:<8.1f} {r['total_reward']:<8.1f} "
              f"{r['total_energy']:<8.0f} {r['total_steps']:<6} {r['average_delay']:<8.3f} {eff:<8.2f}")


def main():
    """CLI entry point: dispatch to training, testing, or phase comparison.

    Flags:
        --mode   one of train / test / compare (default: train)
        --phase  which curriculum phase to test (default: 3)
        --model  explicit model checkpoint path (overrides --phase lookup)
    """
    import argparse

    cli = argparse.ArgumentParser(description='UAV Task Collection with PPO')
    cli.add_argument('--mode', choices=['train', 'test', 'compare'],
                     default='train', help='运行模式')
    cli.add_argument('--phase', type=int, choices=[1, 2, 3],
                     default=3, help='测试阶段')
    cli.add_argument('--model', type=str,
                     help='模型文件路径')
    opts = cli.parse_args()

    if opts.mode == 'compare':
        compare_phases()
        return

    if opts.mode == 'test':
        # Stand-alone evaluation of a single saved checkpoint.
        agent = PPO(2 + NUM_USERS * 4 + 1 + 1, 4)
        env = Environment()

        # Explicit --model wins; otherwise derive the path from --phase.
        model_path = opts.model or f"results/actor_phase_{opts.phase}.pth"
        if not os.path.exists(model_path):
            print(f"找不到模型文件: {model_path}")
            return
        print(f"测试阶段 {opts.phase} 使用模型: {model_path}")
        test_and_visualize(agent, env, model_path, opts.phase)
        return

    # Default mode: full training run, then evaluate each saved phase
    # checkpoint and produce the cross-phase comparison figure.
    print("开始训练...")
    agent, env = train()

    print("\n训练完成，开始最终测试...")
    for phase in (1, 2, 3):
        model_path = f"results/actor_phase_{phase}.pth"
        if os.path.exists(model_path):
            print(f"\n测试阶段 {phase}...")
            test_and_visualize(agent, env, model_path, phase)

    compare_phases()


# Script entry point: parse CLI flags and dispatch (train / test / compare).
if __name__ == "__main__":
    main()
