import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time
import torch.nn.functional as F

# ---------------------------------------------------------------------------
# Reproducibility: seed every RNG the script uses (torch CPU/CUDA, numpy,
# stdlib random).
# ---------------------------------------------------------------------------
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# SimHei font so the Chinese labels used in plots render correctly; keep the
# minus sign renderable with a non-ASCII font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# --- Environment geometry -------------------------------------------------
AREA_SIZE = 200              # side length of the square service area (m)
NUM_USERS = 12               # ground users that may generate tasks
MAX_STEPS = 300              # hard episode length limit
MAX_DISTANCE_COLLECT = 15    # 2D radius (m) within which the UAV collects a task

# --- UAV platform ---------------------------------------------------------
UAV_HEIGHT = 30.0            # fixed flight altitude (m)
UAV_COMPUTE_CAPACITY = 1e10  # on-board CPU frequency (cycles/s)
MIN_UAV_SPEED = 2.0          # m/s, lower bound of the speed action
MAX_UAV_SPEED = 20.0         # m/s, upper bound of the speed action
DEFAULT_UAV_SPEED = 10.0     # m/s, speed assumed right after reset

# --- Base station (MEC server) -------------------------------------------
BASE_STATION_POSITION = np.array([0.0, 0.0])
BASE_STATION_COMPUTE_CAPACITY = 2e11  # cycles/s, 20x the UAV's capacity
BASE_STATION_HEIGHT = 10.0            # antenna height (m)

# --- TD3 hyper-parameters --------------------------------------------------
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99                 # discount factor
TAU = 0.005                  # Polyak averaging rate for target networks
BUFFER_SIZE = 200000
BATCH_SIZE = 125
EXPLORATION_NOISE_START = 0.35  # stddev scale, annealed per phase
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.1           # global multiplier applied in calculate_reward

# --- Elastic Weight Consolidation (continual learning) ---------------------
# NOTE: with lambda 0 and a zero Fisher sample size EWC is effectively
# disabled by default; the plumbing stays in place for experiments.
EWC_LAMBDA = 0.0
FISHER_SAMPLE_SIZE = 0

# --- Recurrent network configuration ---------------------------------------
SEQUENCE_LENGTH = 20         # observation window length fed to the GRUs
HIDDEN_SIZE = 128            # GRU hidden state width

# --- Communication model ----------------------------------------------------
BANDWIDTH = 1e6              # Hz
USER_TRANSMIT_POWER = 0.1    # W
CHANNEL_GAIN_REF_DB = 30.0   # reference gain at 1 m, in dB
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH  # thermal noise (W)
RICE_FACTOR = 5              # Rician K-factor (LoS-to-scatter power ratio)

# --- Task model -------------------------------------------------------------
TASK_SIZE_BITS = [1e6, 2e6]       # uniform range of task payload sizes
TASK_CPU_CYCLES = [5e8, 15e8]     # uniform range of task compute demand

# --- Flight-energy model -----------------------------------------------------
# Rotary-wing power terms (induced / profile / parasite); see
# Environment._calculate_flight_energy for how these combine.
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81
AIR_DENSITY = 1.225
ROTOR_RADIUS = 0.4
NUM_ROTORS = 4
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-27  # kappa in E = kappa * f^2 * cycles

# --- Reward weighting --------------------------------------------------------
DELAY_WEIGHT = 0.7
ENERGY_WEIGHT = 0.3
DELAY_SCALE = 100.0
ENERGY_SCALE = 0.001

class Environment:
    """UAV-assisted MEC task-collection environment.

    A single UAV flies over a square area, collects computation tasks from
    ground users that come within ``MAX_DISTANCE_COLLECT`` metres, and chooses
    per collected task what fraction to offload to the base station. Raw
    states are stacked into length-``SEQUENCE_LENGTH`` windows for the GRU
    networks. Episodes end when every task-generating user has been served or
    ``MAX_STEPS`` is reached.
    """

    def __init__(self):
        # Static episode geometry: random user layout and per-user task demand.
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        # Mask of users that currently have a task (reconfigured per phase).
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        # Per-user delay/energy bookkeeping, filled when a task is collected.
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.total_flight_energy = 0
        self.user_offloading_ratios = np.zeros(NUM_USERS)
        self.trajectory = [self.uav_position.copy()]
        # Distances from the previous step, used for proximity reward shaping.
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        # Sliding window of raw observations consumed by the GRU policy.
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1
        # Saved so phase 4 can replay exactly phase 1's user set.
        self.phase_1_users = None
        self.episode_reward_breakdown = {
            'collection_reward': 0.0,
            'proximity_reward': 0.0,
            'completion_reward': 0.0,
            'cost': 0.0,
            'step_penalty': 0.0
        }

    def _calculate_rice_channel_gain(self, distance_2d, height=UAV_HEIGHT):
        """Return an instantaneous channel power gain over a Rician link.

        Stochastic: each call draws a fresh NLoS fading sample, so repeated
        calls with the same geometry give different gains.
        """
        distance_3d = np.sqrt(distance_2d ** 2 + height ** 2)
        # Clamp to 1 m to avoid near-field blow-up of the path-loss term.
        if distance_3d < 1.0: distance_3d = 1.0
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        K = RICE_FACTOR
        h_los = 1.0
        # Unit-power complex Gaussian scatter component.
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)
        # Rician mixture: deterministic LoS plus scattered NLoS.
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """Time (s) for the user to upload its full task to the UAV (Shannon rate)."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return self.task_sizes[user_index] / data_rate

    def _calculate_uav_to_bs_delay(self, task_size):
        """Time (s) to relay ``task_size`` bits from the UAV to the base station.

        NOTE(review): the link budget reuses USER_TRANSMIT_POWER for the UAV
        transmitter — confirm the UAV is meant to transmit at the same power
        as a ground user.
        """
        bs_distance_2d = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        # Vertical separation between UAV and BS antenna stands in for height.
        height_diff = abs(UAV_HEIGHT - BASE_STATION_HEIGHT)
        channel_gain = self._calculate_rice_channel_gain(bs_distance_2d, height_diff)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return task_size / data_rate

    def _calculate_completion_delay(self, user_index, offloading_ratio):
        """End-to-end delay (s) for one task under a given offloading split.

        ratio == 0: everything computed on the UAV.
        ratio == 1: everything relayed to and computed at the BS.
        otherwise: the two partial pipelines run in parallel after the upload,
        so the completion time is the max of the two branch delays.
        """
        total_task_size = self.task_sizes[user_index]
        total_cpu_cycles = self.task_cpu_cycles[user_index]
        # The user-to-UAV upload is always paid, regardless of the split.
        user_to_uav_delay = self._calculate_offloading_delay(user_index,
                                                             np.linalg.norm(
                                                                 self.uav_position - self.user_positions[user_index]))
        if offloading_ratio == 0:
            local_computation_delay = total_cpu_cycles / UAV_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + local_computation_delay
        elif offloading_ratio == 1:
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(total_task_size)
            bs_computation_delay = total_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay
        else:
            local_task_size = total_task_size * (1 - offloading_ratio)
            bs_task_size = total_task_size * offloading_ratio
            local_cpu_cycles = total_cpu_cycles * (1 - offloading_ratio)
            bs_cpu_cycles = total_cpu_cycles * offloading_ratio
            local_computation_delay = local_cpu_cycles / UAV_COMPUTE_CAPACITY
            local_total_delay = user_to_uav_delay + local_computation_delay
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(bs_task_size)
            bs_computation_delay = bs_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            bs_total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay
            # Branches execute concurrently; the slower one dominates.
            total_delay = max(local_total_delay, bs_total_delay)
        return total_delay

    def _calculate_flight_energy(self, distance_moved, actual_speed=None, time_delta=1.0):
        """Energy (J) for one time slot of flight at the given (or inferred) speed.

        Rotary-wing style power model: induced + profile + parasite terms.
        NOTE(review): coefficients look simplified relative to standard
        rotary-wing models — confirm against the source the model was taken from.
        """
        if actual_speed is None:
            # Derive speed from displacement when the caller didn't supply it.
            speed = distance_moved / time_delta
        else:
            speed = actual_speed
        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index, offloading_ratio):
        """UAV compute energy (J) for the locally processed task fraction.

        Uses the CMOS model E = kappa * f^2 * cycles with the UAV always
        clocked at full capacity; the BS's energy is not charged here.
        """
        local_cpu_cycles = self.task_cpu_cycles[user_index] * (1 - offloading_ratio)
        working_frequency = UAV_COMPUTE_CAPACITY
        energy = EFFECTIVE_SWITCHED_CAPACITANCE * (working_frequency ** 2) * local_cpu_cycles
        return energy

    def update_task_generating_users(self, phase):
        """Configure which users generate tasks for a continual-learning phase.

        Phase 1: random 10 of NUM_USERS (remembered for phase 4).
        Phase 2: all users.
        Phase 3: random 8 users.
        Phase 4: exactly the phase-1 user set (fallback: fresh random 10).
        """
        self.current_phase = phase
        self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
        if phase == 1:
            indices = np.random.choice(NUM_USERS, 10, replace=False)
            self.task_generating_users[indices] = True
            self.phase_1_users = self.task_generating_users.copy()
        elif phase == 2:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 3:
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users[indices] = True
        elif phase == 4:
            if self.phase_1_users is None:
                print("错误: 未找到第一阶段的用户数据! 将重新为第四阶段生成10个用户。")
                indices = np.random.choice(NUM_USERS, 10, replace=False)
                self.task_generating_users[indices] = True
            else:
                self.task_generating_users = self.phase_1_users.copy()
        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Start a new episode; returns the initial stacked GRU observation.

        User positions and task demands are NOT re-sampled — only the UAV,
        counters, and per-episode accumulators are cleared.
        """
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.user_completion_delays.fill(0)
        self.user_offloading_delays.fill(0)
        self.user_computation_delays.fill(0)
        self.user_computation_energies.fill(0)
        self.user_offloading_ratios.fill(0)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED
        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] = 0.0
        # Pad the whole observation window with the initial state so the GRU
        # sees a full-length sequence from step one.
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one time slot.

        action[0:2] -> movement direction (normalised internally),
        action[2]   -> speed in [-1, 1], mapped to [MIN, MAX]_UAV_SPEED,
        action[3]   -> offloading ratio in [-1, 1], mapped to [0, 1]; it is
                       applied to every task collected during this step.

        Returns (gru_state, reward, done, info) where info carries energy,
        delay, and reward breakdowns.
        """
        direction_raw = action[:2]
        direction_norm = np.linalg.norm(direction_raw)
        if direction_norm > 1e-6:
            movement_direction = direction_raw / direction_norm
        else:
            # A (near-)zero direction vector means hover in place.
            movement_direction = np.array([0.0, 0.0])
        speed = (action[2] + 1) / 2 * (MAX_UAV_SPEED - MIN_UAV_SPEED) + MIN_UAV_SPEED
        offloading_ratio = (action[3] + 1) / 2
        movement = movement_direction * speed
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        # Keep the UAV inside the service area.
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())
        actual_movement = self.uav_position - prev_position
        distance_moved = np.linalg.norm(actual_movement)
        if distance_moved > 0:
            actual_speed = speed if direction_norm > 1e-6 else 0
            flight_energy_step = self._calculate_flight_energy(distance_moved, actual_speed=actual_speed)
        else:
            # Hovering still consumes power (speed-0 terms of the model).
            flight_energy_step = self._calculate_flight_energy(0, actual_speed=0)
        self.total_flight_energy += flight_energy_step
        self.current_speed = speed if direction_norm > 1e-6 else 0
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        newly_collected = 0
        # Collect every uncollected task whose user is now within range; all
        # tasks collected this step share the same offloading ratio.
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    self.user_offloading_ratios[i] = offloading_ratio
                    self.user_offloading_delays[i] = self._calculate_offloading_delay(i, new_distances[i])
                    self.user_completion_delays[i] = self._calculate_completion_delay(i, offloading_ratio)
                    self.user_computation_delays[i] = self.user_completion_delays[i] - self.user_offloading_delays[i]
                    self.user_computation_energies[i] = self._calculate_computation_energy(i, offloading_ratio)
        self.step_count += 1
        # Aggregate delay/energy statistics over the tasks served so far.
        completed_indices = np.where(self.collected_tasks & self.task_generating_users)[0]
        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
            avg_offloading_delay = np.mean(self.user_offloading_delays[completed_indices])
            avg_computation_delay = np.mean(self.user_computation_delays[completed_indices])
        else:
            total_delay = 0.0
            total_comp_energy = 0.0
            avg_total_delay, avg_offloading_delay, avg_computation_delay = 0.0, 0.0, 0.0
        total_energy = self.total_flight_energy + total_comp_energy
        reward_info = self.calculate_reward(newly_collected, total_energy, total_delay, new_distances,
                                                      self.last_distances)
        reward = reward_info['total_reward']
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] += reward_info[key]
        self.last_distances = new_distances
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        # Episode ends on timeout or once every required task is collected.
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)
        self.observation_history.append(self._get_state())
        return self._get_gru_state(), reward, done, {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "flight_energy": self.total_flight_energy,
            "comp_energy": total_comp_energy,
            "delay_breakdown": {
                "avg_offloading_delay": avg_offloading_delay,
                "avg_computation_delay": avg_computation_delay,
                "avg_total_delay": avg_total_delay,
                "total_delay": total_delay,
            },
            "episode_reward_breakdown": self.episode_reward_breakdown
        }

    def _get_state(self):
        """Build the flat per-step observation vector (length 2 + 4*NUM_USERS + 2).

        Layout: [uav x, uav y] + per-user [distance, collected?, generating?,
        cpu demand] + [step fraction, distance to BS]; everything normalised
        to roughly [0, 1].
        """
        state = np.zeros(2 + NUM_USERS * 4 + 1 + 1)
        state[0:2] = self.uav_position / AREA_SIZE
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 4
            # Normalise by the area diagonal so distances stay in [0, 1].
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
            state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]
        state[-2] = self.step_count / MAX_STEPS
        bs_distance = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        state[-1] = bs_distance / np.sqrt(2 * AREA_SIZE ** 2)
        return state

    def _get_gru_state(self):
        """Return the observation window as a (SEQUENCE_LENGTH, state_dim) array."""
        # Defensive top-up; reset() normally pre-fills the window already.
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def calculate_reward(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Compute the shaped step reward and its components.

        Components: collection bonus (time-decayed), proximity shaping toward
        the nearest uncollected user, a growing per-step penalty, and — on
        the terminal step only — a completion bonus minus a delay/energy cost.
        All entries in the returned dict are pre-multiplied by REWARD_SCALE.
        """
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        # Guard against division by zero when no user generates tasks.
        total_required = sum(self.task_generating_users) if sum(self.task_generating_users) > 0 else 1
        completion_rate = collected_required / total_required
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            uncollected_distances_old = old_distances[uncollected_indices]
            uncollected_distances_new = new_distances[uncollected_indices]
            # Reward progress toward the currently closest uncollected user;
            # shaping is doubled once within 50 m.
            closest_idx = np.argmin(uncollected_distances_new)
            dist_diff = uncollected_distances_old[closest_idx] - uncollected_distances_new[closest_idx]
            if uncollected_distances_new[closest_idx] < 50:
                proximity_reward = dist_diff * 0.24
            else:
                proximity_reward = dist_diff * 0.12
        # Early collections are worth up to 50% more.
        time_factor = max(0, 1 - self.step_count / MAX_STEPS)
        collection_reward = newly_collected * 30.0 * (1 + time_factor * 0.5)
        progress = self.step_count / MAX_STEPS
        # Per-step cost grows over the episode to discourage dawdling.
        step_penalty = 0.15 + 0.3 * progress
        completion_reward = 0
        # Mirrors the termination condition evaluated in step().
        done = (self.step_count >= MAX_STEPS) or (collected_required == total_required)
        if done:
            step_efficiency = max(0.1, 1 - self.step_count / MAX_STEPS)
            base_completion = completion_rate * 400
            bonus = 1000 if completion_rate == 1.0 else 0
            completion_reward = (base_completion + bonus) * step_efficiency
        cost = 0
        if done and collected_required > 0:
            # Terminal cost: weighted per-task average of delay and energy.
            delay_penalty = total_delay * DELAY_WEIGHT * 120
            energy_penalty = total_energy * ENERGY_WEIGHT * 0.0015
            cost = (delay_penalty + energy_penalty) / collected_required
        total_reward = (collection_reward +
                        proximity_reward +
                        completion_reward -
                        step_penalty -
                        cost)
        return {
            'total_reward': total_reward * REWARD_SCALE,
            'proximity_reward': proximity_reward * REWARD_SCALE,
            'collection_reward': collection_reward * REWARD_SCALE,
            'completion_reward': completion_reward * REWARD_SCALE,
            'cost': -cost * REWARD_SCALE,
            'step_penalty': -step_penalty * REWARD_SCALE
        }

    def render(self, episode=0, clear_output=True):
        """Save a PNG snapshot of the current layout to results/step_<ep>_<step>.png.

        Users: green = collected, red = pending, gray = not generating.
        The blue circle is the collection radius; the dashed orange line links
        the UAV to the base station.
        NOTE(review): ``clear_output`` is accepted but never used here.
        """
        plt.figure(figsize=(10, 10))
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        if hasattr(self, 'current_speed'):
            title += f", 当前速度: {self.current_speed:.1f} m/s"
        plt.title(title)
        plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                    s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
        plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                     textcoords="offset points", xytext=(0, -25),
                     ha='center', fontsize=10, fontweight='bold')
        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)
            if self.task_generating_users[i] and self.collected_tasks[i]:
                offload_ratio = self.user_offloading_ratios[i]
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy\nOffload:{offload_ratio:.2f}"
            else:
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy"
            plt.annotate(task_info, (pos[0], pos[1]), fontsize=8, ha='center', va='bottom')
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]), MAX_DISTANCE_COLLECT, color='blue',
                            fill=False, alpha=0.3)
        plt.gca().add_patch(circle)
        plt.plot([self.uav_position[0], BASE_STATION_POSITION[0]],
                 [self.uav_position[1], BASE_STATION_POSITION[1]],
                 'orange', linestyle='--', alpha=0.5, linewidth=1)
        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        # NOTE(review): this second title overwrites the earlier one, dropping
        # the speed annotation — confirm which title is intended.
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        plt.title(title)
        plt.grid(True)
        plt.legend()
        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()

class GRUActor(nn.Module):
    """Recurrent deterministic policy network.

    Encodes an observation window of shape (batch, SEQUENCE_LENGTH, state_dim)
    with a single-layer GRU, then maps the final hidden feature through a
    LayerNorm MLP to a tanh-squashed action scaled by ``max_action``. The GRU
    hidden state persists across calls until explicitly reset or until the
    incoming batch size changes.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.max_action = max_action
        # Attribute names are kept stable so state_dict keys do not change.
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.ln1 = nn.LayerNorm(256)
        self.layer2 = nn.Linear(256, 128)
        self.ln2 = nn.LayerNorm(128)
        self.layer3 = nn.Linear(128, action_dim)
        self.hidden = None
        self._init_weights()

    def _init_weights(self):
        # Xavier weights, zero biases, for every fully connected layer.
        linear_layers = (m for m in self.modules() if isinstance(m, nn.Linear))
        for layer in linear_layers:
            nn.init.xavier_uniform_(layer.weight)
            nn.init.constant_(layer.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        """Return an action in [-max_action, max_action] for each sequence."""
        batch = state.size(0)
        hidden_stale = self.hidden is None or self.hidden.size(1) != batch
        if reset_hidden or hidden_stale:
            self.reset_hidden(batch)
        gru_out, self.hidden = self.gru(state, self.hidden)
        # Only the representation of the last time step feeds the MLP head.
        feat = gru_out[:, -1]
        feat = self.ln1(torch.relu(self.layer1(feat)))
        feat = self.ln2(torch.relu(self.layer2(feat)))
        return self.max_action * torch.tanh(self.layer3(feat))

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state (shape: num_layers x batch x hidden)."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size, device=device)

class GRUCritic(nn.Module):
    """Twin-Q recurrent critic for TD3.

    Two independent GRU encoders (the ``q1_*`` and ``q2_*`` attribute groups)
    each summarise the observation window; the final hidden feature is
    concatenated with the action and passed through a LayerNorm MLP head.

    The original implementation repeated the per-branch plumbing three times
    (twice in ``forward``, once in ``Q1``), which invited drift between the
    copies; the shared logic is now factored into :meth:`_branch_q` and the
    hidden-state guards into ``_ensure_q*_hidden``. Attribute names are
    unchanged so state_dict keys (and target-network loading) are unaffected.
    """

    def __init__(self, state_dim, action_dim):
        super(GRUCritic, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # Q1 branch.
        self.q1_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, 1)
        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Q2 branch.
        self.q2_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, 1)
        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        self.q1_hidden = None
        self.q2_hidden = None
        self._init_weights()

    def _init_weights(self):
        """Xavier-initialise every linear layer; biases start at 0.01.

        NOTE(review): the actor initialises biases to 0.0 — confirm the
        asymmetry here is deliberate before unifying.
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)

    @staticmethod
    def _branch_q(gru, hidden, state, action, layer1, ln1, layer2, ln2, output):
        """Run one Q branch; returns (q_value, new_hidden).

        Mirrors the original inline pipeline exactly: GRU over the sequence,
        take the last time step, concatenate the action, LayerNorm MLP head.
        """
        gru_out, new_hidden = gru(state, hidden.to(state.device))
        x = torch.cat([gru_out[:, -1], action], dim=1)
        x = ln1(torch.relu(layer1(x)))
        x = ln2(torch.relu(layer2(x)))
        return output(x), new_hidden

    def _ensure_q1_hidden(self, batch_size, reset_hidden):
        # Re-initialise when asked, on first use, or on a batch-size change.
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch_size:
            self.reset_q1_hidden(batch_size)

    def _ensure_q2_hidden(self, batch_size, reset_hidden):
        if reset_hidden or self.q2_hidden is None or self.q2_hidden.size(1) != batch_size:
            self.reset_q2_hidden(batch_size)

    def forward(self, state, action, reset_hidden=False):
        """Return both Q estimates (q1_value, q2_value) for (state, action)."""
        batch_size = state.size(0)
        self._ensure_q1_hidden(batch_size, reset_hidden)
        self._ensure_q2_hidden(batch_size, reset_hidden)
        q1_value, self.q1_hidden = self._branch_q(
            self.q1_gru, self.q1_hidden, state, action,
            self.q1_layer1, self.q1_ln1, self.q1_layer2, self.q1_ln2, self.q1_output)
        q2_value, self.q2_hidden = self._branch_q(
            self.q2_gru, self.q2_hidden, state, action,
            self.q2_layer1, self.q2_ln1, self.q2_layer2, self.q2_ln2, self.q2_output)
        return q1_value, q2_value

    def Q1(self, state, action, reset_hidden=False):
        """Return only the Q1 estimate (used by the delayed actor update)."""
        batch_size = state.size(0)
        self._ensure_q1_hidden(batch_size, reset_hidden)
        q1_value, self.q1_hidden = self._branch_q(
            self.q1_gru, self.q1_hidden, state, action,
            self.q1_layer1, self.q1_ln1, self.q1_layer2, self.q1_ln2, self.q1_output)
        return q1_value

    def reset_hidden(self, batch_size=1):
        """Zero both branches' GRU hidden states."""
        self.reset_q1_hidden(batch_size)
        self.reset_q2_hidden(batch_size)

    def reset_q1_hidden(self, batch_size=1):
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)

    def reset_q2_hidden(self, batch_size=1):
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)

class ReplayBuffer:
    """Fixed-capacity FIFO store of (s, a, r, s', done) transitions."""

    def __init__(self, max_size=BUFFER_SIZE):
        # deque(maxlen=...) silently evicts the oldest transition when full.
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Append one transition tuple."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw a uniform random mini-batch without replacement.

        Returns five float32 numpy arrays (state, action, reward, next_state,
        done), each stacked along a new leading batch axis. If fewer than
        ``batch_size`` transitions are stored, the whole buffer is sampled.
        """
        count = min(len(self.buffer), batch_size)
        batch = random.sample(self.buffer, count)
        columns = zip(*batch)
        return tuple(np.stack(column).astype(np.float32) for column in columns)

    def __len__(self):
        return len(self.buffer)

class EWC:
    """Elastic Weight Consolidation helper for continual task switching.

    Stores a snapshot of model parameters plus a diagonal Fisher-information
    estimate at each task boundary, then penalises later drift away from the
    snapshot via :meth:`calculate_ewc_loss`. With the file defaults
    (EWC_LAMBDA = 0.0, FISHER_SAMPLE_SIZE = 0) the penalty is effectively
    disabled.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        # name -> Fisher diagonal used as per-parameter penalty weight.
        self.importance = {}
        # name -> parameter snapshot from the previous task.
        self.old_params = {}
        self.fisher_diagonal = {}  # NOTE(review): written nowhere in this class

    def _calculate_fisher_info(self, replay_buffer):
        """Estimate the diagonal Fisher information from replayed transitions.

        Draws ``fisher_sample_size`` single transitions, backpropagates a
        surrogate loss, and averages squared gradients per parameter.
        Returns all-zero tensors when the sample size or buffer is empty.
        """
        fisher = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                fisher[name] = torch.zeros_like(param).to(device)
        self.model.train()
        samples_count = min(self.fisher_sample_size, len(replay_buffer))
        if samples_count <= 0: return fisher
        for _ in range(samples_count):
            states, actions, _, _, _ = replay_buffer.sample(1)
            states = torch.FloatTensor(states).to(device)
            actions = torch.FloatTensor(actions).to(device)
            self.model.zero_grad()
            if isinstance(self.model, GRUActor):
                self.model.reset_hidden(1)
                # Surrogate: squared error between policy output and the
                # replayed action. The 'sample' branch supports stochastic
                # actors; GRUActor as defined here has no such method.
                if hasattr(self.model, 'sample'):
                    _, _, mean = self.model.sample(states)
                    loss = ((mean - actions) ** 2).mean()
                else:
                    outputs = self.model(states)
                    loss = ((outputs - actions) ** 2).mean()
            else:
                # Critic: use the mean of the first Q head as the surrogate.
                self.model.reset_hidden(1)
                outputs, _ = self.model(states, actions)
                loss = outputs.mean()
            loss.backward()
            for name, param in self.model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    # Running average of squared gradients (diagonal Fisher).
                    fisher[name] += param.grad.pow(2) / samples_count
        return fisher

    def store_task_parameters(self, task_id, replay_buffer):
        """Snapshot current parameters and recompute Fisher info at a task boundary."""
        print(f"Storing parameters for task {task_id} and computing Fisher information matrix")
        self.old_params = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.old_params[name] = param.data.clone()
        self.importance = self._calculate_fisher_info(replay_buffer)
        print(f"Stored {len(self.old_params)} parameters and computed Fisher matrices")

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Return lam * sum_i F_i * (theta_i - theta*_i)^2, or 0 before any snapshot."""
        loss = 0
        if not self.old_params or not self.importance: return loss
        for name, param in self.model.named_parameters():
            if name in self.old_params and name in self.importance and param.requires_grad:
                loss += torch.sum(self.importance[name] * (param - self.old_params[name]).pow(2))
        return lam * loss

class TD3:
    """Twin Delayed DDPG agent with GRU actor/critic and optional EWC.

    Standard TD3 ingredients: twin critics with clipped double-Q targets,
    target policy smoothing noise, delayed actor and target updates, Polyak
    averaging. Task switching (continual learning) snapshots EWC statistics
    and clears the replay buffer.
    """

    def __init__(self, state_dim, action_dim, max_action):
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        self.critic = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()
        # Target policy smoothing: noise stddev and clipping range.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2  # actor/target updates every 2 critic updates
        self.total_it = 0
        self.ewc_actor = EWC(self.actor)
        self.ewc_critic = EWC(self.critic)
        self.current_task = 1
        # Reward-driven LR decay. NOTE(review): the 'verbose' kwarg is
        # deprecated in recent PyTorch releases — confirm the pinned version.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True)

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Return a clipped action for one observation window.

        Accepts either (seq_len, state_dim) or (1, seq_len, state_dim); adds
        Gaussian exploration noise scaled by ``noise_scale`` when positive.
        """
        if len(state.shape) == 2:
            # Promote a single sequence to a batch of one.
            state = np.expand_dims(state, 0)
        state = torch.FloatTensor(state).to(device)
        action = self.actor(state).cpu().data.numpy().flatten()
        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise
        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Prepare for a new continual-learning task.

        Snapshots EWC parameters/Fisher info from the finished task, clears
        the replay buffer, and zeroes both networks' GRU hidden states.
        """
        print(f"\nSwitching to task {task_id}")
        if self.current_task > 0 and len(self.memory) > 0:
            self.ewc_actor.store_task_parameters(self.current_task, self.memory)
            self.ewc_critic.store_task_parameters(self.current_task, self.memory)
        print(f"Clearing replay buffer for new task.")
        self.memory.buffer.clear()
        self.current_task = task_id
        self.actor.reset_hidden()
        self.critic.reset_hidden()
        print(f"Reset GRU states for new task {task_id}")

    def train(self):
        """Run one TD3 update step; returns a loss dict or None if the buffer is short.

        NOTE(review): total_it is incremented even on the early return, so the
        delayed-update phase starts counting before real training does.
        """
        self.total_it += 1
        if len(self.memory) < BATCH_SIZE:
            return

        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Fresh hidden states sized to the mini-batch for every network.
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        with torch.no_grad():
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target policy smoothing: clipped Gaussian noise on the target action.
            noise = torch.FloatTensor(action.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q: bootstrap from the smaller target estimate.
            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q_value = reward + (1 - done) * GAMMA * target_q

        current_q1, current_q2 = self.critic(state, action)

        critic_loss = F.mse_loss(current_q1, target_q_value) + F.mse_loss(current_q2, target_q_value)

        critic_ewc_loss = 0
        if self.current_task > 1:
            critic_ewc_loss = self.ewc_critic.calculate_ewc_loss()
            critic_loss += critic_ewc_loss

        self.critic_optimizer.zero_grad()
        # NOTE(review): retain_graph may be unnecessary since the hidden
        # states are re-initialised before the actor pass — verify before
        # removing, as the stored GRU hiddens do reference this graph.
        critic_loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss = 0
        actor_ewc_loss = 0

        # Delayed policy update: actor and target nets move every policy_freq steps.
        if self.total_it % self.policy_freq == 0:
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)

            # Deterministic policy gradient: maximise Q1 of the actor's action.
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            if self.current_task > 1:
                actor_ewc_loss = self.ewc_actor.calculate_ewc_loss()
                actor_loss += actor_ewc_loss

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Polyak-average both target networks toward the online networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        # actor_loss / ewc losses may be plain numbers (no update this step)
        # or tensors, so convert conditionally.
        return {
            "critic_loss": critic_loss.item(),
            "actor_loss": actor_loss if isinstance(actor_loss, (int, float)) else actor_loss.item(),
            "critic_ewc_loss": critic_ewc_loss.item() if isinstance(critic_ewc_loss, torch.Tensor) else critic_ewc_loss,
            "actor_ewc_loss": actor_ewc_loss.item() if isinstance(actor_ewc_loss, torch.Tensor) else actor_ewc_loss
        }

    def update_lr_schedulers(self, reward):
        """Feed the episode reward to both plateau schedulers (mode='max')."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)

def _plot_training_curves(rewards_history, smoothed_rewards, collection_history,
                          energy_history, delay_history, losses,
                          episodes_per_task, global_episode):
    """Render and save the 5-panel training-progress figure to results/."""
    plt.figure(figsize=(25, 5))

    plt.subplot(1, 5, 1)
    plt.plot(rewards_history, alpha=0.3, color='blue', label='Raw')
    plt.plot(smoothed_rewards, color='red', label='Smoothed')
    plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
    plt.axvline(x=3 * episodes_per_task, color='orange', linestyle='--', label='Phase 3->4')
    plt.title("Reward")
    plt.xlabel("Episode")
    plt.ylabel("Reward")
    plt.legend()
    plt.grid(True)

    plt.subplot(1, 5, 2)
    plt.plot(collection_history)
    plt.axvline(x=episodes_per_task, color='green', linestyle='--')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
    plt.title("Collected Tasks")
    plt.xlabel("Episode")
    plt.ylabel("Number of Tasks")
    plt.grid(True)

    plt.subplot(1, 5, 3)
    plt.plot(energy_history)
    plt.axvline(x=episodes_per_task, color='green', linestyle='--')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
    plt.title("Total Energy")
    plt.xlabel("Episode")
    plt.ylabel("Energy")
    plt.grid(True)

    plt.subplot(1, 5, 4)
    plt.plot(delay_history)
    plt.axvline(x=episodes_per_task, color='green', linestyle='--')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
    plt.title("Avg Delay")
    plt.xlabel("Episode")
    plt.ylabel("Delay (s)")
    plt.grid(True)

    plt.subplot(1, 5, 5)
    # Loss curves only exist once the replay buffer is warm enough to train.
    if losses["critic"]: plt.plot(losses["critic"], label='Critic Loss')
    if losses["actor"]: plt.plot(losses["actor"], label='Actor Loss')
    plt.axvline(x=episodes_per_task, color='green', linestyle='--')
    plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
    plt.title("Training Loss")
    plt.xlabel("Episode")
    plt.ylabel("Loss")
    plt.legend()
    plt.grid(True)

    plt.tight_layout()
    plt.savefig(f"results/training_curves_episode_{global_episode}.png")
    plt.close()


def _save_checkpoint(agent, global_episode, phase, rewards_history, collection_history,
                     energy_history, delay_history, best_reward, best_collection):
    """Persist network weights, optimizer states and training histories."""
    torch.save({
        'actor_state_dict': agent.actor.state_dict(),
        'critic_state_dict': agent.critic.state_dict(),
        'actor_optimizer': agent.actor_optimizer.state_dict(),
        'critic_optimizer': agent.critic_optimizer.state_dict(),
        'episode': global_episode,
        'phase': phase,
        'rewards_history': rewards_history,
        'collection_history': collection_history,
        'energy_history': energy_history,
        'delay_history': delay_history,
        'best_reward': best_reward,
        'best_collection': best_collection
    }, f"results/checkpoint_episode_{global_episode}.pt")


def train():
    """Run the 4-phase continual-learning training loop.

    Each phase changes the set of task-generating users (via
    ``env.update_task_generating_users``) and notifies the agent
    (``agent.switch_task``) so EWC can protect earlier phases. Per-episode
    progress is printed, periodic plots/checkpoints are written to results/,
    and per-phase actor/critic weights are saved.

    Returns:
        (agent, env): the trained TD3 agent and the environment instance.
    """
    os.makedirs("results", exist_ok=True)
    env = Environment()

    # State layout: UAV xy (2) + 4 features per user + current speed + step counter.
    state_dim = 2 + NUM_USERS * 4 + 1 + 1
    action_dim = 4
    max_action = 1

    agent = TD3(state_dim, action_dim, max_action)
    total_episodes = 800
    episodes_per_task = 200
    eval_freq = 50

    rewards_history = []
    smoothed_rewards = []
    collection_history = []
    energy_history = []
    delay_history = []
    best_reward = -float('inf')
    best_collection = 0
    losses = {"critic": [], "actor": []}

    start_time = time.time()
    for phase in range(1, 5):
        env.update_task_generating_users(phase)
        agent.switch_task(phase)
        # Anneal exploration noise linearly within each phase.
        phase_noise = np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, episodes_per_task)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode
            state = env.reset()
            agent.actor.reset_hidden()
            agent.critic.reset_hidden()
            episode_reward = 0
            last_collection = 0
            episode_losses = {"critic": [], "actor": []}
            current_noise = phase_noise[episode - 1]

            for step in range(1, MAX_STEPS + 1):
                action = agent.select_action(state, noise_scale=current_noise)
                next_state, reward, done, info = env.step(action)
                agent.memory.add(state, action, reward, next_state, done)
                loss_info = agent.train()
                if loss_info:
                    episode_losses["critic"].append(loss_info["critic_loss"])
                    episode_losses["actor"].append(loss_info["actor_loss"])
                    # Cache the most recent EWC penalties for the log line below.
                    agent.last_ewc_losses = {
                        'actor_ewc': loss_info.get("actor_ewc_loss", 0.0),
                        'critic_ewc': loss_info.get("critic_ewc_loss", 0.0)
                    }

                state = next_state
                episode_reward += reward
                last_collection = info["collected_required"]
                if done:
                    if global_episode % eval_freq == 0:
                        print(f"--- Episode {global_episode} finished. Generating final trajectory plot. ---")
                        env.render(global_episode)
                    break

            rewards_history.append(episode_reward)
            collection_history.append(last_collection)
            energy_history.append(info["energy"])
            delay_history.append(info["delay"])

            # 10-episode moving average for the smoothed reward curve.
            if len(rewards_history) >= 10:
                smoothed_rewards.append(np.mean(rewards_history[-10:]))
            else:
                smoothed_rewards.append(episode_reward)
            if episode_losses["critic"]: losses["critic"].append(np.mean(episode_losses["critic"]))
            if episode_losses["actor"]: losses["actor"].append(np.mean(episode_losses["actor"]))
            agent.update_lr_schedulers(episode_reward)

            # Save the best actor per phase; collection ratio first, reward breaks ties.
            current_required = info["total_required"]
            collection_ratio = last_collection / current_required if current_required > 0 else 0
            if collection_ratio > best_collection or (
                    collection_ratio == best_collection and episode_reward > best_reward):
                best_reward = episode_reward
                best_collection = collection_ratio
                torch.save(agent.actor.state_dict(), f"results/best_actor_phase_{phase}.pth")

            elapsed_time = time.time() - start_time
            collected_required = info.get("collected_required", 0)
            total_required = info.get("total_required", 1)

            # Mean offloading ratio over active users whose tasks were collected.
            completed_ratios = [env.user_offloading_ratios[i] for i in range(NUM_USERS)
                                if env.task_generating_users[i] and env.collected_tasks[i]]
            avg_offloading_ratio = np.mean(completed_ratios) if completed_ratios else 0.0

            avg_actor_loss = np.mean(episode_losses["actor"]) if episode_losses["actor"] else 0.0
            avg_critic_loss = np.mean(episode_losses["critic"]) if episode_losses["critic"] else 0.0

            avg_actor_ewc_loss = 0.0
            avg_critic_ewc_loss = 0.0
            if episode_losses["actor"] and hasattr(agent, 'last_ewc_losses'):
                avg_actor_ewc_loss = agent.last_ewc_losses.get('actor_ewc', 0.0)
                avg_critic_ewc_loss = agent.last_ewc_losses.get('critic_ewc', 0.0)

            energy_str = ""
            if 'flight_energy' in info and 'comp_energy' in info:
                energy_str = f"E(Flight:{info['flight_energy']:.1f} Comp:{info['comp_energy']:.1f})"

            delay_str = ""
            if 'delay_breakdown' in info:
                db = info['delay_breakdown']
                delay_str = f"D(Tot:{db['total_delay']:.2f}s AvgComp:{db['avg_computation_delay']:.3f}s AvgOff:{db['avg_offloading_delay']:.3f}s)"

            # Guarded access: a missing breakdown must not abort training with
            # a KeyError (the original code indexed it unconditionally).
            reward_str = ""
            rb = info.get("episode_reward_breakdown")
            if rb is not None:
                reward_str = (f"Pro:{rb['proximity_reward']:.1f} "
                              f"Col:{rb['collection_reward']:.1f} "
                              f"Comp:{rb['completion_reward']:.1f} "
                              f"Cost:{rb['cost']:.1f} "
                              f"Step:{rb['step_penalty']:.1f}")

            print(
                f"P:{phase} Ep {episode:3d}/{episodes_per_task} "
                f"Tasks:{collected_required:2d}/{total_required:2d} "
                f"Steps:{env.step_count:3d} "
                f"Speed:{env.current_speed:.1f} m/s "
                f"Noise:{current_noise:.3f} "
                f"AvgOffload: {avg_offloading_ratio:.2f} "
                f"Loss(A/C/EWC_A/EWC_C) {avg_actor_loss:.3f}/{avg_critic_loss:.3f}/{avg_actor_ewc_loss:.3f}/{avg_critic_ewc_loss:.3f} | "
                f"Total Rwd: {episode_reward:.2f} "
                f"[{reward_str}] | "
                f"Total E: {info.get('energy', 0):.1f} "
                f"[{energy_str}] | "
                f"Avg D: {info.get('delay', 0):.3f}s "
                f"[{delay_str}] | "
                f"Time: {elapsed_time:.1f}s"
            )

            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                _plot_training_curves(rewards_history, smoothed_rewards, collection_history,
                                      energy_history, delay_history, losses,
                                      episodes_per_task, global_episode)
                _save_checkpoint(agent, global_episode, phase, rewards_history,
                                 collection_history, energy_history, delay_history,
                                 best_reward, best_collection)

        torch.save(agent.actor.state_dict(), f"results/actor_phase_{phase}.pth")
        torch.save(agent.critic.state_dict(), f"results/critic_phase_{phase}.pth")

    print(f"Training completed! Best result: {best_collection * 100:.1f}% tasks, Reward: {best_reward:.2f}")
    return agent, env

def test_and_visualize(agent, env, model_path="results/actor_phase_4.pth", phase=4):
    """Evaluate a saved actor on one phase and save trajectory/reward plots.

    Loads actor weights from ``model_path``, runs a single deterministic
    episode (no exploration noise), then writes a trajectory figure and
    per-step reward figures under results/ and prints collection,
    offloading and delay statistics.

    Args:
        agent: trained TD3 agent (its actor is overwritten by the checkpoint).
        env: the Environment instance to evaluate in.
        model_path: path to the actor ``state_dict`` to load.
        phase: task phase whose user set should be activated.
    """
    # map_location lets CPU-only machines evaluate GPU-trained checkpoints.
    agent.actor.load_state_dict(torch.load(model_path, map_location=device))
    agent.actor.eval()
    env.update_task_generating_users(phase)
    state = env.reset()
    agent.actor.reset_hidden()
    total_reward = 0
    step_rewards = []
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS)
    collection_order = []

    for step in range(1, MAX_STEPS + 1):
        action = agent.select_action(state, noise_scale=0)
        collected_before = env.collected_tasks.copy()
        next_state, reward, done, info = env.step(action)
        # Record the position AFTER the move so trajectory[step] is where the
        # UAV actually is at step `step`. (Appending before env.step duplicated
        # the start point and dropped the final position from the plot.)
        trajectory.append(env.uav_position.copy())
        for i in range(NUM_USERS):
            if env.task_generating_users[i] and env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step
                collection_order.append(i)
        total_reward += reward
        step_rewards.append(reward)
        state = next_state
        if step % 5 == 0 or done:
            env.render(step)
        if done:
            break

    trajectory = np.array(trajectory)
    plt.figure(figsize=(12, 10))

    # Base station marker.
    plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
    plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                 textcoords="offset points", xytext=(0, -25),
                 ha='center', fontsize=10, fontweight='bold')

    # Users: green = collected, red = active but missed, gray = inactive.
    for i, (x, y) in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            if env.collected_tasks[i]:
                color = 'green'
                plt.scatter(x, y, s=150, c=color, marker='o')
                offload_ratio = env.user_offloading_ratios[i]
                plt.annotate(f"用户 {i + 1}\n(步数 {int(collection_times[i])})\nOffload: {offload_ratio:.2f}",
                             (x, y), textcoords="offset points", xytext=(0, 10),
                             ha='center', fontsize=10)
            else:
                color = 'red'
                plt.scatter(x, y, s=150, c=color, marker='o')
                plt.annotate(f"用户 {i + 1}\n(未收集)", (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
        else:
            color = 'gray'
            plt.scatter(x, y, s=100, c=color, marker='o')
            plt.annotate(f"用户 {i + 1}\n(不产生任务)", (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)

    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    # Annotate every 10th trajectory point with its step index.
    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]), fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Dashed links between each user and the UAV position at collection time.
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and env.collected_tasks[i]:
            step = int(collection_times[i])
            if step < len(trajectory):
                uav_pos = trajectory[step]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]], 'g--', alpha=0.5)

    plt.plot([trajectory[-1, 0], BASE_STATION_POSITION[0]],
             [trajectory[-1, 1], BASE_STATION_POSITION[1]],
             'orange', linestyle='--', alpha=0.5, linewidth=2, label='UAV-BS Link')

    plt.title(
        f"UAV任务收集轨迹 (阶段{phase}: 收集 {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)} 任务, 步数: {env.step_count})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.legend()
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig(f"results/final_uav_trajectory_phase_{phase}.png")
    plt.close()

    # Per-step and cumulative reward curves.
    plt.figure(figsize=(15, 5))
    plt.subplot(1, 2, 1)
    plt.plot(step_rewards)
    plt.title("步奖励")
    plt.xlabel("步数")
    plt.ylabel("奖励")
    plt.grid(True)
    plt.subplot(1, 2, 2)
    plt.plot(np.cumsum(step_rewards))
    plt.title("累计奖励")
    plt.xlabel("步数")
    plt.ylabel("累计奖励")
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(f"results/test_rewards_phase_{phase}.png")
    plt.close()

    print(f"\n测试结果 (阶段 {phase}):")
    collected_count = sum(env.collected_tasks & env.task_generating_users)
    total_count = sum(env.task_generating_users)
    percentage = collected_count / total_count * 100 if total_count > 0 else 0
    print(f"收集任务: {collected_count}/{total_count} ({percentage:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

    print("\n卸载决策统计:")
    collected_indices = [i for i in range(NUM_USERS)
                         if env.task_generating_users[i] and env.collected_tasks[i]]
    if collected_indices:
        avg_offload_ratio = np.mean([env.user_offloading_ratios[i] for i in collected_indices])
        print(f"平均卸载比例: {avg_offload_ratio:.3f}")
        # Bucket collected tasks by how much of each was offloaded.
        local_count = sum(1 for i in collected_indices if env.user_offloading_ratios[i] < 0.1)
        mixed_count = sum(1 for i in collected_indices if 0.1 <= env.user_offloading_ratios[i] < 0.9)
        remote_count = sum(1 for i in collected_indices if env.user_offloading_ratios[i] >= 0.9)
        print(f"本地处理: {local_count}, 混合处理: {mixed_count}, 远程处理: {remote_count}")

    print("\n任务收集详情:")
    collection_indices = [(i, int(collection_times[i])) for i in range(NUM_USERS)
                          if env.task_generating_users[i] and env.collected_tasks[i]]
    collection_indices.sort(key=lambda x: x[1])
    for i, step in collection_indices:
        offload_ratio = env.user_offloading_ratios[i]
        print(f"用户 {i + 1}: 在步数 {step} 收集, 卸载比例: {offload_ratio:.3f}")
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and not env.collected_tasks[i]:
            print(f"用户 {i + 1}: 未收集")

if __name__ == "__main__":
    # Train through all four phases, then evaluate each phase's saved actor.
    trained_agent, trained_env = train()
    separator = "=" * 60
    print("\n" + separator)
    print("训练完成！开始测试各阶段模型性能...")
    print(separator)
    for test_phase in range(1, 5):
        header = "=" * 20
        print(f"\n{header} 测试阶段 {test_phase} {header}")
        test_and_visualize(trained_agent, trained_env,
                           model_path=f"results/actor_phase_{test_phase}.pth",
                           phase=test_phase)
