import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time
import copy

# ---------------------------------------------------------------------------
# Reproducibility: seed every RNG used below (torch CPU/GPU, numpy, random).
# ---------------------------------------------------------------------------
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# SimHei font so matplotlib can draw the Chinese labels used in render().
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# --- Curriculum / task phases -----------------------------------------------
EPISODES_PER_TASK = [200, 200, 200, 200]  # total episodes in each of the four phases
TASK_USERS = [10, 12, 8, 10]  # number of task-generating users in each phase

# --- Environment geometry ----------------------------------------------------
AREA_SIZE = 200
NUM_USERS = 12  # maximum number of users
MAX_STEPS = 300
MAX_DISTANCE_COLLECT = 15

# --- UAV parameters ----------------------------------------------------------
UAV_HEIGHT = 30.0
UAV_COMPUTE_CAPACITY = 1e10
MIN_UAV_SPEED = 2.0
MAX_UAV_SPEED = 20.0
DEFAULT_UAV_SPEED = 10.0

# --- Base station (MEC server) ----------------------------------------------
BASE_STATION_POSITION = np.array([0.0, 0.0])
BASE_STATION_COMPUTE_CAPACITY = 2e11
BASE_STATION_HEIGHT = 10.0

# --- RL hyper-parameters -----------------------------------------------------
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99
TAU = 0.005
BUFFER_SIZE = 200000
BATCH_SIZE = 128
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.15

# --- Distributional (quantile) critic ---------------------------------------
NUM_QUANTILES = 51
# Quantile midpoints tau_i = (2i - 1) / (2N), i = 1..N.
QUANTILE_TAU = torch.FloatTensor([(2 * i - 1) / (2.0 * NUM_QUANTILES) for i in range(1, NUM_QUANTILES + 1)]).to(device)
KAPPA = 1.0  # Huber-loss threshold

# --- Meta-learning (MAML) ----------------------------------------------------
META_LR = 1e-4
META_ADAPTATION_STEPS = 5
FINE_TUNE_EPISODES = 20

# --- Recurrent observation encoding ------------------------------------------
SEQUENCE_LENGTH = 10
HIDDEN_SIZE = 128

# --- Communication model ------------------------------------------------------
BANDWIDTH = 1e6
USER_TRANSMIT_POWER = 0.1
CHANNEL_GAIN_REF_DB = 30.0
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH  # thermal noise kTB
RICE_FACTOR = 5

# Task workload ranges: size in bits, CPU cycles required.
TASK_SIZE_BITS = [1e6, 2e6]
TASK_CPU_CYCLES = [5e8, 15e8]

# --- UAV flight-power model coefficients -------------------------------------
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81
AIR_DENSITY = 1.225
ROTOR_RADIUS = 0.4
NUM_ROTORS = 4
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-27

# --- Cost weighting for the reward function ----------------------------------
DELAY_WEIGHT = 0.7
ENERGY_WEIGHT = 0.3
DELAY_SCALE = 100.0
ENERGY_SCALE = 0.001


class Environment:
    """UAV-assisted mobile edge computing (MEC) environment.

    A single UAV flies over an AREA_SIZE x AREA_SIZE square, collects
    computation tasks from ground users that come within
    ``MAX_DISTANCE_COLLECT`` metres, and chooses a per-task offloading ratio
    (local UAV compute vs. forwarding to the base station). Observations are
    stacked into a length-``SEQUENCE_LENGTH`` history for recurrent agents.
    """

    def __init__(self):
        # Static per-run user layout and task workloads (randomised once).
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        # Per-user latency / energy bookkeeping, filled in as tasks are collected.
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.total_flight_energy = 0
        self.user_offloading_ratios = np.zeros(NUM_USERS)
        # Fix: initialise current_speed here so render() can report it even
        # before the first reset()/step() call.
        self.current_speed = DEFAULT_UAV_SPEED
        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1
        # Accumulated reward components for the running episode (for logging).
        self.episode_reward_breakdown = {
            'collection_reward': 0.0,
            'proximity_reward': 0.0,
            'completion_reward': 0.0,
            'cost': 0.0,
            'step_penalty': 0.0
        }

    def _calculate_rice_channel_gain(self, distance_2d, height=UAV_HEIGHT):
        """Return the instantaneous channel power gain for a Rician-fading
        link with path loss over the 3-D distance (clamped to >= 1 m)."""
        distance_3d = np.sqrt(distance_2d ** 2 + height ** 2)
        if distance_3d < 1.0:
            distance_3d = 1.0
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        # Rician fading: deterministic LoS component plus CN(0, 1) NLoS part.
        K = RICE_FACTOR
        h_los = 1.0
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """Delay (s) to upload the user's whole task to the UAV at the
        Shannon rate of the current fading realisation."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return self.task_sizes[user_index] / data_rate

    def _calculate_uav_to_bs_delay(self, task_size):
        """Delay (s) to forward `task_size` bits from the UAV to the base
        station over the height difference between UAV and BS antennas.

        NOTE(review): this reuses USER_TRANSMIT_POWER for the UAV->BS link —
        presumably the UAV transmits at the same power; confirm.
        """
        bs_distance_2d = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        height_diff = abs(UAV_HEIGHT - BASE_STATION_HEIGHT)
        channel_gain = self._calculate_rice_channel_gain(bs_distance_2d, height_diff)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return task_size / data_rate

    def _calculate_completion_delay(self, user_index, offloading_ratio):
        """End-to-end completion delay for one task given the offload split.

        ratio == 0: compute everything on the UAV.
        ratio == 1: forward everything to the BS.
        otherwise:  UAV and BS work in parallel; the slower branch dominates.
        """
        total_task_size = self.task_sizes[user_index]
        total_cpu_cycles = self.task_cpu_cycles[user_index]
        user_to_uav_delay = self._calculate_offloading_delay(user_index,
                                                             np.linalg.norm(
                                                                 self.uav_position - self.user_positions[user_index]))
        if offloading_ratio == 0:
            local_computation_delay = total_cpu_cycles / UAV_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + local_computation_delay
        elif offloading_ratio == 1:
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(total_task_size)
            bs_computation_delay = total_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay
        else:
            local_task_size = total_task_size * (1 - offloading_ratio)
            bs_task_size = total_task_size * offloading_ratio
            local_cpu_cycles = total_cpu_cycles * (1 - offloading_ratio)
            bs_cpu_cycles = total_cpu_cycles * offloading_ratio
            local_computation_delay = local_cpu_cycles / UAV_COMPUTE_CAPACITY
            local_total_delay = user_to_uav_delay + local_computation_delay
            uav_to_bs_delay = self._calculate_uav_to_bs_delay(bs_task_size)
            bs_computation_delay = bs_cpu_cycles / BASE_STATION_COMPUTE_CAPACITY
            bs_total_delay = user_to_uav_delay + uav_to_bs_delay + bs_computation_delay
            # Parallel execution: completion is governed by the slower branch.
            total_delay = max(local_total_delay, bs_total_delay)
        return total_delay

    def _calculate_flight_energy(self, distance_moved, actual_speed=None, time_delta=1.0):
        """Propulsion energy over `time_delta` seconds at the given speed.

        NOTE(review): rotary-wing power model (induced + profile + parasite
        terms); the coefficient usage looks simplified relative to the
        standard Rotor model — confirm against the source paper.
        """
        if actual_speed is None:
            speed = distance_moved / time_delta
        else:
            speed = actual_speed
        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index, offloading_ratio):
        """UAV computation energy for the locally-executed task fraction,
        using the effective-switched-capacitance model E = k * f^2 * C."""
        local_cpu_cycles = self.task_cpu_cycles[user_index] * (1 - offloading_ratio)
        working_frequency = UAV_COMPUTE_CAPACITY
        energy = EFFECTIVE_SWITCHED_CAPACITANCE * (working_frequency ** 2) * local_cpu_cycles
        return energy

    def update_task_generating_users(self, phase):
        """Select which users generate tasks for the given curriculum phase.

        Phases 1 and 4 use the first TASK_USERS[phase-1] users (deterministic);
        phases 2 and 3 sample users at random without replacement.
        """
        self.current_phase = phase
        task_count = TASK_USERS[phase - 1]

        if phase == 1 or phase == 4:
            indices = list(range(task_count))
        else:
            indices = np.random.choice(NUM_USERS, task_count, replace=False)

        self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
        self.task_generating_users[indices] = True

        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Reset the episode state and return the initial stacked observation
        (the history is pre-filled with copies of the initial state)."""
        self.uav_position = np.array([0.0, 0.0], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.user_completion_delays.fill(0)
        self.user_offloading_delays.fill(0)
        self.user_computation_delays.fill(0)
        self.user_computation_energies.fill(0)
        self.user_offloading_ratios.fill(0)
        self.total_flight_energy = 0
        self.current_speed = DEFAULT_UAV_SPEED
        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] = 0.0
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one timestep.

        action: [dir_x, dir_y, speed_ctrl, offload_ctrl] with each component
        in [-1, 1]; the direction pair is normalised, speed_ctrl maps to
        [MIN_UAV_SPEED, MAX_UAV_SPEED] and offload_ctrl to [0, 1].
        Returns (stacked_obs, reward, done, info).
        """
        direction_raw = action[:2]
        direction_norm = np.linalg.norm(direction_raw)
        if direction_norm > 1e-6:
            movement_direction = direction_raw / direction_norm
        else:
            movement_direction = np.array([0.0, 0.0])
        speed = (action[2] + 1) / 2 * (MAX_UAV_SPEED - MIN_UAV_SPEED) + MIN_UAV_SPEED
        offloading_ratio = (action[3] + 1) / 2
        movement = movement_direction * speed
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())
        actual_movement = self.uav_position - prev_position
        distance_moved = np.linalg.norm(actual_movement)
        # Flight energy is charged at the commanded speed; hovering (speed 0)
        # still costs the model's hover power.
        if distance_moved > 0:
            actual_speed = speed if direction_norm > 1e-6 else 0
            flight_energy_step = self._calculate_flight_energy(distance_moved, actual_speed=actual_speed)
        else:
            flight_energy_step = self._calculate_flight_energy(0, actual_speed=0)
        self.total_flight_energy += flight_energy_step
        self.current_speed = speed if direction_norm > 1e-6 else 0
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        # Collect any in-range, not-yet-collected tasks and record their
        # delay/energy metrics with the current offloading ratio.
        newly_collected = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    self.user_offloading_ratios[i] = offloading_ratio
                    self.user_offloading_delays[i] = self._calculate_offloading_delay(i, new_distances[i])
                    self.user_completion_delays[i] = self._calculate_completion_delay(i, offloading_ratio)
                    self.user_computation_delays[i] = self.user_completion_delays[i] - self.user_offloading_delays[i]
                    self.user_computation_energies[i] = self._calculate_computation_energy(i, offloading_ratio)
        self.step_count += 1
        # Aggregate metrics over tasks collected so far.
        completed_indices = np.where(self.collected_tasks & self.task_generating_users)[0]
        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
            avg_offloading_delay = np.mean(self.user_offloading_delays[completed_indices])
            avg_computation_delay = np.mean(self.user_computation_delays[completed_indices])
        else:
            total_delay = 0.0
            total_comp_energy = 0.0
            avg_total_delay, avg_offloading_delay, avg_computation_delay = 0.0, 0.0, 0.0
        total_energy = self.total_flight_energy + total_comp_energy
        reward_info = self.calculate_reward(newly_collected, total_energy, total_delay, new_distances,
                                            self.last_distances)
        reward = reward_info['total_reward']
        for key in self.episode_reward_breakdown:
            self.episode_reward_breakdown[key] += reward_info[key]
        self.last_distances = new_distances
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        # Episode ends on timeout or once every required task is collected.
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)
        self.observation_history.append(self._get_state())
        return self._get_gru_state(), reward, done, {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "flight_energy": self.total_flight_energy,
            "comp_energy": total_comp_energy,
            "delay_breakdown": {
                "avg_offloading_delay": avg_offloading_delay,
                "avg_computation_delay": avg_computation_delay,
                "avg_total_delay": avg_total_delay,
                "total_delay": total_delay,
            },
            "episode_reward_breakdown": self.episode_reward_breakdown
        }

    def _get_state(self):
        """Return the flat observation vector (all features in [0, 1]):
        UAV xy, per-user (distance, collected, generating, cpu-load),
        normalised step count, normalised distance to the base station."""
        state = np.zeros(2 + NUM_USERS * 4 + 1 + 1)
        state[0:2] = self.uav_position / AREA_SIZE
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 4
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
            state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]
        state[-2] = self.step_count / MAX_STEPS
        bs_distance = np.linalg.norm(self.uav_position - BASE_STATION_POSITION)
        state[-1] = bs_distance / np.sqrt(2 * AREA_SIZE ** 2)
        return state

    def _get_gru_state(self):
        """Return the (SEQUENCE_LENGTH, state_dim) observation stack,
        padding with the current state if the history is short."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def calculate_reward(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Shaped reward: collection bonus (time-decayed), proximity shaping
        toward the closest uncollected user, a growing per-step penalty, and
        terminal completion bonus minus a delay/energy cost.

        NOTE(review): the logged breakdown components are scaled by
        REWARD_SCALE while 'total_reward' is not — presumably intentional
        (scaled units for logging only); confirm before relying on the sums.
        """
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users) if sum(self.task_generating_users) > 0 else 1
        completion_rate = collected_required / total_required
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            uncollected_distances_old = old_distances[uncollected_indices]
            uncollected_distances_new = new_distances[uncollected_indices]
            closest_idx = np.argmin(uncollected_distances_new)
            dist_diff = uncollected_distances_old[closest_idx] - uncollected_distances_new[closest_idx]
            # Stronger shaping once within 40 m of the nearest pending user.
            if uncollected_distances_new[closest_idx] < 40:
                proximity_reward = dist_diff * 0.06
            else:
                proximity_reward = dist_diff * 0.03
        time_factor = max(0, 1 - self.step_count / MAX_STEPS)
        collection_reward = newly_collected * 30.0 * (1 + time_factor * 0.5)
        progress = self.step_count / MAX_STEPS
        step_penalty = 0.25 + 0.5 * progress
        completion_reward = 0
        done = (self.step_count >= MAX_STEPS) or (collected_required == total_required)
        if done:
            # Terminal bonus scaled by how quickly the episode finished.
            step_efficiency = max(0.1, 1 - self.step_count / MAX_STEPS)
            base_completion = completion_rate * 400
            bonus = 1000 if completion_rate == 1.0 else 0
            completion_reward = (base_completion + bonus) * step_efficiency
        cost = 0
        if done and collected_required > 0:
            delay_penalty = total_delay * DELAY_WEIGHT * 1200
            energy_penalty = total_energy * ENERGY_WEIGHT * 0.015
            cost = (delay_penalty + energy_penalty) / collected_required
        total_reward = (collection_reward +
                        proximity_reward +
                        completion_reward -
                        step_penalty -
                        cost)
        return {
            'total_reward': total_reward,
            'proximity_reward': proximity_reward * REWARD_SCALE,
            'collection_reward': collection_reward * REWARD_SCALE,
            'completion_reward': completion_reward * REWARD_SCALE,
            'cost': -cost * REWARD_SCALE,
            'step_penalty': -step_penalty * REWARD_SCALE
        }

    def render(self, episode=0, clear_output=True):
        """Plot users, base station, UAV trajectory and collection radius,
        then save the frame to results/step_<episode>_<step>.png.

        `clear_output` is accepted for interface compatibility but unused.
        """
        plt.figure(figsize=(10, 10))
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        if hasattr(self, 'current_speed'):
            title += f", 当前速度: {self.current_speed:.1f} m/s"
        plt.title(title)
        plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                    s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
        plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                     textcoords="offset points", xytext=(0, -25),
                     ha='center', fontsize=10, fontweight='bold')
        for i, pos in enumerate(self.user_positions):
            # green = collected, red = pending, gray = not generating tasks.
            if self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)
            if self.task_generating_users[i] and self.collected_tasks[i]:
                offload_ratio = self.user_offloading_ratios[i]
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy\nOffload:{offload_ratio:.2f}"
            else:
                task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy"
            plt.annotate(task_info, (pos[0], pos[1]), fontsize=8, ha='center', va='bottom')
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]), MAX_DISTANCE_COLLECT, color='blue',
                            fill=False, alpha=0.3)
        plt.gca().add_patch(circle)
        plt.plot([self.uav_position[0], BASE_STATION_POSITION[0]],
                 [self.uav_position[1], BASE_STATION_POSITION[1]],
                 'orange', linestyle='--', alpha=0.5, linewidth=1)
        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        # Fix: the title was recomputed and set a second time here, which
        # overwrote the version that includes the current speed — removed.
        plt.grid(True)
        plt.legend()
        # Fix: ensure the output directory exists before saving.
        os.makedirs("results", exist_ok=True)
        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


class GRUActor(nn.Module):
    """Recurrent deterministic policy network.

    Encodes a (batch, seq, state_dim) observation history with a single-layer
    GRU and maps the final timestep's feature through a LayerNorm MLP to a
    tanh-squashed action scaled by ``max_action``. The GRU hidden state is
    kept on the module and reallocated whenever the batch size changes.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        # Sequence encoder followed by a two-layer policy head.
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)
        self.max_action = max_action
        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)
        # Persistent recurrent state; lazily (re)created in forward().
        self.hidden = None
        self._init_weights()

    def _init_weights(self):
        """Xavier-initialise every linear layer; biases start at zero."""
        linear_layers = (m for m in self.modules() if isinstance(m, nn.Linear))
        for layer in linear_layers:
            nn.init.xavier_uniform_(layer.weight)
            nn.init.constant_(layer.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        """Return an action in [-max_action, max_action] for each sequence."""
        n = state.size(0)
        stale = self.hidden is None or self.hidden.size(1) != n
        if reset_hidden or stale:
            self.reset_hidden(n)
        seq_features, self.hidden = self.gru(state, self.hidden)
        last_step = seq_features[:, -1]
        h = self.ln1(torch.relu(self.layer1(last_step)))
        h = self.ln2(torch.relu(self.layer2(h)))
        return self.max_action * torch.tanh(self.layer3(h))

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for a new rollout or batch size."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class SD3DistributionalCritic(nn.Module):
    """Twin recurrent quantile critics.

    Each head encodes the observation sequence with its own GRU, concatenates
    the final feature with the action, and outputs ``NUM_QUANTILES`` quantile
    estimates of the return distribution. ``Q1`` returns only the median
    quantile of the first head.
    """

    def __init__(self, state_dim, action_dim):
        super(SD3DistributionalCritic, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.num_quantiles = NUM_QUANTILES
        # --- head 1 ---
        self.q1_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, self.num_quantiles)
        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)
        # --- head 2 ---
        self.q2_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, self.num_quantiles)
        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)
        # Persistent per-head recurrent states.
        self.q1_hidden = None
        self.q2_hidden = None
        self._init_weights()

    def _init_weights(self):
        """Xavier-initialise linear layers with a small positive bias."""
        for layer in (m for m in self.modules() if isinstance(m, nn.Linear)):
            nn.init.xavier_uniform_(layer.weight)
            nn.init.constant_(layer.bias, 0.01)

    def _q1_quantiles(self, state, action):
        # Shared Q1 pathway used by both forward() and Q1().
        seq_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden.to(state.device))
        features = torch.cat([seq_out[:, -1], action], dim=1)
        h = self.q1_ln1(torch.relu(self.q1_layer1(features)))
        h = self.q1_ln2(torch.relu(self.q1_layer2(h)))
        return self.q1_output(h)

    def _q2_quantiles(self, state, action):
        # Mirror of _q1_quantiles for the second head.
        seq_out, self.q2_hidden = self.q2_gru(state, self.q2_hidden.to(state.device))
        features = torch.cat([seq_out[:, -1], action], dim=1)
        h = self.q2_ln1(torch.relu(self.q2_layer1(features)))
        h = self.q2_ln2(torch.relu(self.q2_layer2(h)))
        return self.q2_output(h)

    def forward(self, state, action, reset_hidden=False):
        """Return (q1_quantiles, q2_quantiles), each of shape (B, NUM_QUANTILES)."""
        n = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != n:
            self.reset_q1_hidden(n)
        if reset_hidden or self.q2_hidden is None or self.q2_hidden.size(1) != n:
            self.reset_q2_hidden(n)
        return self._q1_quantiles(state, action), self._q2_quantiles(state, action)

    def Q1(self, state, action, reset_hidden=False):
        """Return the median quantile of the first head, shape (B, 1)."""
        n = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != n:
            self.reset_q1_hidden(n)
        quantiles = self._q1_quantiles(state, action)
        median_idx = NUM_QUANTILES // 2
        return quantiles[:, median_idx:median_idx + 1]

    def reset_hidden(self, batch_size=1):
        """Zero both heads' recurrent states."""
        self.reset_q1_hidden(batch_size)
        self.reset_q2_hidden(batch_size)

    def reset_q1_hidden(self, batch_size=1):
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)

    def reset_q2_hidden(self, batch_size=1):
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


def quantile_huber_loss(quantiles, targets, taus, kappa=KAPPA):
    """Element-wise quantile Huber loss.

    `quantiles` and `targets` are matched element-wise (same shape); `taus`
    is a 1-D tensor of quantile levels broadcast over the batch dimension.
    Each residual is passed through the Huber function (quadratic within
    `kappa`, linear beyond) and weighted asymmetrically by |tau - 1{r < 0}|.
    Returns the mean over all elements.
    """
    diff = targets - quantiles
    magnitude = diff.abs()
    quadratic = 0.5 * diff.pow(2)
    linear = kappa * (magnitude - 0.5 * kappa)
    huber = torch.where(magnitude <= kappa, quadratic, linear)
    # Asymmetric weighting: under-estimates and over-estimates are penalised
    # in proportion to the quantile level.
    below = (diff < 0).float()
    weights = (taus.unsqueeze(0) - below).abs()
    return (weights * huber).mean()


class ReplayBuffer:
    """Fixed-capacity FIFO replay buffer of (s, a, r, s', done) transitions."""

    def __init__(self, max_size=BUFFER_SIZE):
        # deque drops the oldest transition automatically once full.
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Store one transition."""
        transition = (state, action, reward, next_state, done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Uniformly sample up to `batch_size` transitions and return them as
        five stacked float32 arrays (states, actions, rewards, next_states, dones)."""
        count = min(len(self.buffer), batch_size)
        batch = random.sample(self.buffer, count)
        columns = zip(*batch)
        return tuple(np.stack(column).astype(np.float32) for column in columns)

    def __len__(self):
        return len(self.buffer)


class MAML:
    """MAML wrapper around an actor (behaviour cloning loss) or critic
    (quantile regression toward the immediate reward).

    Bug fixed: the original implementation computed ``adapted_params`` but
    evaluated both the inner-loop loss and the outer (query) loss with the
    model's *unadapted* parameters. As a consequence (a) from the second
    inner step onward ``torch.autograd.grad`` returned ``None`` for every
    adapted tensor (they were not part of the loss graph), stalling
    adaptation, and (b) the meta-loss never depended on adaptation at all.
    Both losses are now evaluated functionally with the adapted parameters
    via ``torch.func.functional_call`` (requires torch >= 2.0), so the meta
    gradient measures post-adaptation performance as MAML prescribes.
    """

    def __init__(self, model, lr=META_LR, adaptation_steps=META_ADAPTATION_STEPS):
        self.model = model
        self.meta_lr = lr
        self.adaptation_steps = adaptation_steps
        self.meta_optimizer = optim.Adam(self.model.parameters(), lr=lr)

    def _sample_batch(self, data_buffer):
        # Draw a batch from a ReplayBuffer and move it to the training device.
        states, actions, rewards, next_states, dones = data_buffer.sample(
            min(BATCH_SIZE, len(data_buffer)))
        states = torch.FloatTensor(states).to(device)
        actions = torch.FloatTensor(actions).to(device)
        rewards = torch.FloatTensor(rewards.reshape(-1, 1)).to(device)
        return states, actions, rewards

    def _functional_loss(self, params, states, actions, rewards, criterion):
        # Evaluate the task loss with `params` substituted for the model's own
        # parameters; the result stays differentiable w.r.t. `params`.
        self.model.reset_hidden(states.size(0))
        if isinstance(self.model, GRUActor):
            predicted_actions = torch.func.functional_call(self.model, params, (states,))
            return criterion(predicted_actions, actions)
        q1_quantiles, q2_quantiles = torch.func.functional_call(self.model, params, (states, actions))
        target_quantiles = rewards.expand(-1, NUM_QUANTILES)
        return quantile_huber_loss(q1_quantiles, target_quantiles, QUANTILE_TAU) + \
               quantile_huber_loss(q2_quantiles, target_quantiles, QUANTILE_TAU)

    def adapt(self, support_data, criterion):
        """Run `adaptation_steps` inner-loop SGD steps on the support buffer
        and return the adapted parameter dict (original weights untouched)."""
        adapted_params = {name: param for name, param in self.model.named_parameters()
                          if param.requires_grad}

        for _ in range(self.adaptation_steps):
            if len(support_data) < BATCH_SIZE:
                continue
            states, actions, rewards = self._sample_batch(support_data)
            loss = self._functional_loss(adapted_params, states, actions, rewards, criterion)
            # create_graph=True keeps the inner update differentiable so the
            # outer gradient can flow through it; parameters unused by the
            # loss keep their previous value.
            grads = torch.autograd.grad(loss, list(adapted_params.values()),
                                        create_graph=True, allow_unused=True)
            adapted_params = {name: param - self.meta_lr * grad if grad is not None else param
                              for (name, param), grad in zip(adapted_params.items(), grads)}

        return adapted_params

    def meta_update(self, tasks_data, criterion):
        """One outer-loop step over a list of (support_buffer, query_buffer)
        pairs; returns the scalar meta-loss (0.0 if no task had enough data)."""
        meta_losses = []

        for support_data, query_data in tasks_data:
            adapted_params = self.adapt(support_data, criterion)
            if len(query_data) < BATCH_SIZE:
                continue
            states, actions, rewards = self._sample_batch(query_data)
            # Query loss uses the *adapted* parameters so the meta-gradient
            # rewards fast adaptability, not raw pre-adaptation performance.
            meta_losses.append(self._functional_loss(adapted_params, states, actions, rewards, criterion))

        if meta_losses:
            meta_loss = torch.stack(meta_losses).mean()
            self.meta_optimizer.zero_grad()
            meta_loss.backward()
            self.meta_optimizer.step()
            return meta_loss.item()
        return 0.0


class SD3:
    """TD3-style agent with distributional (quantile) GRU critics and optional
    MAML meta-learning across sequential tasks.

    Components:
      * GRU actor + target actor, twin distributional critic + target critic.
      * TD3 tricks: target policy smoothing noise, clipped double-Q via
        elementwise min over quantiles, delayed actor / target updates
        (every ``policy_freq`` critic updates), Polyak soft target updates.
      * Optional MAML wrappers (``meta_actor`` / ``meta_critic``) fed from
        per-task replay buffers accumulated in ``switch_task``.
    """

    def __init__(self, state_dim, action_dim, max_action, use_meta_learning=True):
        # Actor network + frozen copy used for target policy smoothing.
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin distributional critic + target (clipped double-Q).
        self.critic = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target = SD3DistributionalCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()
        # TD3 hyperparameters scaled to the action range.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2  # actor/target update every N critic updates
        self.total_it = 0     # critic-update counter driving the delay

        self.use_meta_learning = use_meta_learning
        if use_meta_learning:
            self.meta_actor = MAML(self.actor)
            self.meta_critic = MAML(self.critic)
            self.task_buffers = {}  # task_id -> ReplayBuffer archive

        self.current_task = 1

        # Per-task exploration-noise schedules.
        # NOTE(review): no visible caller reads task_noise — train() builds
        # its own phase_noise schedule. Confirm whether this is dead state.
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK[0]),
            2: np.linspace(EXPLORATION_NOISE_START * 0.8, EXPLORATION_NOISE_END, EPISODES_PER_TASK[1]),
            3: np.linspace(EXPLORATION_NOISE_START * 0.7, EXPLORATION_NOISE_END, EPISODES_PER_TASK[2]),
            4: np.linspace(EXPLORATION_NOISE_START * 0.6, EXPLORATION_NOISE_END, EPISODES_PER_TASK[3])
        }

        # Reward-driven LR decay (mode='max': reduce when reward plateaus).
        # NOTE(review): the `verbose` argument is deprecated in recent
        # torch versions — consider removing when upgrading.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True)

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Return a clipped action for `state`, with optional Gaussian
        exploration noise.

        A 2-D state (presumably (seq_len, features) — TODO confirm against
        the environment) is given a leading batch dimension of 1.
        """
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)
        state = torch.FloatTensor(state).to(device)
        action = self.actor(state).cpu().data.numpy().flatten()
        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise
        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Transition to a new task: archive recent experience for the old
        task, optionally run a meta-update over all archived tasks, then
        clear the replay buffer and GRU hidden states."""
        print(f"\nSwitching to task {task_id}")

        if self.use_meta_learning and self.current_task > 0:
            # Archive the most recent 5000 transitions under the old task id.
            if self.current_task not in self.task_buffers:
                self.task_buffers[self.current_task] = ReplayBuffer(max_size=10000)

            for experience in list(self.memory.buffer)[-5000:]:
                self.task_buffers[self.current_task].add(*experience)

            # With at least two archived tasks, build support/query splits
            # (first half / second half of each buffer) and meta-update.
            if len(self.task_buffers) > 1:
                print("Performing meta-learning update...")
                tasks_data = []
                for tid, buffer in self.task_buffers.items():
                    if len(buffer) > BATCH_SIZE * 2:
                        support_buffer = ReplayBuffer()
                        query_buffer = ReplayBuffer()

                        all_data = list(buffer.buffer)
                        split_idx = len(all_data) // 2

                        for exp in all_data[:split_idx]:
                            support_buffer.add(*exp)
                        for exp in all_data[split_idx:]:
                            query_buffer.add(*exp)

                        tasks_data.append((support_buffer, query_buffer))

                if tasks_data:
                    actor_criterion = nn.MSELoss()
                    self.meta_actor.meta_update(tasks_data, actor_criterion)

                    critic_criterion = nn.MSELoss()
                    self.meta_critic.meta_update(tasks_data, critic_criterion)

        if task_id == 4:
            print("Phase 4: Using same users as Phase 1 to test continual learning")

        # Fresh start for the new task: empty replay buffer, reset recurrence.
        print(f"Clearing replay buffer for new task.")
        self.memory.buffer.clear()
        self.current_task = task_id
        self.actor.reset_hidden()
        self.critic.reset_hidden()
        print(f"Reset GRU states for new task {task_id}")

    def fine_tune(self, env, episodes=FINE_TUNE_EPISODES):
        """Short supervised-by-interaction fine-tuning phase after a task
        switch (meta-learning agents only), using a private small buffer.

        NOTE(review): self.total_it is never incremented here, so the
        ``total_it % policy_freq`` gate inside _fine_tune_step is frozen at
        whatever value training left it at — the actor is either updated
        every fine-tune step or never. Confirm intent.
        """
        if not self.use_meta_learning:
            return

        print(f"Fine-tuning for {episodes} episodes...")
        fine_tune_buffer = ReplayBuffer(max_size=5000)

        for episode in range(episodes):
            state = env.reset()
            self.actor.reset_hidden()
            self.critic.reset_hidden()
            episode_reward = 0

            for step in range(MAX_STEPS):
                # Fixed moderate exploration noise during fine-tuning.
                action = self.select_action(state, noise_scale=0.3)
                next_state, reward, done, info = env.step(action)
                fine_tune_buffer.add(state, action, reward, next_state, done)

                if len(fine_tune_buffer) > BATCH_SIZE:
                    self._fine_tune_step(fine_tune_buffer)

                state = next_state
                episode_reward += reward

                if done:
                    break

            print(
                f"Fine-tune episode {episode + 1}: Reward = {episode_reward:.2f}, Tasks = {info['collected_required']}/{info['total_required']}")

    def _fine_tune_step(self, buffer):
        """One critic (and possibly actor) update from the fine-tune buffer.

        Mirrors train() but without target-network soft updates or the
        total_it increment — see the NOTE in fine_tune().
        """
        states, actions, rewards, next_states, dones = buffer.sample(BATCH_SIZE)
        states = torch.FloatTensor(states).to(device)
        actions = torch.FloatTensor(actions).to(device)
        rewards = torch.FloatTensor(rewards.reshape(-1, 1)).to(device)
        next_states = torch.FloatTensor(next_states).to(device)
        dones = torch.FloatTensor(dones.reshape(-1, 1)).to(device)

        # Re-initialize GRU hidden state for a full batch before each pass.
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        with torch.no_grad():
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target policy smoothing: clipped Gaussian noise on the target
            # action. NOTE(review): torch.FloatTensor(shape).data.normal_ is a
            # legacy idiom (uninitialized tensor on CPU first); prefer
            # torch.randn_like(actions) * self.policy_noise when refactoring.
            noise = torch.FloatTensor(actions.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_states) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q over quantiles, then distributional Bellman target.
            target_q1_quantiles, target_q2_quantiles = self.critic_target(next_states, next_action)
            target_q_quantiles = torch.min(target_q1_quantiles, target_q2_quantiles)
            reward_expanded = rewards.expand(-1, NUM_QUANTILES)
            done_expanded = dones.expand(-1, NUM_QUANTILES)
            target_quantiles = reward_expanded + (1 - done_expanded) * GAMMA * target_q_quantiles

        current_q1_quantiles, current_q2_quantiles = self.critic(states, actions)

        critic_loss_q1 = quantile_huber_loss(current_q1_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss_q2 = quantile_huber_loss(current_q2_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss = critic_loss_q1 + critic_loss_q2

        self.critic_optimizer.zero_grad()
        # retain_graph=True presumably because the actor loss below reuses
        # part of the graph through the GRU state — TODO confirm necessity.
        critic_loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        if self.total_it % self.policy_freq == 0:
            # Delayed deterministic policy-gradient step through critic Q1.
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)
            actor_loss = -self.critic.Q1(states, self.actor(states)).mean()
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

    def train(self):
        """One TD3/SD3 training iteration from the main replay buffer.

        Returns a dict with critic/actor losses, or None (implicitly) when
        the buffer does not yet hold a full batch.
        """
        self.total_it += 1
        if len(self.memory) < BATCH_SIZE:
            return

        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        with torch.no_grad():
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target policy smoothing (same legacy noise idiom as
            # _fine_tune_step — see the NOTE there).
            noise = torch.FloatTensor(action.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q + distributional Bellman backup.
            target_q1_quantiles, target_q2_quantiles = self.critic_target(next_state, next_action)
            target_q_quantiles = torch.min(target_q1_quantiles, target_q2_quantiles)
            reward_expanded = reward.expand(-1, NUM_QUANTILES)
            done_expanded = done.expand(-1, NUM_QUANTILES)
            target_quantiles = reward_expanded + (1 - done_expanded) * GAMMA * target_q_quantiles

        current_q1_quantiles, current_q2_quantiles = self.critic(state, action)

        critic_loss_q1 = quantile_huber_loss(current_q1_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss_q2 = quantile_huber_loss(current_q2_quantiles, target_quantiles, QUANTILE_TAU)
        critic_loss = critic_loss_q1 + critic_loss_q2

        self.critic_optimizer.zero_grad()
        critic_loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss = 0  # stays 0 (int) on non-policy-update iterations

        if self.total_it % self.policy_freq == 0:
            # Delayed actor update through critic Q1.
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Polyak soft update of both target networks, only on delayed steps.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            # actor_loss is the int 0 on non-update iterations, a tensor otherwise.
            "actor_loss": actor_loss if isinstance(actor_loss, (int, float)) else actor_loss.item(),
        }

    def update_lr_schedulers(self, reward):
        """Feed the episode reward to both plateau schedulers (mode='max')."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


def train():
    """Four-phase curriculum training loop comparing a meta-learning SD3
    agent against a non-meta baseline on the same environment.

    Per phase: switch both agents to the new task (meta agent also
    fine-tunes from phase 2 on), anneal exploration noise, run episodes for
    both agents, log/plot/checkpoint periodically, and save per-phase
    weights at the end.

    Returns:
        (agent_meta, agent_baseline, env, final_comparison dict of
        reward/collection histories).
    """
    os.makedirs("results", exist_ok=True)
    env = Environment()

    # State layout: UAV position (2) + 4 features per user + speed + step
    # counter — presumably; TODO confirm against Environment's observation.
    state_dim = 2 + NUM_USERS * 4 + 1 + 1
    action_dim = 4
    max_action = 1

    agent_meta = SD3(state_dim, action_dim, max_action, use_meta_learning=True)
    agent_baseline = SD3(state_dim, action_dim, max_action, use_meta_learning=False)

    total_episodes = sum(EPISODES_PER_TASK)
    eval_freq = 50  # plot/checkpoint cadence (in global episodes)

    # Parallel per-agent histories (meta vs baseline).
    rewards_history_meta = []
    rewards_history_baseline = []
    smoothed_rewards_meta = []
    smoothed_rewards_baseline = []
    collection_history_meta = []
    collection_history_baseline = []
    energy_history_meta = []
    energy_history_baseline = []
    delay_history_meta = []
    delay_history_baseline = []

    best_reward_meta = -float('inf')
    best_reward_baseline = -float('inf')
    best_collection_meta = 0
    best_collection_baseline = 0
    losses_meta = {"critic": [], "actor": []}
    losses_baseline = {"critic": [], "actor": []}

    start_time = time.time()

    for phase in range(1, 5):
        # Reconfigure the environment's active users, then reset both agents
        # for the new task (archives experience + meta-update inside).
        env.update_task_generating_users(phase)
        agent_meta.switch_task(phase)
        agent_baseline.switch_task(phase)

        if phase > 1:
            print("Starting fine-tuning for Meta-Learning agent...")
            agent_meta.fine_tune(env)

        # Linear noise annealing; the start point decays 10% per phase.
        phase_noise_base = EXPLORATION_NOISE_START * (0.9 ** (phase - 1))
        phase_noise = np.linspace(phase_noise_base, EXPLORATION_NOISE_END, EPISODES_PER_TASK[phase - 1])

        for episode in range(1, EPISODES_PER_TASK[phase - 1] + 1):
            global_episode = sum(EPISODES_PER_TASK[:phase - 1]) + episode
            current_noise = phase_noise[episode - 1]

            # Run both agents on the same episode index (env is reset per
            # agent, so they see independently generated episodes).
            for agent_name, agent in [("Meta", agent_meta), ("Baseline", agent_baseline)]:
                state = env.reset()
                agent.actor.reset_hidden()
                agent.critic.reset_hidden()
                episode_reward = 0
                last_collection = 0
                episode_losses = {"critic": [], "actor": []}

                for step in range(1, MAX_STEPS + 1):
                    action = agent.select_action(state, noise_scale=current_noise)
                    next_state, reward, done, info = env.step(action)
                    agent.memory.add(state, action, reward, next_state, done)
                    loss_info = agent.train()
                    if loss_info:
                        episode_losses["critic"].append(loss_info["critic_loss"])
                        episode_losses["actor"].append(loss_info["actor_loss"])

                    state = next_state
                    episode_reward += reward
                    last_collection = info["collected_required"]
                    if done:
                        # Periodic trajectory render for the meta agent only.
                        if global_episode % eval_freq == 0 and agent_name == "Meta":
                            print(f"--- Episode {global_episode} finished. Generating final trajectory plot. ---")
                            env.render(global_episode)
                        break

                if agent_name == "Meta":
                    # Bookkeeping for the meta agent: raw + 10-episode
                    # moving-average reward, losses, LR schedule, best model.
                    rewards_history_meta.append(episode_reward)
                    collection_history_meta.append(last_collection)
                    energy_history_meta.append(info["energy"])
                    delay_history_meta.append(info["delay"])

                    if len(rewards_history_meta) >= 10:
                        smoothed_rewards_meta.append(np.mean(rewards_history_meta[-10:]))
                    else:
                        smoothed_rewards_meta.append(episode_reward)
                    if episode_losses["critic"]:
                        losses_meta["critic"].append(np.mean(episode_losses["critic"]))
                    if episode_losses["actor"]:
                        losses_meta["actor"].append(np.mean(episode_losses["actor"]))
                    agent.update_lr_schedulers(episode_reward)

                    # "Best" = highest collection ratio, ties broken by reward.
                    current_required = info["total_required"]
                    collection_ratio = last_collection / current_required if current_required > 0 else 0
                    if collection_ratio > best_collection_meta or (
                            collection_ratio == best_collection_meta and episode_reward > best_reward_meta):
                        best_reward_meta = episode_reward
                        best_collection_meta = collection_ratio
                        torch.save(agent.actor.state_dict(), f"results/best_actor_meta_phase_{phase}.pth")
                else:
                    # Same bookkeeping for the baseline agent.
                    rewards_history_baseline.append(episode_reward)
                    collection_history_baseline.append(last_collection)
                    energy_history_baseline.append(info["energy"])
                    delay_history_baseline.append(info["delay"])

                    if len(rewards_history_baseline) >= 10:
                        smoothed_rewards_baseline.append(np.mean(rewards_history_baseline[-10:]))
                    else:
                        smoothed_rewards_baseline.append(episode_reward)
                    if episode_losses["critic"]:
                        losses_baseline["critic"].append(np.mean(episode_losses["critic"]))
                    if episode_losses["actor"]:
                        losses_baseline["actor"].append(np.mean(episode_losses["actor"]))
                    agent.update_lr_schedulers(episode_reward)

                    current_required = info["total_required"]
                    collection_ratio = last_collection / current_required if current_required > 0 else 0
                    if collection_ratio > best_collection_baseline or (
                            collection_ratio == best_collection_baseline and episode_reward > best_reward_baseline):
                        best_reward_baseline = episode_reward
                        best_collection_baseline = collection_ratio
                        torch.save(agent.actor.state_dict(), f"results/best_actor_baseline_phase_{phase}.pth")

            # NOTE(review): from here on, `info` (and env state below) refers
            # to the *Baseline* agent's last episode — the agent loop above
            # ran Baseline second. Confirm this is intended for the log line.
            elapsed_time = time.time() - start_time
            collected_required = info.get("collected_required", 0)
            total_required = info.get("total_required", 1)

            # Average offloading ratio over users whose tasks were collected.
            avg_offloading_ratio = 0.0
            completed_tasks_with_offload = []
            for i in range(NUM_USERS):
                if env.task_generating_users[i] and env.collected_tasks[i]:
                    completed_tasks_with_offload.append(env.user_offloading_ratios[i])

            if completed_tasks_with_offload:
                avg_offloading_ratio = np.mean(completed_tasks_with_offload)

            # [-1:] slices take just the most recent episode's mean loss.
            avg_actor_loss_meta = np.mean(losses_meta["actor"][-1:]) if losses_meta["actor"] else 0.0
            avg_critic_loss_meta = np.mean(losses_meta["critic"][-1:]) if losses_meta["critic"] else 0.0
            avg_actor_loss_baseline = np.mean(losses_baseline["actor"][-1:]) if losses_baseline["actor"] else 0.0
            avg_critic_loss_baseline = np.mean(losses_baseline["critic"][-1:]) if losses_baseline["critic"] else 0.0

            # Optional detailed breakdown strings (present only if the env
            # supplies them in `info`).
            reward_str = ""
            if 'episode_reward_breakdown' in info:
                rb = info["episode_reward_breakdown"]
                reward_str = (f"Pro:{rb['proximity_reward']:.1f} "
                              f"Col:{rb['collection_reward']:.1f} "
                              f"Comp:{rb['completion_reward']:.1f} "
                              f"Cost:{rb['cost']:.1f} "
                              f"Step:{rb['step_penalty']:.1f}")

            energy_str = ""
            if 'flight_energy' in info and 'comp_energy' in info:
                energy_str = f"E(Flight:{info['flight_energy']:.1f} Comp:{info['comp_energy']:.1f})"

            delay_str = ""
            if 'delay_breakdown' in info:
                db = info['delay_breakdown']
                delay_str = f"D(Tot:{db['total_delay']:.2f}s AvgComp:{db['avg_computation_delay']:.3f}s AvgOff:{db['avg_offloading_delay']:.3f}s)"

            print(
                f"P:{phase} Ep {episode:3d}/{EPISODES_PER_TASK[phase - 1]} "
                f"Tasks:{collected_required:2d}/{total_required:2d} "
                f"Steps:{env.step_count:3d} "
                f"Speed:{env.current_speed:.1f} m/s "
                f"Noise:{current_noise:.3f} "
                f"AvgOffload: {avg_offloading_ratio:.2f} "
                f"Loss(Meta A/C:{avg_actor_loss_meta:.3f}/{avg_critic_loss_meta:.3f}) "
                f"Loss(Base A/C:{avg_actor_loss_baseline:.3f}/{avg_critic_loss_baseline:.3f}) | "
                f"Rwd(Meta:{rewards_history_meta[-1]:.2f}/Base:{rewards_history_baseline[-1]:.2f}) "
                f"[{reward_str}] | "
                f"Total E: {info.get('energy', 0):.1f} "
                f"[{energy_str}] | "
                f"Avg D: {info.get('delay', 0):.3f}s "
                f"[{delay_str}] | "
                f"Time: {elapsed_time:.1f}s"
            )

            # Periodic 6-panel comparison plot + full checkpoint.
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                plt.figure(figsize=(30, 6))

                # Panel 1: raw + smoothed reward curves with phase boundaries.
                plt.subplot(1, 6, 1)
                plt.plot(rewards_history_meta, alpha=0.3, color='blue', label='Meta Raw')
                plt.plot(smoothed_rewards_meta, color='red', label='Meta Smoothed')
                plt.plot(rewards_history_baseline, alpha=0.3, color='green', label='Baseline Raw')
                plt.plot(smoothed_rewards_baseline, color='orange', label='Baseline Smoothed')
                for i, ep_count in enumerate(EPISODES_PER_TASK[:-1]):
                    plt.axvline(x=sum(EPISODES_PER_TASK[:i + 1]), color='purple', linestyle='--',
                                label=f'Phase {i + 1}->{i + 2}' if i == 0 else "")
                plt.title("Reward Comparison")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                # Panel 2: number of collected tasks per episode.
                plt.subplot(1, 6, 2)
                plt.plot(collection_history_meta, label='Meta-Learning', color='blue')
                plt.plot(collection_history_baseline, label='Baseline', color='green')
                for i, ep_count in enumerate(EPISODES_PER_TASK[:-1]):
                    plt.axvline(x=sum(EPISODES_PER_TASK[:i + 1]), color='purple', linestyle='--')
                plt.title("Collected Tasks")
                plt.xlabel("Episode")
                plt.ylabel("Number of Tasks")
                plt.legend()
                plt.grid(True)

                # Panel 3: total energy per episode.
                plt.subplot(1, 6, 3)
                plt.plot(energy_history_meta, label='Meta-Learning', color='blue')
                plt.plot(energy_history_baseline, label='Baseline', color='green')
                for i, ep_count in enumerate(EPISODES_PER_TASK[:-1]):
                    plt.axvline(x=sum(EPISODES_PER_TASK[:i + 1]), color='purple', linestyle='--')
                plt.title("Total Energy")
                plt.xlabel("Episode")
                plt.ylabel("Energy")
                plt.legend()
                plt.grid(True)

                # Panel 4: average delay per episode.
                plt.subplot(1, 6, 4)
                plt.plot(delay_history_meta, label='Meta-Learning', color='blue')
                plt.plot(delay_history_baseline, label='Baseline', color='green')
                for i, ep_count in enumerate(EPISODES_PER_TASK[:-1]):
                    plt.axvline(x=sum(EPISODES_PER_TASK[:i + 1]), color='purple', linestyle='--')
                plt.title("Avg Delay")
                plt.xlabel("Episode")
                plt.ylabel("Delay (s)")
                plt.legend()
                plt.grid(True)

                # Panel 5: actor/critic training losses.
                plt.subplot(1, 6, 5)
                if losses_meta["critic"]: plt.plot(losses_meta["critic"], label='Meta Critic', color='blue')
                if losses_meta["actor"]: plt.plot(losses_meta["actor"], label='Meta Actor', color='red')
                if losses_baseline["critic"]: plt.plot(losses_baseline["critic"], label='Baseline Critic',
                                                       color='green')
                if losses_baseline["actor"]: plt.plot(losses_baseline["actor"], label='Baseline Actor', color='orange')
                for i, ep_count in enumerate(EPISODES_PER_TASK[:-1]):
                    plt.axvline(x=sum(EPISODES_PER_TASK[:i + 1]), color='purple', linestyle='--')
                plt.title("Training Loss")
                plt.xlabel("Episode")
                plt.ylabel("Loss")
                plt.legend()
                plt.grid(True)

                # Panel 6: average reward per completed phase (bar chart).
                plt.subplot(1, 6, 6)
                phase_ranges = [0] + [sum(EPISODES_PER_TASK[:i + 1]) for i in range(4)]
                phase_rewards_meta = []
                phase_rewards_baseline = []

                for i in range(4):
                    start_idx = phase_ranges[i]
                    end_idx = phase_ranges[i + 1]
                    if end_idx <= len(rewards_history_meta):
                        phase_rewards_meta.append(np.mean(rewards_history_meta[start_idx:end_idx]))
                        phase_rewards_baseline.append(np.mean(rewards_history_baseline[start_idx:end_idx]))

                x_phases = list(range(1, len(phase_rewards_meta) + 1))
                plt.bar([x - 0.2 for x in x_phases], phase_rewards_meta, width=0.4,
                        label='Meta-Learning', color='blue', alpha=0.7)
                plt.bar([x + 0.2 for x in x_phases], phase_rewards_baseline, width=0.4,
                        label='Baseline', color='green', alpha=0.7)
                plt.title("Average Reward by Phase")
                plt.xlabel("Phase")
                plt.ylabel("Average Reward")
                plt.legend()
                plt.grid(True)

                plt.tight_layout()
                plt.savefig(f"results/training_comparison_episode_{global_episode}.png")
                plt.close()

                # Full checkpoint: both agents' weights + all histories.
                torch.save({
                    'meta_actor_state_dict': agent_meta.actor.state_dict(),
                    'meta_critic_state_dict': agent_meta.critic.state_dict(),
                    'baseline_actor_state_dict': agent_baseline.actor.state_dict(),
                    'baseline_critic_state_dict': agent_baseline.critic.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history_meta': rewards_history_meta,
                    'rewards_history_baseline': rewards_history_baseline,
                    'collection_history_meta': collection_history_meta,
                    'collection_history_baseline': collection_history_baseline,
                    'energy_history_meta': energy_history_meta,
                    'energy_history_baseline': energy_history_baseline,
                    'delay_history_meta': delay_history_meta,
                    'delay_history_baseline': delay_history_baseline,
                    'best_reward_meta': best_reward_meta,
                    'best_reward_baseline': best_reward_baseline,
                    'best_collection_meta': best_collection_meta,
                    'best_collection_baseline': best_collection_baseline
                }, f"results/checkpoint_comparison_episode_{global_episode}.pt")

        # End-of-phase snapshots of current (not necessarily best) weights.
        torch.save(agent_meta.actor.state_dict(), f"results/actor_meta_phase_{phase}.pth")
        torch.save(agent_meta.critic.state_dict(), f"results/critic_meta_phase_{phase}.pth")
        torch.save(agent_baseline.actor.state_dict(), f"results/actor_baseline_phase_{phase}.pth")
        torch.save(agent_baseline.critic.state_dict(), f"results/critic_baseline_phase_{phase}.pth")

    print(f"Training completed!")
    print(f"Meta-Learning - Best result: {best_collection_meta * 100:.1f}% tasks, Reward: {best_reward_meta:.2f}")
    print(f"Baseline - Best result: {best_collection_baseline * 100:.1f}% tasks, Reward: {best_reward_baseline:.2f}")

    final_comparison = {
        'meta_rewards': rewards_history_meta,
        'baseline_rewards': rewards_history_baseline,
        'meta_collections': collection_history_meta,
        'baseline_collections': collection_history_baseline
    }

    return agent_meta, agent_baseline, env, final_comparison


def test_and_visualize(agent, env, model_path, phase, agent_type="Meta"):
    """Load saved actor weights, run one noise-free evaluation episode, and
    produce trajectory/reward plots plus console statistics.

    Args:
        agent: SD3 agent whose actor will be overwritten from `model_path`.
        env: environment to evaluate in (reconfigured for `phase`).
        model_path: path to a saved actor state_dict.
        phase: curriculum phase (selects which users generate tasks).
        agent_type: label used in titles/filenames ("Meta" or "Baseline").

    Returns:
        Dict of summary metrics (tasks, reward, energy, delay, steps,
        average offloading ratio).
    """
    agent.actor.load_state_dict(torch.load(model_path))
    agent.actor.eval()
    env.update_task_generating_users(phase)
    state = env.reset()
    agent.actor.reset_hidden()
    total_reward = 0
    step_rewards = []
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(NUM_USERS)  # step index at which each user was collected
    collection_order = []

    # Greedy (noise-free) rollout; record the step at which each required
    # user's task transitions from uncollected to collected.
    for step in range(1, MAX_STEPS + 1):
        action = agent.select_action(state, noise_scale=0)
        trajectory.append(env.uav_position.copy())
        collected_before = env.collected_tasks.copy()
        next_state, reward, done, info = env.step(action)
        for i in range(NUM_USERS):
            if env.task_generating_users[i] and env.collected_tasks[i] and not collected_before[i]:
                collection_times[i] = step
                collection_order.append(i)
        total_reward += reward
        step_rewards.append(reward)
        state = next_state
        if step % 5 == 0 or done:
            env.render(step)
        if done:
            break

    # ---- Trajectory figure (Chinese labels are intentional UI text) ----
    trajectory = np.array(trajectory)
    plt.figure(figsize=(12, 10))

    plt.scatter(BASE_STATION_POSITION[0], BASE_STATION_POSITION[1],
                s=300, c='orange', marker='s', label='Base Station', edgecolors='black', linewidth=2)
    plt.annotate('BS\n(MEC)', BASE_STATION_POSITION,
                 textcoords="offset points", xytext=(0, -25),
                 ha='center', fontsize=10, fontweight='bold')

    # Users: green = collected, red = required but missed, gray = inactive.
    for i, (x, y) in enumerate(env.user_positions):
        if env.task_generating_users[i]:
            if env.collected_tasks[i]:
                color = 'green'
                plt.scatter(x, y, s=150, c=color, marker='o')
                offload_ratio = env.user_offloading_ratios[i]
                plt.annotate(f"用户 {i + 1}\n(步数 {int(collection_times[i])})\nOffload: {offload_ratio:.2f}",
                             (x, y), textcoords="offset points", xytext=(0, 10),
                             ha='center', fontsize=10)
            else:
                color = 'red'
                plt.scatter(x, y, s=150, c=color, marker='o')
                plt.annotate(f"用户 {i + 1}\n(未收集)", (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
        else:
            color = 'gray'
            plt.scatter(x, y, s=100, c=color, marker='o')
            plt.annotate(f"用户 {i + 1}\n(不产生任务)", (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)

    # UAV path with start/end markers and step-index labels every 10 steps.
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')

    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]), fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))

    # Dashed link from the UAV's position at collection time to each user.
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and env.collected_tasks[i]:
            step = int(collection_times[i])
            if step < len(trajectory):
                uav_pos = trajectory[step]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]], 'g--', alpha=0.5)

    plt.plot([trajectory[-1, 0], BASE_STATION_POSITION[0]],
             [trajectory[-1, 1], BASE_STATION_POSITION[1]],
             'orange', linestyle='--', alpha=0.5, linewidth=2, label='UAV-BS Link')

    plt.title(
        f"{agent_type} Agent - UAV任务收集轨迹 (阶段{phase}: 收集 {sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)} 任务, 步数: {env.step_count})")
    plt.xlabel("X坐标 (m)")
    plt.ylabel("Y坐标 (m)")
    plt.grid(True)
    plt.legend()
    plt.xlim(0, AREA_SIZE)
    plt.ylim(0, AREA_SIZE)
    plt.savefig(f"results/final_uav_trajectory_{agent_type.lower()}_phase_{phase}.png")
    plt.close()

    # ---- Per-step and cumulative reward figure ----
    plt.figure(figsize=(15, 5))
    plt.subplot(1, 2, 1)
    plt.plot(step_rewards)
    plt.title(f"{agent_type} Agent - 步奖励")
    plt.xlabel("步数")
    plt.ylabel("奖励")
    plt.grid(True)
    plt.subplot(1, 2, 2)
    plt.plot(np.cumsum(step_rewards))
    plt.title(f"{agent_type} Agent - 累计奖励")
    plt.xlabel("步数")
    plt.ylabel("累计奖励")
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(f"results/test_rewards_{agent_type.lower()}_phase_{phase}.png")
    plt.close()

    # ---- Console summary ----
    print(f"\n{agent_type} Agent 测试结果 (阶段 {phase}):")
    collected_count = sum(env.collected_tasks & env.task_generating_users)
    total_count = sum(env.task_generating_users)
    percentage = collected_count / total_count * 100 if total_count > 0 else 0
    print(f"收集任务: {collected_count}/{total_count} ({percentage:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

    # Offloading statistics over collected users only: <0.1 counts as local
    # processing, >=0.9 as remote, in between as mixed.
    print("\n卸载决策统计:")
    collected_indices = [i for i in range(NUM_USERS)
                         if env.task_generating_users[i] and env.collected_tasks[i]]
    if collected_indices:
        avg_offload_ratio = np.mean([env.user_offloading_ratios[i] for i in collected_indices])
        print(f"平均卸载比例: {avg_offload_ratio:.3f}")
        local_count = sum(1 for i in collected_indices if env.user_offloading_ratios[i] < 0.1)
        mixed_count = sum(1 for i in collected_indices if 0.1 <= env.user_offloading_ratios[i] < 0.9)
        remote_count = sum(1 for i in collected_indices if env.user_offloading_ratios[i] >= 0.9)
        print(f"本地处理: {local_count}, 混合处理: {mixed_count}, 远程处理: {remote_count}")

    # Per-user collection details, ordered by collection step.
    print("\n任务收集详情:")
    collection_indices = [(i, int(collection_times[i])) for i in range(NUM_USERS)
                          if env.task_generating_users[i] and env.collected_tasks[i]]
    collection_indices.sort(key=lambda x: x[1])
    for i, step in collection_indices:
        offload_ratio = env.user_offloading_ratios[i]
        print(f"用户 {i + 1}: 在步数 {step} 收集, 卸载比例: {offload_ratio:.3f}")
    for i in range(NUM_USERS):
        if env.task_generating_users[i] and not env.collected_tasks[i]:
            print(f"用户 {i + 1}: 未收集")

    return {
        'collected_tasks': collected_count,
        'total_tasks': total_count,
        'completion_rate': percentage,
        'total_reward': total_reward,
        'total_energy': info['energy'],
        'total_delay': info['delay'],
        'total_steps': env.step_count,
        # Safe: the ternary short-circuits, so avg_offload_ratio is only
        # evaluated when collected_indices is non-empty (i.e. when it exists).
        'avg_offload_ratio': avg_offload_ratio if collected_indices else 0
    }


def compare_phase_performance(agent_meta, agent_baseline, env, comparison_data):
    """Print a per-phase comparison between the meta-learning and baseline agents.

    For each of the four training phases, reports mean reward / task-collection
    statistics from ``comparison_data`` (when that phase's episodes are present),
    re-tests each phase's saved best model via ``test_and_visualize``, and
    finally contrasts phase 1 vs. phase 4 (same user configuration) to gauge
    catastrophic forgetting.
    """
    banner = "=" * 80
    print("\n" + banner)
    print("阶段性能对比分析")
    print(banner)

    # Cumulative episode counts: boundaries[k] is the first episode index of phase k+1.
    boundaries = [0]
    running_total = 0
    for episode_count in EPISODES_PER_TASK:
        running_total += episode_count
        boundaries.append(running_total)

    for phase_idx in range(1, 5):
        print(f"\n--- 阶段 {phase_idx} 性能分析 ---")
        lo = boundaries[phase_idx - 1]
        hi = boundaries[phase_idx]

        # Aggregate stats are only available once this phase's episodes were logged.
        if hi <= len(comparison_data['meta_rewards']):
            meta_r = np.mean(comparison_data['meta_rewards'][lo:hi])
            base_r = np.mean(comparison_data['baseline_rewards'][lo:hi])
            meta_c = np.mean(comparison_data['meta_collections'][lo:hi])
            base_c = np.mean(comparison_data['baseline_collections'][lo:hi])

            print(f"平均奖励 - Meta: {meta_r:.2f}, Baseline: {base_r:.2f}")
            print(f"平均收集任务数 - Meta: {meta_c:.2f}, Baseline: {base_c:.2f}")
            print(
                f"奖励提升: {(meta_r - base_r):.2f} ({((meta_r - base_r) / abs(base_r) * 100):.1f}%)")
            print(f"任务收集提升: {(meta_c - base_c):.2f}")

        print("\n测试各阶段最佳模型:")

        meta_results = test_and_visualize(
            agent_meta, env,
            f"results/best_actor_meta_phase_{phase_idx}.pth",
            phase_idx, "Meta")
        baseline_results = test_and_visualize(
            agent_baseline, env,
            f"results/best_actor_baseline_phase_{phase_idx}.pth",
            phase_idx, "Baseline")

        print("最终测试对比:")
        print(
            f"完成率 - Meta: {meta_results['completion_rate']:.1f}%, Baseline: {baseline_results['completion_rate']:.1f}%")
        print(f"总奖励 - Meta: {meta_results['total_reward']:.2f}, Baseline: {baseline_results['total_reward']:.2f}")
        print(f"总能耗 - Meta: {meta_results['total_energy']:.2f}, Baseline: {baseline_results['total_energy']:.2f}")
        print(f"平均延迟 - Meta: {meta_results['total_delay']:.3f}s, Baseline: {baseline_results['total_delay']:.3f}s")
        print(f"总步数 - Meta: {meta_results['total_steps']}, Baseline: {baseline_results['total_steps']}")

    print("\n--- 第四阶段持续学习效果验证 ---")
    print("第四阶段使用与第一阶段相同的用户配置，验证是否发生灾难性遗忘")

    # Re-test each agent's phase-1 and phase-4 best checkpoints (same call order
    # as before: meta 1, meta 4, baseline 1, baseline 4).
    retest = {}
    for tag, label_prefix, agent in (("meta", "Meta", agent_meta),
                                     ("baseline", "Baseline", agent_baseline)):
        for ph in (1, 4):
            retest[(tag, ph)] = test_and_visualize(
                agent, env,
                f"results/best_actor_{tag}_phase_{ph}.pth",
                ph, f"{label_prefix}-Phase{ph}")

    phase1_meta = retest[("meta", 1)]
    phase4_meta = retest[("meta", 4)]
    phase1_baseline = retest[("baseline", 1)]
    phase4_baseline = retest[("baseline", 4)]

    print("\n持续学习对比分析:")
    print("Meta-Learning:")
    print(
        f"  阶段1完成率: {phase1_meta['completion_rate']:.1f}% -> 阶段4完成率: {phase4_meta['completion_rate']:.1f}% (变化: {phase4_meta['completion_rate'] - phase1_meta['completion_rate']:+.1f}%)")
    print(
        f"  阶段1奖励: {phase1_meta['total_reward']:.2f} -> 阶段4奖励: {phase4_meta['total_reward']:.2f} (变化: {phase4_meta['total_reward'] - phase1_meta['total_reward']:+.2f})")

    print("Baseline:")
    print(
        f"  阶段1完成率: {phase1_baseline['completion_rate']:.1f}% -> 阶段4完成率: {phase4_baseline['completion_rate']:.1f}% (变化: {phase4_baseline['completion_rate'] - phase1_baseline['completion_rate']:+.1f}%)")
    print(
        f"  阶段1奖励: {phase1_baseline['total_reward']:.2f} -> 阶段4奖励: {phase4_baseline['total_reward']:.2f} (变化: {phase4_baseline['total_reward'] - phase1_baseline['total_reward']:+.2f})")

    # Positive "forgetting" = completion rate dropped between phase 1 and phase 4.
    meta_forgetting = phase1_meta['completion_rate'] - phase4_meta['completion_rate']
    baseline_forgetting = phase1_baseline['completion_rate'] - phase4_baseline['completion_rate']

    meta_tag = '轻微遗忘' if meta_forgetting > 0 else '性能提升'
    baseline_tag = '轻微遗忘' if baseline_forgetting > 0 else '性能提升'
    print("\n遗忘程度分析:")
    print(f"Meta-Learning 遗忘程度: {meta_forgetting:.1f}% ({meta_tag})")
    print(f"Baseline 遗忘程度: {baseline_forgetting:.1f}% ({baseline_tag})")

    conclusion = (
        "结论: Meta-Learning + Fine-tuning 在持续学习方面表现更优，遗忘程度更低"
        if meta_forgetting < baseline_forgetting
        else "结论: 两种方法在持续学习方面表现相当"
    )
    print(conclusion)


def main():
    """Script entry point: run full training, then compare agent performance.

    Runs ``train()`` to produce the meta/baseline agents, the environment, and
    the logged comparison data, then hands everything to
    ``compare_phase_performance`` for the phase-by-phase report.
    """
    agent_meta, agent_baseline, env, comparison_data = train()
    separator = "=" * 60
    print("\n" + separator)
    print("训练完成！开始性能对比分析...")
    print(separator)
    compare_phase_performance(agent_meta, agent_baseline, env, comparison_data)


if __name__ == "__main__":
    main()
