import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time

SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# SimHei lets matplotlib render the CJK text used in plot titles;
# unicode_minus=False keeps minus signs displayable with that font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

EPISODES_PER_TASK = 200

# Environment parameters
AREA_SIZE = 100  # side length of the square service area
NUM_USERS = 10
MAX_STEPS = 200  # max environment steps per episode
MAX_DISTANCE_COLLECT = 15  # 2-D distance within which the UAV collects a task

# UAV parameters
UAV_HEIGHT = 30.0
UAV_SPEED = 10.0  # per-axis movement scale applied to the [-1, 1] action
UAV_COMPUTE_CAPACITY = 1e10  # CPU cycles/second (presumably) -- TODO confirm

# TD3 hyperparameters
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99
TAU = 0.005  # soft target-update rate
BUFFER_SIZE = 200000
BATCH_SIZE = 256
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.1  # all reward components are multiplied by this

# EWC (Elastic Weight Consolidation) parameters
EWC_LAMBDA = 1.0
FISHER_SAMPLE_SIZE = 1000

# GRU parameters
SEQUENCE_LENGTH = 10  # length of the observation history fed to the GRU
HIDDEN_SIZE = 128

# Communication parameters
BANDWIDTH = 1e6  # Hz; also the B in the k*T*B thermal-noise power below
USER_TRANSMIT_POWER = 0.1  # watts (assumed) -- used directly in the SNR
CHANNEL_GAIN_REF_DB = 30.0
CHANNEL_GAIN_REF_LINEAR = 10 ** (CHANNEL_GAIN_REF_DB / 10)
PATH_LOSS_EXPONENT = 2.5
BOLTZMANN_CONSTANT = 1.38e-23
TEMPERATURE_KELVIN = 290
NOISE_POWER = BOLTZMANN_CONSTANT * TEMPERATURE_KELVIN * BANDWIDTH  # k*T*B
RICE_FACTOR = 5  # Rician K-factor: LoS-to-scattered power ratio

# Task parameters: [min, max] uniform ranges per user
TASK_SIZE_BITS = [1e6, 2e6]
TASK_CPU_CYCLES = [5e8, 15e8]

# UAV flight energy model parameters (rotary-wing UAV)
UAV_WEIGHT_KG = 2.0
GRAVITY = 9.81
AIR_DENSITY = 1.225  # kg/m^3, sea-level air
ROTOR_RADIUS = 0.4
NUM_ROTORS = 4
# Induced-power coefficient W * sqrt(W / (2*rho*A)), W = weight force.
# NOTE(review): uses a single rotor's disk area; NUM_ROTORS is defined but
# never referenced in this expression -- confirm that is intentional.
P_INDUCED_COEFF = UAV_WEIGHT_KG * GRAVITY * np.sqrt(
    UAV_WEIGHT_KG * GRAVITY / (2 * AIR_DENSITY * np.pi * ROTOR_RADIUS ** 2))
P_PROFILE_COEFF = 0.012
P_PARASITE_COEFF = 0.6
EFFECTIVE_SWITCHED_CAPACITANCE = 1e-28  # energy coefficient per CPU cycle

# Weights/scales for the delay-energy trade-off in the terminal reward
DELAY_WEIGHT = 0.5
ENERGY_WEIGHT = 0.5
DELAY_SCALE = 10
ENERGY_SCALE = 0.01


class Environment:
    """UAV-assisted task-collection environment.

    A single UAV flies over a square area and "collects" computation tasks
    from ground users that are within MAX_DISTANCE_COLLECT.  Episodes end
    when every task-generating user has been served or MAX_STEPS elapses.
    Observations are stacked into a SEQUENCE_LENGTH-long history so the
    GRU-based agent sees a sequence, not a single frame.
    """

    def __init__(self):
        # Random user layout and per-user task demands (bits / CPU cycles).
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_cpu_cycles = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=NUM_USERS)
        self.task_sizes = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([0.0, 0.0], dtype=float)  # bottom-left corner start
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0

        # Per-user delay/energy bookkeeping, filled in when a task is collected.
        self.user_completion_delays = np.zeros(NUM_USERS)
        self.user_offloading_delays = np.zeros(NUM_USERS)
        self.user_computation_delays = np.zeros(NUM_USERS)
        self.user_computation_energies = np.zeros(NUM_USERS)
        self.total_flight_energy = 0

        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        # Rolling observation window consumed by _get_gru_state().
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1

    def _calculate_rice_channel_gain(self, distance_2d):
        """Sample an instantaneous Rician-faded channel power gain.

        Combines deterministic path loss over the 3-D distance (UAV height
        included) with a random Rician fading realization of K-factor
        RICE_FACTOR.  Note this is stochastic: each call draws new fading.
        """
        distance_3d = np.sqrt(distance_2d ** 2 + UAV_HEIGHT ** 2)
        # Clamp to 1 m so the path-loss power law cannot blow up near zero.
        if distance_3d < 1.0: distance_3d = 1.0
        path_loss = CHANNEL_GAIN_REF_LINEAR * (distance_3d ** (-PATH_LOSS_EXPONENT))
        K = RICE_FACTOR
        h_los = 1.0
        h_nlos_real = np.random.normal(0, 1)
        h_nlos_imag = np.random.normal(0, 1)
        # Unit-power complex Gaussian scatter component.
        h_nlos = (h_nlos_real + 1j * h_nlos_imag) / np.sqrt(2)
        h = np.sqrt(K / (K + 1)) * h_los + np.sqrt(1 / (K + 1)) * h_nlos
        fading_gain = abs(h) ** 2
        return path_loss * fading_gain

    def _calculate_offloading_delay(self, user_index, distance_2d):
        """Upload delay (s) for the user's task at the Shannon rate B*log2(1+SNR)."""
        channel_gain = self._calculate_rice_channel_gain(distance_2d)
        snr = (USER_TRANSMIT_POWER * channel_gain) / NOISE_POWER
        data_rate = BANDWIDTH * np.log2(1 + snr)
        return self.task_sizes[user_index] / data_rate

    def _calculate_computation_delay(self, user_index):
        """Compute delay (s) = required CPU cycles / UAV compute capacity."""
        return self.task_cpu_cycles[user_index] / UAV_COMPUTE_CAPACITY

    def _calculate_flight_energy(self, distance_moved, time_delta=1.0):
        """Flight energy (J) over one step of duration time_delta.

        Rotary-wing power model: induced + blade-profile + parasite terms as
        a function of speed.  NOTE(review): the exact coefficient usage
        (P_INDUCED_COEFF appearing inside the square root) matches a
        simplified form of the standard model -- confirm against the source
        paper if energy numbers matter quantitatively.
        """
        speed = distance_moved / time_delta
        power = P_INDUCED_COEFF * (
                np.sqrt(1 + (speed ** 4) / (4 * P_INDUCED_COEFF ** 2)) - (speed ** 2) / (2 * P_INDUCED_COEFF)) \
                + P_PROFILE_COEFF * (1 + 3 * (speed ** 2)) \
                + 0.5 * P_PARASITE_COEFF * AIR_DENSITY * speed ** 3
        return power * time_delta

    def _calculate_computation_energy(self, user_index):
        """UAV compute energy (J) = kappa * cycles.

        NOTE(review): no frequency-squared term appears here, unlike the
        usual kappa*f^2*cycles CMOS model -- confirm this is intended.
        """
        return EFFECTIVE_SWITCHED_CAPACITANCE * self.task_cpu_cycles[user_index]

    def update_task_generating_users(self, phase):
        """Reconfigure which users generate tasks for a new learning phase.

        Phase 1: the original NUM_USERS users all generate tasks.
        Phase 2: an 11th user is added (once) and everyone generates tasks;
                 all per-user bookkeeping arrays are extended to match.
        Phase 3+: a random subset of 9 users generates tasks.
        """
        self.current_phase = phase
        if phase == 1:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 2:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
            if len(self.user_positions) < 11:
                # First entry into phase 2: append one brand-new user.
                extra_position = np.random.uniform(0, AREA_SIZE, size=(1, 2))
                extra_cpu = np.random.uniform(TASK_CPU_CYCLES[0], TASK_CPU_CYCLES[1], size=1)
                extra_size = np.random.uniform(TASK_SIZE_BITS[0], TASK_SIZE_BITS[1], size=1)
                self.user_positions = np.vstack([self.user_positions, extra_position])
                self.task_cpu_cycles = np.append(self.task_cpu_cycles, extra_cpu)
                self.task_sizes = np.append(self.task_sizes, extra_size)
                self.task_generating_users = np.append(self.task_generating_users, True)
                self.collected_tasks = np.append(self.collected_tasks, False)
                self.user_completion_delays = np.append(self.user_completion_delays, 0)
                self.user_offloading_delays = np.append(self.user_offloading_delays, 0)
                self.user_computation_delays = np.append(self.user_computation_delays, 0)
                self.user_computation_energies = np.append(self.user_computation_energies, 0)
            else:
                # Re-entering phase 2 after the extra user already exists.
                if len(self.task_generating_users) < 11:
                    self.task_generating_users = np.append(self.task_generating_users, True)
                else:
                    self.task_generating_users = np.ones(len(self.user_positions), dtype=bool)
        else:
            total_users = len(self.user_positions)
            indices = np.random.choice(total_users, 9, replace=False)
            self.task_generating_users = np.zeros(total_users, dtype=bool)
            self.task_generating_users[indices] = True
        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Start a new episode and return the initial GRU state sequence.

        Note the episode start position is (10, 10), not the (0, 0) used in
        __init__.  Bookkeeping arrays are zeroed in place when already large
        enough, otherwise reallocated (handles the phase-2 user addition).
        """
        self.uav_position = np.array([10.0, 10.0], dtype=float)
        num_users = len(self.user_positions)
        self.collected_tasks = np.zeros(num_users, dtype=bool)
        self.step_count = 0
        if len(self.user_completion_delays) < num_users:
            self.user_completion_delays = np.zeros(num_users)
            self.user_offloading_delays = np.zeros(num_users)
            self.user_computation_delays = np.zeros(num_users)
            self.user_computation_energies = np.zeros(num_users)
        else:
            self.user_completion_delays[:num_users] = 0
            self.user_offloading_delays[:num_users] = 0
            self.user_computation_delays[:num_users] = 0
            self.user_computation_energies[:num_users] = 0
        self.total_flight_energy = 0
        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history.clear()
        # Pre-fill the history with the initial frame so the very first
        # agent input is already a full SEQUENCE_LENGTH-long sequence.
        initial_state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(initial_state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one step: move the UAV, collect in-range tasks, reward.

        action: 2-D vector in [-1, 1]^2, scaled by UAV_SPEED per axis.
        Returns (gru_state, reward, done, info) where info carries the
        collection count, energy/delay totals and the reward breakdown.
        """
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())
        # Flight energy is charged on the actual (clipped) displacement.
        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        flight_energy_step = self._calculate_flight_energy(distance_moved)
        self.total_flight_energy += flight_energy_step
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        newly_collected = 0
        num_users = len(self.user_positions)
        for i in range(num_users):
            if (i < len(self.task_generating_users) and
                    self.task_generating_users[i] and
                    not self.collected_tasks[i]):
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    # Task is collected exactly once; delays/energy are
                    # evaluated at the distance of collection.
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    offloading_delay = self._calculate_offloading_delay(i, new_distances[i])
                    computation_delay = self._calculate_computation_delay(i)
                    self.user_offloading_delays[i] = offloading_delay
                    self.user_computation_delays[i] = computation_delay
                    self.user_completion_delays[i] = offloading_delay + computation_delay
                    self.user_computation_energies[i] = self._calculate_computation_energy(i)
        self.step_count += 1
        # Pad the task mask if the user count grew since it was built.
        if len(self.task_generating_users) < num_users:
            additional_users = num_users - len(self.task_generating_users)
            self.task_generating_users = np.append(self.task_generating_users,
                                                   np.zeros(additional_users, dtype=bool))
        completed_indices = np.where(self.collected_tasks[:num_users] &
                                     self.task_generating_users[:num_users])[0]
        if len(completed_indices) > 0:
            total_delay = np.sum(self.user_completion_delays[completed_indices])
            total_comp_energy = np.sum(self.user_computation_energies[completed_indices])
            avg_total_delay = np.mean(self.user_completion_delays[completed_indices])
            avg_offloading_delay = np.mean(self.user_offloading_delays[completed_indices])
            avg_computation_delay = np.mean(self.user_computation_delays[completed_indices])
        else:
            total_delay, total_comp_energy, avg_total_delay, avg_offloading_delay, avg_computation_delay = 0.0, 0.0, 0.0, 0.0, 0.0
        total_energy = self.total_flight_energy + total_comp_energy
        reward_info = self._calculate_reward_detailed(newly_collected, total_energy, total_delay, new_distances,
                                                      self.last_distances)
        reward = reward_info['total_reward']
        self.last_distances = new_distances
        total_tasks_to_collect = sum(self.task_generating_users[:num_users])
        collected_required_tasks = sum(self.collected_tasks[:num_users] &
                                       self.task_generating_users[:num_users])
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)
        self.observation_history.append(self._get_state())
        return self._get_gru_state(), reward, done, {
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": total_energy,
            "delay": avg_total_delay,
            "reward_breakdown": reward_info,
            "flight_energy": self.total_flight_energy,
            "comp_energy": total_comp_energy,
            "delay_breakdown": {
                "avg_offloading_delay": avg_offloading_delay,
                "avg_computation_delay": avg_computation_delay,
                "avg_total_delay": avg_total_delay,
                "total_delay": total_delay,
            }
        }

    def _get_state(self):
        """Build one flat observation frame of fixed size 2 + 11*4 + 1.

        Layout: normalized UAV (x, y); then for each of up to 11 user slots
        [normalized distance, collected flag, generating flag, normalized
        CPU demand] (zero-padded for absent users); finally normalized step.
        """
        max_users = 11
        state = np.zeros(2 + max_users * 4 + 1)
        state[0:2] = self.uav_position / AREA_SIZE
        num_users = len(self.user_positions)
        for i in range(max_users):
            idx = 2 + i * 4
            if i < num_users:
                dist = np.linalg.norm(self.uav_position - self.user_positions[i])
                # Normalize by the area diagonal.
                state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)
                state[idx + 1] = float(self.collected_tasks[i])
                state[idx + 2] = float(self.task_generating_users[i] if i < len(self.task_generating_users) else False)
                state[idx + 3] = self.task_cpu_cycles[i] / TASK_CPU_CYCLES[1]
            else:
                state[idx:idx+4] = 0.0
        state[-1] = self.step_count / MAX_STEPS
        return state

    def _get_gru_state(self):
        """Return the (SEQUENCE_LENGTH, state_dim) observation history array,
        padding with fresh frames if the history is ever short."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def _calculate_reward_detailed(self, newly_collected, total_energy, total_delay, new_distances, old_distances):
        """Compute the shaped reward and its per-component breakdown.

        Components: per-collection bonus, potential-style proximity shaping
        toward the closest uncollected user, a constant time penalty, and
        terminal terms (completion bonus minus a delay/energy objective
        penalty).  Everything is scaled by REWARD_SCALE at the end.
        """
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users) if sum(self.task_generating_users) > 0 else 1
        collection_reward = newly_collected * 15.0
        proximity_reward = 0.0
        uncollected_indices = np.where(self.task_generating_users & ~self.collected_tasks)[0]
        if len(uncollected_indices) > 0:
            # Reward progress toward whichever uncollected user is now closest.
            uncollected_distances_old = old_distances[uncollected_indices]
            uncollected_distances_new = new_distances[uncollected_indices]
            closest_user_idx = np.argmin(uncollected_distances_new)
            dist_diff = uncollected_distances_old[closest_user_idx] - uncollected_distances_new[closest_user_idx]
            proximity_reward = dist_diff * 0.3
        time_penalty = 0.1
        objective_penalty = 0.0
        completion_bonus = 0.0
        # NOTE: this re-derives the termination test; it must stay in sync
        # with the `done` computation in step().
        done = (self.step_count >= MAX_STEPS) or (collected_required == sum(self.task_generating_users))
        if done:
            if collected_required > 0:
                scaled_total_delay = total_delay * DELAY_SCALE
                scaled_total_energy = total_energy * ENERGY_SCALE
                objective_value = (DELAY_WEIGHT * scaled_total_delay) + \
                                  (ENERGY_WEIGHT * scaled_total_energy)
                # Per-collected-task average of the weighted objective.
                objective_penalty = objective_value / collected_required
                completion_rate = collected_required / total_required
                completion_bonus = completion_rate * 100.0
                if completion_rate == 1.0:
                    completion_bonus += 150.0
            else:
                # Episode ended with nothing collected: flat large penalty.
                objective_penalty = 200.0
        total_reward = (collection_reward + proximity_reward + completion_bonus -
                        time_penalty - objective_penalty)
        scaled_reward = total_reward * REWARD_SCALE
        return {'total_reward': scaled_reward, 'collection_reward': collection_reward * REWARD_SCALE,
                'proximity_reward': proximity_reward * REWARD_SCALE, 'completion_bonus': completion_bonus * REWARD_SCALE,
                'objective_penalty': -objective_penalty * REWARD_SCALE, 'time_penalty': -time_penalty * REWARD_SCALE}

    def render(self, episode=0, clear_output=True):
        """Save a PNG snapshot of users, trajectory and collection radius to
        results/step_<episode>_<step>.png (directory must already exist)."""
        plt.figure(figsize=(10, 10))
        num_users = len(self.user_positions)
        for i, pos in enumerate(self.user_positions):
            # Green = collected, red = pending, gray = not generating a task.
            if i < len(self.task_generating_users) and self.task_generating_users[i]:
                color = 'green' if self.collected_tasks[i] else 'red'
            else:
                color = 'gray'
            plt.scatter(pos[0], pos[1], s=100, c=color)
            task_info = f"{i + 1}\n{self.task_sizes[i] / 1e6:.1f}Mb\n{self.task_cpu_cycles[i] / 1e9:.1f}Gcy"
            plt.annotate(task_info, (pos[0], pos[1]), fontsize=8, ha='center', va='bottom')
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')
        circle = plt.Circle((self.uav_position[0], self.uav_position[1]), MAX_DISTANCE_COLLECT, color='blue',
                            fill=False, alpha=0.3)
        plt.gca().add_patch(circle)
        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)
        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        plt.title(title)
        plt.grid(True)
        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


class GRUActor(nn.Module):
    """Recurrent TD3 actor: a GRU over the observation sequence followed by
    a LayerNorm MLP head emitting a tanh-squashed action scaled by
    max_action.

    The hidden state is kept on the module (self.hidden) and persists
    across forward calls until the batch size changes or reset_hidden()
    is called.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)
        self.max_action = max_action
        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)
        # Persistent GRU hidden state; lazily (re)allocated in forward().
        self.hidden = None
        self._init_weights()

    def _init_weights(self):
        """Xavier-uniform weights and zero biases on the Linear layers only
        (the GRU keeps PyTorch's default initialization)."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        """Map a (batch, seq_len, state_dim) sequence to actions.

        The hidden state is reset whenever requested, missing, or sized for
        a different batch; only the GRU output at the last time step feeds
        the MLP head.
        """
        batch_size = state.size(0)
        if reset_hidden or self.hidden is None or self.hidden.size(1) != batch_size:
            self.reset_hidden(batch_size)
        gru_out, self.hidden = self.gru(state, self.hidden)
        x = gru_out[:, -1]
        x = self.ln1(torch.relu(self.layer1(x)))
        x = self.ln2(torch.relu(self.layer2(x)))
        action = torch.tanh(self.layer3(x))
        return self.max_action * action

    def reset_hidden(self, batch_size=1):
        """Zero the GRU hidden state for the given batch size (on `device`)."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class GRUCritic(nn.Module):
    """Recurrent twin critic for TD3: two independent GRU + LayerNorm-MLP
    Q-heads (clipped double-Q).  Each head keeps its own persistent hidden
    state, reset when the batch size changes or via reset_*_hidden()."""

    def __init__(self, state_dim, action_dim):
        super(GRUCritic, self).__init__()
        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE
        self.q1_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        self.q2_gru = nn.GRU(input_size=state_dim, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
        # Action is concatenated to the GRU feature before the Q MLP.
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, 1)
        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, 1)
        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)
        self.q1_hidden = None
        self.q2_hidden = None
        self._init_weights()

    def _init_weights(self):
        """Xavier-uniform weights on Linear layers.
        NOTE(review): biases are set to 0.01 here versus 0.0 in GRUActor --
        confirm the asymmetry is intentional."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.constant_(m.bias, 0.01)

    def forward(self, state, action, reset_hidden=False):
        """Return (q1, q2) for a (batch, seq, state_dim) sequence and action
        batch; only the last GRU time step feeds each Q head."""
        batch_size = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch_size:
            self.reset_q1_hidden(batch_size)
        if reset_hidden or self.q2_hidden is None or self.q2_hidden.size(1) != batch_size:
            self.reset_q2_hidden(batch_size)
        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden.to(state.device))
        q2_gru_out, self.q2_hidden = self.q2_gru(state, self.q2_hidden.to(state.device))
        q1_state = q1_gru_out[:, -1]
        q2_state = q2_gru_out[:, -1]
        q1_x = torch.cat([q1_state, action], dim=1)
        q2_x = torch.cat([q2_state, action], dim=1)
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1 = self.q1_output(q1)
        q2 = self.q2_ln1(torch.relu(self.q2_layer1(q2_x)))
        q2 = self.q2_ln2(torch.relu(self.q2_layer2(q2)))
        q2 = self.q2_output(q2)
        return q1, q2

    def Q1(self, state, action, reset_hidden=False):
        """Evaluate only the first Q head (used for the actor/policy loss)."""
        batch_size = state.size(0)
        if reset_hidden or self.q1_hidden is None or self.q1_hidden.size(1) != batch_size:
            self.reset_q1_hidden(batch_size)
        q1_gru_out, self.q1_hidden = self.q1_gru(state, self.q1_hidden.to(state.device))
        q1_state = q1_gru_out[:, -1]
        q1_x = torch.cat([q1_state, action], dim=1)
        q1 = self.q1_ln1(torch.relu(self.q1_layer1(q1_x)))
        q1 = self.q1_ln2(torch.relu(self.q1_layer2(q1)))
        q1 = self.q1_output(q1)
        return q1

    def reset_hidden(self, batch_size=1):
        """Zero both heads' hidden states."""
        self.reset_q1_hidden(batch_size)
        self.reset_q2_hidden(batch_size)

    def reset_q1_hidden(self, batch_size=1):
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)

    def reset_q2_hidden(self, batch_size=1):
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size).to(device)


class ReplayBuffer:
    """Fixed-capacity FIFO experience replay store."""

    def __init__(self, max_size=BUFFER_SIZE):
        # deque evicts the oldest transition automatically once full.
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Append one (state, action, reward, next_state, done) transition."""
        transition = (state, action, reward, next_state, done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Draw a uniform random mini-batch (without replacement) and return
        five stacked numpy arrays, one per transition field.  If the buffer
        holds fewer than batch_size transitions, the whole buffer is used."""
        count = min(len(self.buffer), batch_size)
        chosen = random.sample(self.buffer, count)
        return tuple(np.stack(column) for column in zip(*chosen))

    def __len__(self):
        return len(self.buffer)


class EWC:
    """Elastic Weight Consolidation.

    After finishing a task, a snapshot of the model parameters is stored
    together with a diagonal Fisher-information estimate; while learning
    later tasks, calculate_ewc_loss() penalizes drift away from that
    snapshot, weighted by per-parameter importance.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        self.importance = {}
        self.old_params = {}
        self.fisher_diagonal = {}

    def _calculate_fisher_info(self, replay_buffer):
        """Estimate the diagonal Fisher information from replayed transitions.

        Averages squared gradients of a surrogate loss over single-sample
        draws: MSE to the replayed action for the actor, mean Q-value for
        the critic.
        """
        fisher = {n: torch.zeros_like(p).to(device)
                  for n, p in self.model.named_parameters() if p.requires_grad}
        self.model.train()
        n_samples = min(self.fisher_sample_size, len(replay_buffer))
        if n_samples <= 0: return fisher
        for _ in range(n_samples):
            s_batch, a_batch, _, _, _ = replay_buffer.sample(1)
            s_batch = torch.FloatTensor(s_batch).to(device)
            a_batch = torch.FloatTensor(a_batch).to(device)
            self.model.zero_grad()
            self.model.reset_hidden(1)
            if isinstance(self.model, GRUActor):
                predicted = self.model(s_batch)
                surrogate = ((predicted - a_batch) ** 2).mean()
            else:
                q_value, _ = self.model(s_batch, a_batch)
                surrogate = q_value.mean()
            surrogate.backward()
            for n, p in self.model.named_parameters():
                if p.requires_grad and p.grad is not None:
                    fisher[n] += p.grad.pow(2) / n_samples
        return fisher

    def store_task_parameters(self, task_id, replay_buffer):
        """Snapshot current parameters and refresh the Fisher estimate."""
        print(f"Storing parameters for task {task_id} and computing Fisher information matrix")
        self.old_params = {n: p.data.clone()
                           for n, p in self.model.named_parameters() if p.requires_grad}
        self.importance = self._calculate_fisher_info(replay_buffer)
        print(f"Stored {len(self.old_params)} parameters and computed Fisher matrices")

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Return lam * sum_i F_i * (theta_i - theta_i_old)^2, or 0 when no
        snapshot has been stored yet."""
        penalty = 0
        if not self.old_params or not self.importance: return penalty
        for n, p in self.model.named_parameters():
            if p.requires_grad and n in self.old_params and n in self.importance:
                drift = p - self.old_params[n]
                penalty = penalty + torch.sum(self.importance[n] * drift.pow(2))
        return lam * penalty


class TD3:
    """Twin Delayed DDPG agent with GRU actor/critic and optional EWC
    regularization for continual learning across tasks.

    Implements the three TD3 tricks: clipped double-Q targets, target
    policy smoothing noise, and delayed policy/target updates.
    """

    def __init__(self, state_dim, action_dim, max_action, use_ewc=True):
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)
        self.max_action = max_action
        self.memory = ReplayBuffer()
        # Target policy smoothing: noise std and clip, scaled by action range.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2  # actor/target update once per 2 critic updates
        self.total_it = 0
        self.use_ewc = use_ewc
        if self.use_ewc:
            self.ewc_actor = EWC(self.actor)
            self.ewc_critic = EWC(self.critic)
        self.current_task = 1
        # Per-task exploration-noise schedules (linear decay over episodes).
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            2: np.linspace(EXPLORATION_NOISE_START * 0.8, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            3: np.linspace(EXPLORATION_NOISE_START * 0.7, EXPLORATION_NOISE_END, EPISODES_PER_TASK)
        }
        # NOTE(review): `verbose=True` on ReduceLROnPlateau is deprecated in
        # newer PyTorch releases -- confirm the pinned torch version.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.actor_optimizer, mode='max', factor=0.5,
                                                                    patience=100, verbose=True)
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.critic_optimizer, mode='max', factor=0.5,
                                                                     patience=100, verbose=True)

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Return a clipped action for one observation sequence, adding
        Gaussian exploration noise of the given scale (0 disables it).

        state: (seq_len, state_dim) or (1, seq_len, state_dim) array.
        """
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)
        state = torch.FloatTensor(state).to(device)
        action = self.actor(state).cpu().data.numpy().flatten()
        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise
        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Transition to a new task: consolidate (EWC) or just keep weights
        (baseline), clear the replay buffer, and reset GRU states."""
        print(f"\nSwitching to task {task_id}")

        if self.use_ewc:
            # EWC: store old-task parameters and compute the Fisher
            # information matrix before moving on.
            if self.current_task > 0 and len(self.memory) > 0:
                self.ewc_actor.store_task_parameters(self.current_task, self.memory)
                self.ewc_critic.store_task_parameters(self.current_task, self.memory)
            print(f"EWC: Clearing replay buffer for new task.")
            self.memory.buffer.clear()
        else:
            # [MODIFIED] Baseline (continual learning without EWC):
            # do not re-initialize the networks; keep the learned weights
            # and only clear the experience buffer, so the agent keeps
            # learning the new task on top of its old knowledge.
            print("Baseline (Continual w/o EWC): Keeping weights, clearing replay buffer for new task.")
            self.memory.buffer.clear()

        self.current_task = task_id
        self.actor.reset_hidden()
        self.critic.reset_hidden()
        print(f"Reset GRU states for new task {task_id}")

    def train(self):
        """One TD3 gradient step from a replay mini-batch.

        Returns a {"critic_loss", "actor_loss"} dict, or None when the
        buffer does not yet hold BATCH_SIZE transitions.  From task 2
        onward (with EWC enabled) both losses carry the EWC penalty.
        """
        self.total_it += 1
        if len(self.memory) < BATCH_SIZE: return
        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)
        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)
        # Fresh hidden states sized for the batch before every forward pass.
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)
        with torch.no_grad():
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)
            # Target policy smoothing: clipped Gaussian noise on the target action.
            noise = torch.FloatTensor(action.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
            # Clipped double-Q: bootstrap from the smaller of the two target heads.
            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q = reward + (1 - done) * GAMMA * target_q
        current_q1, current_q2 = self.critic(state, action)
        critic_loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)

        if self.use_ewc and self.current_task > 1:
            critic_ewc_loss = self.ewc_critic.calculate_ewc_loss()
            critic_loss += critic_ewc_loss

        self.critic_optimizer.zero_grad()
        # NOTE(review): retain_graph=True -- presumably required because the
        # critic's persistent GRU hidden state is reused by the actor update
        # below; confirm it is still needed given the reset_* calls.
        critic_loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()
        actor_loss = 0
        # Delayed policy update: actor and targets move every policy_freq steps.
        if self.total_it % self.policy_freq == 0:
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            if self.use_ewc and self.current_task > 1:
                actor_ewc_loss = self.ewc_actor.calculate_ewc_loss()
                actor_loss += actor_ewc_loss

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()
            # Polyak-averaged soft target updates.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)
        # actor_loss is the int 0 on non-policy-update steps.
        return {"critic_loss": critic_loss.item(),
                "actor_loss": actor_loss if isinstance(actor_loss, (int, float)) else actor_loss.item()}

    def update_lr_schedulers(self, reward):
        """Step both plateau schedulers with the latest episode reward
        (mode='max': LR halves after `patience` episodes without improvement)."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


def _plot_training_comparison(results, episodes_per_task, global_episode):
    """Save the six-panel EWC-vs-baseline training-curve comparison figure.

    Panels: reward (raw + smoothed), collected tasks, total energy, average
    delay, critic loss, actor loss.  Dashed vertical lines mark the two
    task-phase boundaries.  The figure is written to
    ``results/comparison_curves_episode_{global_episode}.png``.
    """
    plt.figure(figsize=(30, 6))

    def _phase_boundaries(labeled=False):
        # Dashed markers at the episode indices where the task phase switches.
        # A None label produces no legend entry (matches the original layout,
        # where only the first panel labels these lines).
        plt.axvline(x=episodes_per_task, color='green', linestyle='--',
                    label='Phase 1->2' if labeled else None)
        plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--',
                    label='Phase 2->3' if labeled else None)

    plt.subplot(1, 6, 1)
    plt.plot(results['ewc']['rewards_history'], alpha=0.3, color='blue', label='EWC Raw')
    plt.plot(results['ewc']['smoothed_rewards'], color='blue', label='EWC Smoothed')
    plt.plot(results['baseline']['rewards_history'], alpha=0.3, color='red', label='Baseline Raw')
    plt.plot(results['baseline']['smoothed_rewards'], color='red', label='Baseline Smoothed')
    _phase_boundaries(labeled=True)
    plt.title("Reward Comparison"); plt.xlabel("Episode"); plt.ylabel("Reward"); plt.legend(); plt.grid(True)

    # Panels 2-4 share the same two-curve layout; only the metric differs.
    for panel, (key, title, ylabel) in enumerate(
            [('collection_history', "Collected Tasks", "Number of Tasks"),
             ('energy_history', "Total Energy", "Energy"),
             ('delay_history', "Avg Delay", "Delay (s)")], start=2):
        plt.subplot(1, 6, panel)
        plt.plot(results['ewc'][key], color='blue', label='EWC')
        plt.plot(results['baseline'][key], color='red', label='Baseline')
        _phase_boundaries()
        plt.title(title); plt.xlabel("Episode"); plt.ylabel(ylabel); plt.legend(); plt.grid(True)

    # Panels 5-6: losses, plotted only once at least one update has happened.
    for panel, name in ((5, "Critic"), (6, "Actor")):
        key = name.lower()
        plt.subplot(1, 6, panel)
        if results['ewc']['losses'][key]:
            plt.plot(results['ewc']['losses'][key], color='blue', label=f'EWC {name}')
        if results['baseline']['losses'][key]:
            plt.plot(results['baseline']['losses'][key], color='red', label=f'Baseline {name}')
        _phase_boundaries()
        plt.title(f"{name} Loss"); plt.xlabel("Episode"); plt.ylabel("Loss"); plt.legend(); plt.grid(True)

    plt.tight_layout()
    plt.savefig(f"results/comparison_curves_episode_{global_episode}.png")
    plt.close()


def train():
    """Train an EWC-regularized and a baseline TD3 agent on the same schedule.

    Three sequential task phases are run; at each phase boundary the
    environment changes which users generate tasks and both agents are
    notified via ``switch_task``.  Per-episode metrics for both agents are
    recorded, comparison curves are plotted every ``eval_freq`` episodes,
    and model checkpoints are written under ``results/``.

    Returns:
        Tuple ``(agent_ewc, agent_baseline, env)`` after all phases complete.
    """
    os.makedirs("results", exist_ok=True)
    os.makedirs("results/ewc", exist_ok=True)
    os.makedirs("results/baseline", exist_ok=True)
    env = Environment()
    # NOTE(review): max_users is 11 while NUM_USERS is 10 — presumably one
    # padding slot in the state encoding; confirm against the state builder.
    max_users = 11
    state_dim = 2 + max_users * 4 + 1  # UAV (x, y) + 4 features per user slot + 1 scalar
    action_dim = 2
    max_action = 1
    agent_ewc = TD3(state_dim, action_dim, max_action, use_ewc=True)
    agent_baseline = TD3(state_dim, action_dim, max_action, use_ewc=False)
    # Fix: derive the schedule from the module-level constant instead of
    # repeating the magic numbers 200/600, which had to be kept in sync with
    # EPISODES_PER_TASK by hand.
    episodes_per_task = EPISODES_PER_TASK
    total_episodes = 3 * episodes_per_task
    eval_freq = 50

    def _fresh_metrics():
        # One metrics bundle per agent; both agents get an identical structure.
        return {'rewards_history': [], 'smoothed_rewards': [], 'collection_history': [],
                'energy_history': [], 'delay_history': [],
                'losses': {"critic": [], "actor": []}}

    results = {'ewc': _fresh_metrics(), 'baseline': _fresh_metrics()}
    start_time = time.time()
    for phase in range(1, 4):
        env.update_task_generating_users(phase)
        agent_ewc.switch_task(phase)
        agent_baseline.switch_task(phase)
        # Exploration noise restarts each phase (10% lower each time) and
        # decays linearly to the floor over the phase.
        phase_noise_base = EXPLORATION_NOISE_START * (0.9 ** (phase - 1))
        phase_noise = np.linspace(phase_noise_base, EXPLORATION_NOISE_END, episodes_per_task)
        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode
            for agent_name, agent in [('ewc', agent_ewc), ('baseline', agent_baseline)]:
                state = env.reset()
                # Clear the recurrent (GRU) state at episode start.
                agent.actor.reset_hidden()
                agent.critic.reset_hidden()
                episode_reward = 0
                last_collection = 0
                episode_losses = {"critic": [], "actor": []}
                current_noise = phase_noise[episode - 1]
                for step in range(1, MAX_STEPS + 1):
                    action = agent.select_action(state, noise_scale=current_noise)
                    next_state, reward, done, info = env.step(action)
                    agent.memory.add(state, action, reward, next_state, done)
                    loss_info = agent.train()  # may be falsy until the replay buffer warms up
                    if loss_info:
                        episode_losses["critic"].append(loss_info["critic_loss"])
                        episode_losses["actor"].append(loss_info["actor_loss"])
                    state = next_state
                    episode_reward += reward
                    last_collection = info["collected_required"]
                    if done:
                        if global_episode % eval_freq == 0:
                            print(f"--- {agent_name.upper()} Episode {global_episode} finished. ---")
                            env.render(f"{agent_name}_{global_episode}")
                            torch.save(agent.actor.state_dict(),
                                       f"results/{agent_name}/actor_phase_{phase}_ep_{episode}.pth")
                        break
                results[agent_name]['rewards_history'].append(episode_reward)
                results[agent_name]['collection_history'].append(last_collection)
                results[agent_name]['energy_history'].append(info["energy"])
                results[agent_name]['delay_history'].append(info["delay"])
                # 10-episode moving average; fall back to the raw reward early on.
                if len(results[agent_name]['rewards_history']) >= 10:
                    results[agent_name]['smoothed_rewards'].append(
                        np.mean(results[agent_name]['rewards_history'][-10:]))
                else:
                    results[agent_name]['smoothed_rewards'].append(episode_reward)
                if episode_losses["critic"]:
                    results[agent_name]['losses']["critic"].append(np.mean(episode_losses["critic"]))
                if episode_losses["actor"]:
                    results[agent_name]['losses']["actor"].append(np.mean(episode_losses["actor"]))
                agent.update_lr_schedulers(episode_reward)
                elapsed_time = time.time() - start_time
                collected_required = info.get("collected_required", 0)
                total_required = info.get("total_required", 1)
                print(
                    f"{agent_name.upper():>8} Ph {phase} Ep {episode:3d}/{episodes_per_task} "
                    f"Tasks {collected_required:2d}/{total_required:2d} "
                    f"Steps {env.step_count:3d} "
                    f"Reward: {episode_reward:7.2f} "
                    f"Energy: {info.get('energy', 0):.1f} "
                    f"Delay: {info.get('delay', 0):.3f}s "
                    f"Time: {elapsed_time:.1f}s"
                )
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                _plot_training_comparison(results, episodes_per_task, global_episode)
        # End-of-phase checkpoints for both agents.
        torch.save(agent_ewc.actor.state_dict(), f"results/ewc/actor_phase_{phase}.pth")
        torch.save(agent_ewc.critic.state_dict(), f"results/ewc/critic_phase_{phase}.pth")
        torch.save(agent_baseline.actor.state_dict(), f"results/baseline/actor_phase_{phase}.pth")
        torch.save(agent_baseline.critic.state_dict(), f"results/baseline/critic_phase_{phase}.pth")
    print("Training completed!")
    return agent_ewc, agent_baseline, env


def test_and_visualize(agent, env, model_path, phase=3, algorithm_name="EWC"):
    """Load an actor checkpoint, roll out one greedy episode, and plot results.

    Saves a trajectory figure and per-step/cumulative reward curves under
    ``results/`` and prints a summary of the run to stdout.

    Args:
        agent: TD3 agent whose actor weights will be overwritten in place.
        env: environment to evaluate in; its task phase is updated here.
        model_path: path to a saved actor ``state_dict``.
        phase: task phase to evaluate (1-3).
        algorithm_name: label used in figure titles and output file names.
    """
    # Fix: map_location keeps loading working when the checkpoint was saved on
    # a different device (e.g. a GPU-saved checkpoint on a CPU-only host).
    agent.actor.load_state_dict(torch.load(model_path, map_location=device))
    agent.actor.eval()
    env.update_task_generating_users(phase)
    state = env.reset()
    agent.actor.reset_hidden()
    total_reward = 0
    step_rewards = []
    trajectory = [env.uav_position.copy()]
    collection_times = np.zeros(len(env.user_positions))
    collection_order = []
    for step in range(1, MAX_STEPS + 1):
        action = agent.select_action(state, noise_scale=0)  # greedy (no exploration)
        trajectory.append(env.uav_position.copy())
        collected_before = env.collected_tasks.copy()
        next_state, reward, done, info = env.step(action)
        # Record the first step at which each required task was collected.
        for i in range(len(env.user_positions)):
            if (i < len(env.task_generating_users) and env.task_generating_users[i] and
                    env.collected_tasks[i] and not collected_before[i]):
                collection_times[i] = step
                collection_order.append(i)
        total_reward += reward
        step_rewards.append(reward)
        state = next_state
        if done:
            break
    trajectory = np.array(trajectory)
    # --- Trajectory figure: users colored by status, UAV path, collect radius ---
    plt.figure(figsize=(12, 10))
    for i, (x, y) in enumerate(env.user_positions):
        if i < len(env.task_generating_users) and env.task_generating_users[i]:
            if env.collected_tasks[i]:
                color = 'green'  # task collected
                plt.scatter(x, y, s=150, c=color, marker='o')
                plt.annotate(f"用户 {i + 1}\n(步数 {int(collection_times[i])})", (x, y),
                             textcoords="offset points", xytext=(0, 10), ha='center', fontsize=10)
            else:
                color = 'red'  # task generated but not collected
                plt.scatter(x, y, s=150, c=color, marker='o')
                plt.annotate(f"用户 {i + 1}\n(未收集)", (x, y), textcoords="offset points",
                             xytext=(0, 10), ha='center', fontsize=10)
        else:
            color = 'gray'  # user inactive in this phase
            plt.scatter(x, y, s=100, c=color, marker='o')
            plt.annotate(f"用户 {i + 1}\n(不产生任务)", (x, y), textcoords="offset points",
                         xytext=(0, 10), ha='center', fontsize=10)
    plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', label='UAV轨迹', alpha=0.7)
    plt.scatter(trajectory[0, 0], trajectory[0, 1], s=200, c='blue', marker='^', label='起点')
    plt.scatter(trajectory[-1, 0], trajectory[-1, 1], s=200, c='purple', marker='*', label='终点')
    current_pos = trajectory[-1]
    # Collection radius drawn around the final UAV position.
    circle = plt.Circle((current_pos[0], current_pos[1]), MAX_DISTANCE_COLLECT,
                        color='lightblue', fill=False, alpha=0.5, linewidth=2, label='收集范围')
    plt.gca().add_patch(circle)
    # Step-index markers every 10 trajectory points.
    for i in range(0, len(trajectory), 10):
        plt.annotate(f"{i}", (trajectory[i, 0], trajectory[i, 1]), fontsize=8, ha='center', va='center',
                     bbox=dict(boxstyle="circle,pad=0.2", fc="white", alpha=0.7))
    # Dashed link from the UAV position at collection time to each collected user.
    for i in range(len(env.user_positions)):
        if (i < len(env.task_generating_users) and env.task_generating_users[i] and
                env.collected_tasks[i]):
            step = int(collection_times[i])
            if step < len(trajectory):
                uav_pos = trajectory[step]
                plt.plot([uav_pos[0], env.user_positions[i, 0]],
                         [uav_pos[1], env.user_positions[i, 1]], 'g--', alpha=0.5)
    plt.title(f"{algorithm_name} UAV任务收集轨迹 (阶段{phase}: 收集 "
              f"{sum(env.collected_tasks & env.task_generating_users)}/{sum(env.task_generating_users)} 任务, "
              f"步数: {env.step_count})")
    plt.xlabel("X坐标 (m)"); plt.ylabel("Y坐标 (m)"); plt.grid(True); plt.legend()
    plt.xlim(0, AREA_SIZE); plt.ylim(0, AREA_SIZE)
    plt.savefig(f"results/{algorithm_name.lower()}_trajectory_phase_{phase}.png"); plt.close()
    # --- Reward curves: per-step and cumulative ---
    plt.figure(figsize=(15, 5))
    plt.subplot(1, 2, 1); plt.plot(step_rewards); plt.title(f"{algorithm_name} 步奖励"); plt.xlabel("步数"); plt.ylabel("奖励"); plt.grid(True)
    plt.subplot(1, 2, 2); plt.plot(np.cumsum(step_rewards)); plt.title(f"{algorithm_name} 累计奖励"); plt.xlabel("步数"); plt.ylabel("累计奖励"); plt.grid(True)
    plt.tight_layout()
    plt.savefig(f"results/{algorithm_name.lower()}_rewards_phase_{phase}.png"); plt.close()
    # --- Console summary ---
    print(f"\n{algorithm_name} 测试结果 (阶段 {phase}):")
    collected_count = sum(env.collected_tasks & env.task_generating_users)
    total_count = sum(env.task_generating_users)
    percentage = collected_count / total_count * 100 if total_count > 0 else 0
    print(f"收集任务: {collected_count}/{total_count} ({percentage:.1f}%)")
    print(f"总奖励: {total_reward:.2f}")
    print(f"总能耗: {info['energy']:.2f}")
    print(f"总延迟: {info['delay']:.2f}")
    print(f"总步数: {env.step_count}")

if __name__ == "__main__":
    # Train both agents, then evaluate each saved phase checkpoint.
    agent_ewc, agent_baseline, env = train()
    evaluated = (("EWC", agent_ewc), ("Baseline", agent_baseline))
    for phase in range(1, 4):
        print(f"\n测试阶段 {phase} 的模型性能:")
        for label, agent in evaluated:
            checkpoint = f"results/{label.lower()}/actor_phase_{phase}.pth"
            test_and_visualize(agent, env, checkpoint, phase, label)

