'''Code 2: GRU-based TD3 with EWC continual learning for a UAV data-collection task.'''
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time
import matplotlib

# --- Reproducibility: seed every RNG this script uses ---
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

# Prefer GPU when available; all networks/tensors are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# SimHei font so the Chinese labels in plots render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# NOTE(review): only referenced by TD3.task_noise, which is itself unused;
# train() uses its own episodes_per_task=200 schedule — confirm intent.
EPISODES_PER_TASK = 1500

# --- Environment geometry ---
AREA_SIZE = 100            # side length of the square service area
NUM_USERS = 12             # total ground users
MAX_STEPS = 200            # episode step limit
MAX_DISTANCE_COLLECT = 15  # UAV collects a user's task within this radius

# --- UAV motion / energy model ---
UAV_SPEED = 15.0              # max per-axis displacement per step
UAV_ENERGY_PER_METER = 0.1    # propulsion energy per meter flown
UAV_HOVER_ENERGY = 0.5        # extra energy charged per task collection

# Per-user task size is drawn uniformly from [TASK_SIZE[0], TASK_SIZE[1]].
TASK_SIZE = [10, 50]

# --- TD3 hyper-parameters ---
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99               # discount factor
TAU = 0.005                # Polyak averaging rate for target networks
BUFFER_SIZE = 200000
BATCH_SIZE = 256
EXPLORATION_NOISE_START = 0.4
EXPLORATION_NOISE_END = 0.05
REWARD_SCALE = 0.1         # global multiplier applied to the shaped reward

# --- Elastic Weight Consolidation (continual learning) ---
EWC_LAMBDA = 1.0           # strength of the quadratic EWC penalty
FISHER_SAMPLE_SIZE = 1000  # replay samples used to estimate the Fisher diagonal

# --- Recurrent (GRU) observation encoding ---
SEQUENCE_LENGTH = 10       # number of stacked past observations fed to the GRUs
HIDDEN_SIZE = 128          # GRU hidden width


class Environment:
    """UAV task-collection environment over a square service area.

    One UAV starts at the center of an AREA_SIZE x AREA_SIZE region and must
    fly within MAX_DISTANCE_COLLECT of every task-generating user to collect
    that user's task.  The observation handed to the agent is a stack of the
    last SEQUENCE_LENGTH per-step state vectors (for a recurrent policy).

    Continual-learning phases change *which* users generate tasks via
    update_task_generating_users(); user positions and task sizes stay fixed
    for the lifetime of the instance.
    """

    def __init__(self):
        # Fixed random layout.  Keep these two draws in this order so seeded
        # runs remain reproducible.
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_sizes = np.random.uniform(TASK_SIZE[0], TASK_SIZE[1], size=NUM_USERS)

        # Boolean mask of users that currently produce tasks (phase dependent).
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)

        # UAV starts at the center of the area.
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)

        # Per-episode bookkeeping.
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.total_delay = 0
        self.total_energy = 0
        self.trajectory = [self.uav_position.copy()]

        # Distances to every user at the previous step (proximity shaping).
        self.last_distances = np.linalg.norm(self.uav_position - self.user_positions, axis=1)

        # Rolling window of raw observations fed to the recurrent networks.
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        self.current_phase = 1
        # Mask remembered from phase 1 so phase 4 can replay the same users.
        self.phase1_users = None

    def update_task_generating_users(self, phase):
        """Select which users generate tasks for the given curriculum phase.

        Phase 1: 10 random users (mask remembered for phase 4).
        Phase 2: all users.
        Phase 3: 8 random users.
        Phase 4: the exact phase-1 users again (tests retention/forgetting).

        Raises:
            RuntimeError: phase 4 requested before phase 1 ever ran.
            ValueError: unknown phase id.
        """
        self.current_phase = phase

        if phase == 1:
            indices = np.random.choice(NUM_USERS, 10, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
            self.phase1_users = self.task_generating_users.copy()
        elif phase == 2:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 3:
            indices = np.random.choice(NUM_USERS, 8, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
        elif phase == 4:
            if self.phase1_users is None:
                # Previously this crashed with an opaque AttributeError.
                raise RuntimeError("Phase 4 replays the phase-1 user mask, but phase 1 has not run yet")
            self.task_generating_users = self.phase1_users.copy()
        else:
            # Previously unknown phases silently kept the old mask.
            raise ValueError(f"Unknown phase: {phase}")

        print(f"Phase {phase}: {sum(self.task_generating_users)} users are generating tasks")
        print(f"Task generating users: {np.where(self.task_generating_users)[0]}")

    def reset(self):
        """Start a new episode; return the initial stacked observation."""
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.total_delay = 0
        self.total_energy = 0
        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.linalg.norm(self.uav_position - self.user_positions, axis=1)
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)

        # Pre-fill the history with the initial state so the very first
        # observation already carries a full SEQUENCE_LENGTH window.
        state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(state)

        return self._get_gru_state()

    def step(self, action):
        """Apply a 2-D movement action in [-1, 1]^2 scaled by UAV_SPEED.

        Returns:
            (stacked_observation, reward, done, info) where info reports
            collection counts, cumulative energy and cumulative delay.
        """
        # Move the UAV and keep it inside the area.
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)

        self.trajectory.append(self.uav_position.copy())

        # Propulsion energy is proportional to the distance actually flown
        # (after clipping at the area border).
        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        energy_consumed = distance_moved * UAV_ENERGY_PER_METER

        new_distances = np.linalg.norm(self.uav_position - self.user_positions, axis=1)

        # Collect every not-yet-collected task whose active user is in range.
        newly_collected = 0
        collected_indices = []
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    collected_indices.append(i)

                    # Simple delay model: distance * task_size / 10.
                    delay = new_distances[i] * self.task_sizes[i] / 10
                    self.total_delay += delay

                    # Hover cost is charged once per collection.
                    energy_consumed += UAV_HOVER_ENERGY

        self.total_energy += energy_consumed
        self.step_count += 1

        reward = self._calculate_reward(newly_collected, energy_consumed, collected_indices, new_distances,
                                        self.last_distances)

        self.last_distances = new_distances

        # Episode ends at the step limit or once every required task is in.
        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        state = self._get_state()
        self.observation_history.append(state)

        return self._get_gru_state(), reward, done, {
            "collected": sum(self.collected_tasks),
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": self.total_energy,
            "delay": self.total_delay,
            "newly_collected": newly_collected,
            "total_users": NUM_USERS
        }

    def _get_state(self):
        """Build one flat observation.

        Layout: [uav_x, uav_y] + per-user (distance, collected, active) + time,
        everything normalized to roughly [0, 1].
        """
        state = np.zeros(2 + NUM_USERS * 3 + 1)

        state[0:2] = self.uav_position / AREA_SIZE  # normalized UAV position

        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 3
            # Normalize by the area diagonal so the value stays in [0, 1].
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])

        state[-1] = self.step_count / MAX_STEPS  # normalized episode progress

        return state

    def _get_gru_state(self):
        """Return the (SEQUENCE_LENGTH, state_dim) observation stack, padding if short."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())

        return np.array(list(self.observation_history))

    def _calculate_reward(self, newly_collected, energy_consumed, collected_indices, new_distances, old_distances):
        """Shaped reward: collection + progress + proximity - energy - time.

        The final value is multiplied by REWARD_SCALE.
        """
        time_penalty = 0.1  # constant per-step cost to encourage speed

        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users)

        # Flat bonus per task collected this step.
        collection_reward = newly_collected * 20

        # Extra bonus proportional to overall completion when collecting.
        if newly_collected > 0 and total_required > 0:
            progress_bonus = (collected_required / total_required) * 15
            collection_reward += progress_bonus

        energy_penalty = energy_consumed * 0.8

        # Dense shaping: reward moving closer to still-uncollected active
        # users, weighted more strongly the nearer the UAV already is.
        proximity_reward = 0
        uncollected_tasks_count = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                uncollected_tasks_count += 1
                dist_diff = old_distances[i] - new_distances[i]
                proximity_factor = max(0, 1 - (new_distances[i] / (AREA_SIZE)) ** 2)
                proximity_reward += dist_diff * 0.5 * proximity_factor

        if uncollected_tasks_count == 0:
            proximity_reward = 0

        # Large terminal bonus, discounted by how long the episode took.
        completion_reward = 0
        if total_required > 0 and collected_required == total_required:
            completion_reward = 200 - self.step_count * 0.2

        reward = collection_reward + proximity_reward + completion_reward - energy_penalty - time_penalty

        return reward * REWARD_SCALE

    def render(self, episode=0, clear_output=True):
        """Save a snapshot plot to results/step_<episode>_<step>.png.

        Green = collected task, red = pending task, gray = inactive user.
        ``clear_output`` is accepted for API compatibility but currently unused.
        """
        plt.figure(figsize=(10, 10))

        for i, pos in enumerate(self.user_positions):
            if self.task_generating_users[i]:
                if self.collected_tasks[i]:
                    color = 'green'
                else:
                    color = 'red'
            else:
                color = 'gray'

            plt.scatter(pos[0], pos[1], s=100, c=color)
            plt.annotate(f"{i + 1}", (pos[0], pos[1]), fontsize=12)

        # Flown path plus current UAV position and its collection radius.
        trajectory = np.array(self.trajectory)
        plt.plot(trajectory[:, 0], trajectory[:, 1], 'b-', alpha=0.5)
        plt.scatter(self.uav_position[0], self.uav_position[1], s=200, c='blue', marker='*')

        circle = plt.Circle((self.uav_position[0], self.uav_position[1]),
                            MAX_DISTANCE_COLLECT, color='blue', fill=False, alpha=0.3)
        plt.gca().add_patch(circle)

        plt.xlim(0, AREA_SIZE)
        plt.ylim(0, AREA_SIZE)

        title = f"Episode {episode}, Step {self.step_count}\n"
        title += f"收集: {sum(self.collected_tasks & self.task_generating_users)}/{sum(self.task_generating_users)} 任务"
        plt.title(title)
        plt.grid(True)

        plt.savefig(f"results/step_{episode}_{self.step_count}.png")
        plt.close()


class GRUActor(nn.Module):
    """Recurrent deterministic policy: observation sequence -> action.

    A single-layer GRU encodes the last SEQUENCE_LENGTH observations; the
    encoding of the final timestep is passed through a LayerNorm MLP and
    squashed with tanh, then scaled by ``max_action``.
    """

    def __init__(self, state_dim, action_dim, max_action):
        super(GRUActor, self).__init__()

        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # batch_first=True => input is (batch, seq, feature).
        self.gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True,
        )

        self.layer1 = nn.Linear(self.hidden_size, 256)
        self.layer2 = nn.Linear(256, 128)
        self.layer3 = nn.Linear(128, action_dim)

        self.max_action = max_action

        self.ln1 = nn.LayerNorm(256)
        self.ln2 = nn.LayerNorm(128)

        # Carried GRU hidden state; (re)created lazily in forward().
        self.hidden = None

        self._init_weights()

    def _init_weights(self):
        """Xavier-uniform weights and zero biases for every linear layer."""
        for module in self.modules():
            if not isinstance(module, nn.Linear):
                continue
            nn.init.xavier_uniform_(module.weight)
            nn.init.constant_(module.bias, 0.0)

    def forward(self, state, reset_hidden=False):
        """Map a (batch, seq, state_dim) tensor to actions in [-max_action, max_action]."""
        if self.hidden is None or reset_hidden:
            self.reset_hidden(state.size(0))

        sequence_out, self.hidden = self.gru(state, self.hidden)
        features = sequence_out[:, -1]  # encoding of the last timestep

        features = self.ln1(torch.relu(self.layer1(features)))
        features = self.ln2(torch.relu(self.layer2(features)))
        squashed = torch.tanh(self.layer3(features))

        return self.max_action * squashed

    def reset_hidden(self, batch_size=1):
        """Zero the carried GRU state for a new batch/episode."""
        self.hidden = torch.zeros(1, batch_size, self.hidden_size, device=device)


class GRUCritic(nn.Module):
    """Twin-Q recurrent critic for TD3.

    Each Q head owns its own GRU encoder over the observation sequence; the
    last-step encoding is concatenated with the action and scored by a
    LayerNorm MLP.  Two heads (q1/q2) enable TD3's clipped double-Q target.
    """

    def __init__(self, state_dim, action_dim):
        super(GRUCritic, self).__init__()

        self.state_dim = state_dim
        self.seq_len = SEQUENCE_LENGTH
        self.hidden_size = HIDDEN_SIZE

        # Independent sequence encoders, one per Q head.
        self.q1_gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True,
        )

        self.q2_gru = nn.GRU(
            input_size=state_dim,
            hidden_size=self.hidden_size,
            num_layers=1,
            batch_first=True,
        )

        # Q1 head MLP.
        self.q1_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q1_layer2 = nn.Linear(256, 128)
        self.q1_output = nn.Linear(128, 1)

        self.q1_ln1 = nn.LayerNorm(256)
        self.q1_ln2 = nn.LayerNorm(128)

        # Q2 head MLP.
        self.q2_layer1 = nn.Linear(self.hidden_size + action_dim, 256)
        self.q2_layer2 = nn.Linear(256, 128)
        self.q2_output = nn.Linear(128, 1)

        self.q2_ln1 = nn.LayerNorm(256)
        self.q2_ln2 = nn.LayerNorm(128)

        # Carried GRU hidden states, one per head.
        self.q1_hidden = None
        self.q2_hidden = None

        self._init_weights()

    def _init_weights(self):
        """Xavier-uniform weights and small constant biases for linear layers."""
        for module in self.modules():
            if not isinstance(module, nn.Linear):
                continue
            nn.init.xavier_uniform_(module.weight)
            nn.init.constant_(module.bias, 0.01)

    def _score(self, features, action, layer1, ln1, layer2, ln2, output):
        """Run one Q head's MLP on the concatenated (features, action) pair."""
        x = torch.cat([features, action], dim=1)
        x = ln1(torch.relu(layer1(x)))
        x = ln2(torch.relu(layer2(x)))
        return output(x)

    def forward(self, state, action, reset_hidden=False):
        """Return (Q1, Q2) for a (batch, seq, state_dim) sequence and action batch."""
        if reset_hidden or self.q1_hidden is None or self.q2_hidden is None:
            self.reset_hidden(state.size(0))

        q1_seq, self.q1_hidden = self.q1_gru(state, self.q1_hidden)
        q2_seq, self.q2_hidden = self.q2_gru(state, self.q2_hidden)

        q1 = self._score(q1_seq[:, -1], action,
                         self.q1_layer1, self.q1_ln1, self.q1_layer2, self.q1_ln2, self.q1_output)
        q2 = self._score(q2_seq[:, -1], action,
                         self.q2_layer1, self.q2_ln1, self.q2_layer2, self.q2_ln2, self.q2_output)

        return q1, q2

    def Q1(self, state, action, reset_hidden=False):
        """Return only the first head's Q value (used for the actor update)."""
        if reset_hidden or self.q1_hidden is None:
            self.reset_q1_hidden(state.size(0))

        q1_seq, self.q1_hidden = self.q1_gru(state, self.q1_hidden)
        return self._score(q1_seq[:, -1], action,
                           self.q1_layer1, self.q1_ln1, self.q1_layer2, self.q1_ln2, self.q1_output)

    def reset_hidden(self, batch_size=1):
        """Zero both heads' carried GRU states."""
        self.reset_q1_hidden(batch_size)
        self.reset_q2_hidden(batch_size)

    def reset_q1_hidden(self, batch_size=1):
        """Zero the Q1 encoder's carried state."""
        self.q1_hidden = torch.zeros(1, batch_size, self.hidden_size, device=device)

    def reset_q2_hidden(self, batch_size=1):
        """Zero the Q2 encoder's carried state."""
        self.q2_hidden = torch.zeros(1, batch_size, self.hidden_size, device=device)


class ReplayBuffer:
    """Bounded FIFO store of (state, action, reward, next_state, done) tuples."""

    def __init__(self, max_size=BUFFER_SIZE):
        self.buffer = deque(maxlen=max_size)

    def add(self, state, action, reward, next_state, done):
        """Append one transition, evicting the oldest when at capacity."""
        transition = (state, action, reward, next_state, done)
        self.buffer.append(transition)

    def sample(self, batch_size):
        """Uniformly sample up to ``batch_size`` transitions as stacked arrays."""
        count = min(len(self.buffer), batch_size)
        columns = zip(*random.sample(self.buffer, count))
        states, actions, rewards, next_states, dones = (np.stack(col) for col in columns)
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)


class EWC:
    """Elastic Weight Consolidation helper for one network.

    After finishing a task, snapshot the model parameters and estimate a
    diagonal Fisher-style importance from replayed transitions; during later
    tasks calculate_ewc_loss() penalizes movement of parameters the previous
    task deemed important.
    """

    def __init__(self, model, fisher_sample_size=FISHER_SAMPLE_SIZE):
        # model: a GRUActor or GRUCritic (dispatch happens in
        # _calculate_fisher_info via isinstance).
        self.model = model
        self.fisher_sample_size = fisher_sample_size
        self.importance = {}       # param name -> diagonal importance estimate
        self.old_params = {}       # param name -> snapshot after the last task
        self.fisher_diagonal = {}  # NOTE(review): never written or read — appears unused

    def _calculate_fisher_info(self, replay_buffer):
        """Estimate diagonal importance from single-sample squared gradients.

        For the actor the surrogate loss is the MSE between the policy output
        and the replayed action; for the critic it is the mean Q1 output.
        NOTE(review): neither is the true log-likelihood Fisher — this is a
        heuristic importance estimate; confirm it matches the intended method.
        """
        # One zero tensor per trainable parameter, accumulated in-place below.
        fisher = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                fisher[name] = torch.zeros_like(param).to(device)

        samples_count = min(self.fisher_sample_size, len(replay_buffer))
        if samples_count <= 0:
            return fisher  # empty buffer: all-zero importance

        for _ in range(samples_count):
            # One transition at a time so gradients are per-sample.
            states, actions, _, _, _ = replay_buffer.sample(1)
            states = torch.FloatTensor(states).to(device)
            actions = torch.FloatTensor(actions).to(device)

            self.model.zero_grad()

            if isinstance(self.model, GRUActor):
                self.model.reset_hidden(1)
                outputs = self.model(states)
                loss = ((outputs - actions) ** 2).mean()
            else:
                # Critic path: GRUCritic.forward returns (q1, q2); only q1 is used.
                self.model.reset_hidden(1)
                outputs, _ = self.model(states, actions)
                loss = outputs.mean()

            loss.backward()

            # Average of squared gradients over the sampled transitions.
            for name, param in self.model.named_parameters():
                if param.requires_grad and param.grad is not None:
                    fisher[name] += param.grad.pow(2) / samples_count

        return fisher

    def store_task_parameters(self, task_id, replay_buffer):
        """Snapshot parameters and recompute importance for the finished task.

        Only the most recent task's snapshot/importance is kept (``task_id``
        is informational) — a single-anchor EWC variant, not one term per task.
        """
        print(f"Storing parameters for task {task_id} and computing Fisher information matrix")

        self.old_params = {}
        for name, param in self.model.named_parameters():
            if param.requires_grad:
                self.old_params[name] = param.data.clone()

        self.importance = self._calculate_fisher_info(replay_buffer)

        print(f"Stored {len(self.old_params)} parameters and computed Fisher matrices")

    def calculate_ewc_loss(self, lam=EWC_LAMBDA):
        """Return lam * sum_i F_i * (theta_i - theta_i_old)^2; 0 before any snapshot."""
        loss = 0

        if not self.old_params or not self.importance:
            return loss

        for name, param in self.model.named_parameters():
            if name in self.old_params and name in self.importance and param.requires_grad:
                loss += torch.sum(self.importance[name] * (param - self.old_params[name]).pow(2))

        return lam * loss


class TD3:
    """Twin Delayed DDPG agent with recurrent networks and EWC regularization.

    Continual-learning support: switch_task() snapshots EWC statistics for the
    finished task and clears the replay buffer; train() then adds the EWC
    penalties to both actor and critic losses for tasks > 1.
    """

    def __init__(self, state_dim, action_dim, max_action):
        # Actor and its Polyak-averaged target copy.
        self.actor = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target = GRUActor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)

        # Twin critic and its target copy.
        self.critic = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target = GRUCritic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.max_action = max_action
        self.memory = ReplayBuffer()

        # TD3 specifics: target policy smoothing noise and delayed updates.
        self.policy_noise = 0.2 * max_action
        self.noise_clip = 0.5 * max_action
        self.policy_freq = 2
        self.total_it = 0

        # One EWC tracker per network.
        self.ewc_actor = EWC(self.actor)
        self.ewc_critic = EWC(self.critic)
        self.current_task = 1

        # NOTE(review): never read in this file — train() builds its own
        # per-phase schedule; confirm before removing.
        self.task_noise = {
            1: np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            2: np.linspace(EXPLORATION_NOISE_START * 0.8, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            3: np.linspace(EXPLORATION_NOISE_START * 0.7, EXPLORATION_NOISE_END, EPISODES_PER_TASK),
            4: np.linspace(EXPLORATION_NOISE_START * 0.6, EXPLORATION_NOISE_END, EPISODES_PER_TASK)
        }

        # Halve the LR when the (maximized) episode reward plateaus.
        self.actor_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.actor_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )
        self.critic_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.critic_optimizer, mode='max', factor=0.5, patience=100, verbose=True
        )

    def select_action(self, state, noise_scale=EXPLORATION_NOISE_START):
        """Pick a (possibly noisy) action for one stacked observation.

        state: (seq, state_dim) or (1, seq, state_dim) array-like; Gaussian
        exploration noise is added and the result clipped to the action range.
        """
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)  # add the batch dimension

        state = torch.FloatTensor(state).to(device)

        # Fresh hidden state: the full observation window is in `state`.
        self.actor.reset_hidden(1)

        action = self.actor(state).cpu().data.numpy().flatten()

        if noise_scale > 0:
            noise = np.random.normal(0, self.max_action * noise_scale, size=action.shape)
            action = action + noise

        return np.clip(action, -self.max_action, self.max_action)

    def switch_task(self, task_id):
        """Finish the current task (EWC snapshot) and prepare for the next one."""
        print(f"\nSwitching to task {task_id}")

        # Consolidate what was learned on the task that just ended.
        if self.current_task > 0 and len(self.memory) > 0:
            self.ewc_actor.store_task_parameters(self.current_task, self.memory)
            self.ewc_critic.store_task_parameters(self.current_task, self.memory)

        print(f"Clearing replay buffer for new task.")
        self.memory.buffer.clear()

        self.current_task = task_id

        self.actor.reset_hidden()
        self.critic.reset_hidden()

        print(f"Reset GRU states for new task {task_id}")

    def train(self):
        """One TD3 gradient step from a replay minibatch.

        Returns a dict with "critic_loss"/"actor_loss", or None when the
        buffer holds fewer than BATCH_SIZE transitions.
        """
        self.total_it += 1

        if len(self.memory) < BATCH_SIZE:
            return

        state, action, reward, next_state, done = self.memory.sample(BATCH_SIZE)

        state = torch.FloatTensor(state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward.reshape(-1, 1)).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        done = torch.FloatTensor(done.reshape(-1, 1)).to(device)

        # Fresh recurrent states sized for this minibatch.
        self.critic.reset_hidden(BATCH_SIZE)
        self.actor.reset_hidden(BATCH_SIZE)

        with torch.no_grad():
            self.critic_target.reset_hidden(BATCH_SIZE)
            self.actor_target.reset_hidden(BATCH_SIZE)

            # Target policy smoothing: clipped Gaussian noise on the target action.
            noise = torch.FloatTensor(action.shape).data.normal_(0, self.policy_noise).to(device)
            noise = noise.clamp(-self.noise_clip, self.noise_clip)

            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)

            # Clipped double-Q target.
            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = torch.min(target_q1, target_q2)
            target_q = reward + (1 - done) * GAMMA * target_q

        current_q1, current_q2 = self.critic(state, action)

        critic_loss = nn.MSELoss()(current_q1, target_q) + nn.MSELoss()(current_q2, target_q)

        # EWC penalty only once a previous task has been consolidated.
        if self.current_task > 1:
            critic_ewc_loss = self.ewc_critic.calculate_ewc_loss()
            critic_loss += critic_ewc_loss

        self.critic_optimizer.zero_grad()
        # NOTE(review): retain_graph=True — presumably because the critic's
        # carried GRU hidden states still reference this graph when the
        # delayed actor update below reuses the critic; confirm necessity.
        critic_loss.backward(retain_graph=True)
        torch.nn.utils.clip_grad_norm_(self.critic.parameters(), 1.0)
        self.critic_optimizer.step()

        actor_loss = 0
        # Delayed policy update: actor and both targets move every policy_freq steps.
        if self.total_it % self.policy_freq == 0:
            self.actor.reset_hidden(BATCH_SIZE)
            self.critic.reset_q1_hidden(BATCH_SIZE)

            # Deterministic policy gradient: maximize Q1 of the actor's action.
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()

            if self.current_task > 1:
                actor_ewc_loss = self.ewc_actor.calculate_ewc_loss()
                actor_loss += actor_ewc_loss

            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            torch.nn.utils.clip_grad_norm_(self.actor.parameters(), 1.0)
            self.actor_optimizer.step()

            # Polyak-average both target networks toward the live networks.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(TAU * param.data + (1 - TAU) * target_param.data)

        return {
            "critic_loss": critic_loss.item(),
            # actor_loss stays the int 0 on non-policy-update steps.
            "actor_loss": actor_loss if isinstance(actor_loss, (int, float)) else actor_loss.item()
        }

    def update_lr_schedulers(self, reward):
        """Feed the latest episode reward to both plateau LR schedulers."""
        self.actor_scheduler.step(reward)
        self.critic_scheduler.step(reward)


def train():
    """Run the 4-phase continual-learning training loop.

    Each phase changes which users generate tasks (via
    Environment.update_task_generating_users) and trains for 200 episodes;
    between phases TD3.switch_task snapshots EWC statistics and clears the
    replay buffer.  Plots and checkpoints are written under ``results/``.

    Returns:
        (agent, env): the trained TD3 agent and the environment.
    """
    os.makedirs("results", exist_ok=True)

    env = Environment()

    # Must match the layout produced by Environment._get_state().
    state_dim = 2 + NUM_USERS * 3 + 1
    action_dim = 2
    max_action = 1

    agent = TD3(state_dim, action_dim, max_action)

    total_episodes = 800     # 4 phases x 200 episodes
    episodes_per_task = 200  # NOTE: shadows the module-level EPISODES_PER_TASK (1500)
    eval_freq = 50

    rewards_history = []
    smoothed_rewards = []
    collection_history = []
    energy_history = []
    best_reward = -float('inf')
    best_collection = 0
    losses = {"critic": [], "actor": []}

    # NOTE(review): computed but never used — phase_noise below drives exploration.
    noise_schedule = np.linspace(EXPLORATION_NOISE_START, EXPLORATION_NOISE_END, total_episodes)

    start_time = time.time()

    for phase in range(1, 5):
        env.update_task_generating_users(phase)

        agent.switch_task(phase)

        # Later phases start with proportionally less exploration noise.
        phase_noise_base = EXPLORATION_NOISE_START * (0.9 ** (phase - 1))
        phase_noise = np.linspace(phase_noise_base, EXPLORATION_NOISE_END, episodes_per_task)

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode

            state = env.reset()
            agent.actor.reset_hidden()
            agent.critic.reset_hidden()

            episode_reward = 0
            last_collection = 0
            episode_losses = {"critic": [], "actor": []}

            current_noise = phase_noise[episode - 1]

            for step in range(1, MAX_STEPS + 1):
                action = agent.select_action(state, noise_scale=current_noise)

                next_state, reward, done, info = env.step(action)

                agent.memory.add(state, action, reward, next_state, done)

                # One gradient step per environment step (None until the
                # buffer holds BATCH_SIZE transitions).
                loss_info = agent.train()
                if loss_info:
                    episode_losses["critic"].append(loss_info["critic_loss"])
                    episode_losses["actor"].append(loss_info["actor_loss"])

                state = next_state
                episode_reward += reward
                last_collection = info["collected_required"]

                if done:
                    if global_episode % eval_freq == 0:
                        print(f"--- Episode {global_episode} finished. Generating final trajectory plot. ---")
                        env.render(global_episode)

                    break

            # Episode bookkeeping (info is always bound: MAX_STEPS >= 1).
            rewards_history.append(episode_reward)
            collection_history.append(last_collection)
            energy_history.append(info["energy"])

            # 10-episode moving average for the reward curve.
            if len(rewards_history) >= 10:
                smoothed_rewards.append(np.mean(rewards_history[-10:]))
            else:
                smoothed_rewards.append(episode_reward)

            if episode_losses["critic"]:
                losses["critic"].append(np.mean(episode_losses["critic"]))
            if episode_losses["actor"]:
                losses["actor"].append(np.mean(episode_losses["actor"]))

            agent.update_lr_schedulers(episode_reward)

            current_required = info["total_required"]
            collection_ratio = last_collection / current_required if current_required > 0 else 0

            # Track the best run: collection ratio first, reward as tie-breaker.
            if collection_ratio > best_collection or (
                    collection_ratio == best_collection and episode_reward > best_reward):
                best_reward = episode_reward
                best_collection = collection_ratio

                torch.save(agent.actor.state_dict(), f"results/best_actor_phase_{phase}.pth")

            elapsed_time = time.time() - start_time
            print(f"Phase: {phase} | Episode: {episode}/{episodes_per_task} | "
                  f"Global Episode: {global_episode}/{total_episodes} | "
                  f"Tasks: {last_collection}/{info['total_required']} | "
                  f"Reward: {episode_reward:.2f} | "
                  f"Energy: {info['energy']:.2f} | "
                  f"Steps: {env.step_count} | "
                  f"Noise: {current_noise:.3f} | "
                  f"Time: {elapsed_time:.2f}s")

            # Periodic diagnostics: 4-panel training curves + full checkpoint.
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                plt.figure(figsize=(20, 5))

                plt.subplot(1, 4, 1)
                plt.plot(rewards_history, alpha=0.3, color='blue', label='Raw')
                plt.plot(smoothed_rewards, color='red', label='Smoothed')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--', label='Phase 1->2')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', label='Phase 2->3')
                plt.axvline(x=3 * episodes_per_task, color='orange', linestyle='--', label='Phase 3->4')
                plt.title("Reward")
                plt.xlabel("Episode")
                plt.ylabel("Reward")
                plt.legend()
                plt.grid(True)

                plt.subplot(1, 4, 2)
                plt.plot(collection_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.axvline(x=3 * episodes_per_task, color='orange', linestyle='--')
                plt.title("Collected Tasks")
                plt.xlabel("Episode")
                plt.ylabel("Number of Tasks")
                plt.grid(True)

                plt.subplot(1, 4, 3)
                plt.plot(energy_history)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.axvline(x=3 * episodes_per_task, color='orange', linestyle='--')
                plt.title("Total Energy")
                plt.xlabel("Episode")
                plt.ylabel("Energy")
                plt.grid(True)

                plt.subplot(1, 4, 4)
                if losses["critic"]:
                    plt.plot(losses["critic"], label='Critic Loss')
                if losses["actor"]:
                    plt.plot(losses["actor"], label='Actor Loss')
                plt.axvline(x=episodes_per_task, color='green', linestyle='--')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--')
                plt.axvline(x=3 * episodes_per_task, color='orange', linestyle='--')
                plt.title("Training Loss")
                plt.xlabel("Episode")
                plt.ylabel("Loss")
                plt.legend()
                plt.grid(True)

                plt.tight_layout()
                plt.savefig(f"results/training_curves_episode_{global_episode}.png")
                plt.close()

                torch.save({
                    'actor_state_dict': agent.actor.state_dict(),
                    'critic_state_dict': agent.critic.state_dict(),
                    'actor_optimizer': agent.actor_optimizer.state_dict(),
                    'critic_optimizer': agent.critic_optimizer.state_dict(),
                    'episode': global_episode,
                    'phase': phase,
                    'rewards_history': rewards_history,
                    'collection_history': collection_history,
                    'best_reward': best_reward,
                    'best_collection': best_collection
                }, f"results/checkpoint_episode_{global_episode}.pt")

        # End-of-phase weights, saved regardless of performance.
        torch.save(agent.actor.state_dict(), f"results/actor_phase_{phase}.pth")
        torch.save(agent.critic.state_dict(), f"results/critic_phase_{phase}.pth")

    print(f"Training completed! Best result: {best_collection * 100:.1f}% tasks, Reward: {best_reward:.2f}")
    return agent, env


if __name__ == "__main__":
    # Entry point: run the full 4-phase continual-learning training loop.
    agent, env = train()
