import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
import matplotlib.pyplot as plt
import random
from collections import deque
import os
import time

# Global reproducibility: seed every RNG source used below (torch CPU/CUDA,
# numpy, python random).
SEED = 45
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")
# Matplotlib: SimHei font for Chinese plot labels; keep the minus sign renderable.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

EPISODES_PER_TASK = 200

# Environment parameters
AREA_SIZE = 100            # side length of the square operating area
NUM_USERS = 15             # number of ground users
MAX_STEPS = 200            # max environment steps per episode
MAX_DISTANCE_COLLECT = 15  # collection radius around a user

# UAV parameters
UAV_SPEED = 20.0             # max displacement per step (action is in [-1, 1])
UAV_ENERGY_PER_METER = 0.1   # movement energy cost per unit distance
UAV_HOVER_ENERGY = 0.5       # extra energy charged per collected task

# Task parameters
TASK_SIZE = [10, 50]  # [min, max] task size, sampled uniformly per user

# PPO hyper-parameters
ACTOR_LR = 3e-4
CRITIC_LR = 3e-4
GAMMA = 0.99
GAE_LAMBDA = 0.95
CLIP_RATIO = 0.2
VALUE_CLIP_RATIO = 0.2
ENTROPY_COEFF = 0.01
VALUE_COEFF = 0.5
MAX_GRAD_NORM = 0.5
PPO_EPOCHS = 4
MINI_BATCH_SIZE = 64
REWARD_SCALE = 0.1

# TRAC parameters
TASK_EMBEDDING_DIM = 16
EWC_LAMBDA = 1000  # Elastic Weight Consolidation regularization strength
BUFFER_SIZE = 2048  # PPO rollout length (steps collected before an update)

# GRU parameters
SEQUENCE_LENGTH = 10  # observation window length fed to the GRU
HIDDEN_SIZE = 128


class Environment:
    """UAV data-collection environment on an AREA_SIZE x AREA_SIZE square.

    A single UAV starts at the area centre and must fly within
    MAX_DISTANCE_COLLECT of every task-generating user to collect its task.
    Which users generate tasks is controlled per learning phase via
    update_task_generating_users(). Observations are returned as a sliding
    window of the last SEQUENCE_LENGTH raw state vectors (for the GRU nets).
    """

    def __init__(self):
        # Static world: user layout and task sizes are sampled once per
        # Environment instance and survive reset() (only UAV/episode
        # bookkeeping is re-initialised there).
        self.user_positions = np.random.uniform(0, AREA_SIZE, size=(NUM_USERS, 2))
        self.task_sizes = np.random.uniform(TASK_SIZE[0], TASK_SIZE[1], size=NUM_USERS)
        self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.total_delay = 0
        self.total_energy = 0
        self.trajectory = [self.uav_position.copy()]
        # UAV-to-user distances from the previous step; used by the
        # approach-shaping term of the reward.
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        # Sliding window of raw state vectors fed to the recurrent policy.
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        self.current_phase = 1

    def update_task_generating_users(self, phase):
        """Select which users generate tasks for the given phase (1, 2 or 3).

        Phase 1: all users. Phase 2: a random subset of 11 users.
        Phase 3: the users inactive in phase 2 plus 3 random phase-2 users.
        NOTE(review): the phase-3 branch reads the phase2_* sets, so it
        assumes phase 2 was activated earlier on this instance.
        """
        self.current_phase = phase
        if phase == 1:
            self.task_generating_users = np.ones(NUM_USERS, dtype=bool)
        elif phase == 2:
            indices = np.random.choice(NUM_USERS, 11, replace=False)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            self.task_generating_users[indices] = True
            self.phase2_generating_users = set(indices)
            self.phase2_non_generating_users = set(range(NUM_USERS)) - self.phase2_generating_users
        else:
            phase3_users = set(self.phase2_non_generating_users)
            additional_users = np.random.choice(list(self.phase2_generating_users), 3, replace=False)
            phase3_users.update(additional_users)
            self.task_generating_users = np.zeros(NUM_USERS, dtype=bool)
            for user_id in phase3_users:
                self.task_generating_users[user_id] = True

    def reset(self):
        """Begin a new episode and return the initial stacked observation.

        User positions, task sizes and the task-generating mask are kept;
        only the UAV position and per-episode counters are reset. The
        history window is pre-filled with copies of the initial state.
        """
        self.uav_position = np.array([AREA_SIZE / 2, AREA_SIZE / 2], dtype=float)
        self.collected_tasks = np.zeros(NUM_USERS, dtype=bool)
        self.step_count = 0
        self.total_delay = 0
        self.total_energy = 0
        self.trajectory = [self.uav_position.copy()]
        self.last_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])
        self.observation_history = deque(maxlen=SEQUENCE_LENGTH)
        state = self._get_state()
        for _ in range(SEQUENCE_LENGTH):
            self.observation_history.append(state)
        return self._get_gru_state()

    def step(self, action):
        """Advance one step.

        action: 2-vector in [-1, 1], scaled by UAV_SPEED to a displacement.
        Returns (stacked_obs, reward, done, info); done becomes True when
        all required tasks are collected or MAX_STEPS is reached.
        """
        action = np.clip(action, -1, 1)
        movement = action * UAV_SPEED
        prev_position = self.uav_position.copy()
        self.uav_position += movement
        self.uav_position = np.clip(self.uav_position, 0, AREA_SIZE)
        self.trajectory.append(self.uav_position.copy())

        # Movement energy is proportional to the distance actually flown
        # (after clipping the position to the area bounds).
        distance_moved = np.linalg.norm(self.uav_position - prev_position)
        energy_consumed = distance_moved * UAV_ENERGY_PER_METER
        new_distances = np.array([np.linalg.norm(self.uav_position - pos) for pos in self.user_positions])

        newly_collected = 0
        collected_indices = []
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                if new_distances[i] <= MAX_DISTANCE_COLLECT:
                    self.collected_tasks[i] = True
                    newly_collected += 1
                    collected_indices.append(i)
                    # Delay model: distance * task_size / 10 — presumably a
                    # simplified transmission-delay proxy; units unspecified.
                    delay = new_distances[i] * self.task_sizes[i] / 10
                    self.total_delay += delay
                    # Hover cost charged once per collected task.
                    energy_consumed += UAV_HOVER_ENERGY

        self.total_energy += energy_consumed
        self.step_count += 1
        reward = self._calculate_reward(newly_collected, energy_consumed, collected_indices, new_distances,
                                        self.last_distances)
        self.last_distances = new_distances

        total_tasks_to_collect = sum(self.task_generating_users)
        collected_required_tasks = sum(self.collected_tasks & self.task_generating_users)
        done = (self.step_count >= MAX_STEPS) or (collected_required_tasks == total_tasks_to_collect)

        state = self._get_state()
        self.observation_history.append(state)

        return self._get_gru_state(), reward, done, {
            "collected": sum(self.collected_tasks),
            "collected_required": collected_required_tasks,
            "total_required": total_tasks_to_collect,
            "energy": self.total_energy,
            "delay": self.total_delay,
            "newly_collected": newly_collected,
            "total_users": NUM_USERS
        }

    def _get_state(self):
        """Raw state vector of length 2 + 3*NUM_USERS + 1:
        [uav_xy / AREA_SIZE] + per user [normalised distance, collected
        flag, generating flag] + [step fraction]."""
        state = np.zeros(2 + NUM_USERS * 3 + 1)
        state[0:2] = self.uav_position / AREA_SIZE
        for i in range(NUM_USERS):
            dist = np.linalg.norm(self.uav_position - self.user_positions[i])
            idx = 2 + i * 3
            # Normalised by the area diagonal so the value is in [0, 1].
            state[idx] = dist / np.sqrt(2 * AREA_SIZE ** 2)
            state[idx + 1] = float(self.collected_tasks[i])
            state[idx + 2] = float(self.task_generating_users[i])
        state[-1] = self.step_count / MAX_STEPS
        return state

    def _get_gru_state(self):
        """Return the observation window as a (SEQUENCE_LENGTH, state_dim) array."""
        while len(self.observation_history) < SEQUENCE_LENGTH:
            self.observation_history.append(self._get_state())
        return np.array(list(self.observation_history))

    def _calculate_reward(self, newly_collected, energy_consumed, collected_indices, new_distances, old_distances):
        """Shaped reward: collection + progress bonus + approach shaping
        + completion bonus - energy penalty - constant time penalty,
        scaled by REWARD_SCALE."""
        time_penalty = 0.1
        collected_required = sum(self.collected_tasks & self.task_generating_users)
        total_required = sum(self.task_generating_users)

        collection_reward = newly_collected * 20
        if newly_collected > 0 and total_required > 0:
            # Bonus proportional to overall task-completion progress.
            progress_bonus = (collected_required / total_required) * 15
            collection_reward += progress_bonus

        energy_penalty = energy_consumed * 0.8
        # Dense shaping: reward moving closer to each still-uncollected
        # required user, weighted more strongly the nearer the user is.
        proximity_reward = 0
        uncollected_tasks_count = 0
        for i in range(NUM_USERS):
            if self.task_generating_users[i] and not self.collected_tasks[i]:
                uncollected_tasks_count += 1
                dist_diff = old_distances[i] - new_distances[i]
                proximity_factor = max(0, 1 - (new_distances[i] / AREA_SIZE) ** 2)
                proximity_reward += dist_diff * 0.5 * proximity_factor

        if uncollected_tasks_count == 0:
            proximity_reward = 0

        # One-off terminal bonus, decaying with the number of steps used.
        completion_reward = 0
        if total_required > 0 and collected_required == total_required:
            completion_reward = 200 - self.step_count * 0.2

        reward = collection_reward + proximity_reward + completion_reward - energy_penalty - time_penalty
        return reward * REWARD_SCALE


# Task-aware PPO actor network
class TRACPPOActor(nn.Module):
    """Gaussian policy conditioned on a task embedding, with a GRU encoder
    over the observation window and EWC bookkeeping for continual learning."""

    def __init__(self, state_dim, action_dim, n_tasks=3):
        super(TRACPPOActor, self).__init__()

        self.state_dim = state_dim
        self.action_dim = action_dim
        self.n_tasks = n_tasks

        # Recurrent encoder over the observation sequence.
        self.gru = nn.GRU(state_dim, HIDDEN_SIZE, batch_first=True)

        # One learned embedding per continual-learning task.
        self.task_embedding = nn.Embedding(n_tasks, TASK_EMBEDDING_DIM)

        # MLP policy head on the concatenated [GRU feature, task embedding].
        self.policy_net = nn.Sequential(
            nn.Linear(HIDDEN_SIZE + TASK_EMBEDDING_DIM, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, action_dim),
        )

        # State-independent, learnable log standard deviation.
        self.log_std = nn.Parameter(torch.zeros(action_dim))

        # EWC bookkeeping: parameter snapshot and diagonal Fisher estimate.
        self.saved_params = {}
        self.fisher_info = {}

    def forward(self, state, task_id=0):
        """Return (action_mean, action_std) for a batch of state sequences."""
        n = state.size(0)

        # Encode the sequence; the last GRU output summarises the window.
        h0 = torch.zeros(1, n, HIDDEN_SIZE).to(state.device)
        encoded, _ = self.gru(state, h0)
        features = encoded[:, -1]

        # Accept either a plain int task id or a tensor of ids.
        if isinstance(task_id, int):
            task_id = torch.tensor([task_id] * n).to(state.device)
        emb = self.task_embedding(task_id)
        if len(task_id.shape) > 1:
            emb = emb.squeeze(1)

        mean = self.policy_net(torch.cat([features, emb], dim=1))
        std = self.log_std.exp()

        return mean, std

    def get_action_and_log_prob(self, state, task_id=0):
        """Sample an action and return it together with its summed log-prob."""
        mean, std = self.forward(state, task_id)
        policy = Normal(mean, std)
        sampled = policy.sample()
        return sampled, policy.log_prob(sampled).sum(dim=-1)

    def evaluate_actions(self, state, action, task_id=0):
        """Log-probability and entropy of given actions under the current policy."""
        mean, std = self.forward(state, task_id)
        policy = Normal(mean, std)
        return policy.log_prob(action).sum(dim=-1), policy.entropy().sum(dim=-1)

    def save_params_for_ewc(self):
        """Snapshot the current parameters as the EWC anchor point."""
        snapshot = {}
        for name, param in self.named_parameters():
            snapshot[name] = param.clone().detach()
        self.saved_params = snapshot

    def compute_ewc_loss(self):
        """Quadratic EWC penalty; 0 while no snapshot/Fisher info exists."""
        if not self.saved_params or not self.fisher_info:
            return 0

        penalty = 0
        for name, param in self.named_parameters():
            if name in self.saved_params and name in self.fisher_info:
                drift = param - self.saved_params[name]
                penalty = penalty + (self.fisher_info[name] * drift.pow(2)).sum()

        return EWC_LAMBDA * penalty


# Task-aware PPO critic network
class TRACPPOCritic(nn.Module):
    """Task-conditioned state-value network mirroring the actor's encoder
    layout (GRU + task embedding), with EWC bookkeeping."""

    def __init__(self, state_dim, n_tasks=3):
        super(TRACPPOCritic, self).__init__()

        self.state_dim = state_dim
        self.n_tasks = n_tasks

        # Recurrent encoder over the observation window.
        self.gru = nn.GRU(state_dim, HIDDEN_SIZE, batch_first=True)

        # Per-task learned embedding.
        self.task_embedding = nn.Embedding(n_tasks, TASK_EMBEDDING_DIM)

        # Value head on the concatenated [GRU feature, task embedding].
        self.value_net = nn.Sequential(
            nn.Linear(HIDDEN_SIZE + TASK_EMBEDDING_DIM, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 1),
        )

        # EWC bookkeeping: parameter snapshot and diagonal Fisher estimate.
        self.saved_params = {}
        self.fisher_info = {}

    def forward(self, state, task_id=0):
        """Return V(s) for each sequence in the batch, shape (batch,)."""
        n = state.size(0)

        h0 = torch.zeros(1, n, HIDDEN_SIZE).to(state.device)
        encoded, _ = self.gru(state, h0)
        feature = encoded[:, -1]  # last time step summarises the sequence

        # Accept either a plain int task id or a tensor of ids.
        if isinstance(task_id, int):
            task_id = torch.tensor([task_id] * n).to(state.device)
        emb = self.task_embedding(task_id)
        if len(task_id.shape) > 1:
            emb = emb.squeeze(1)

        return self.value_net(torch.cat([feature, emb], dim=1)).squeeze(-1)

    def save_params_for_ewc(self):
        """Snapshot the current parameters as the EWC anchor point."""
        snapshot = {}
        for name, param in self.named_parameters():
            snapshot[name] = param.clone().detach()
        self.saved_params = snapshot

    def compute_ewc_loss(self):
        """Quadratic EWC penalty; 0 while no snapshot/Fisher info exists."""
        if not self.saved_params or not self.fisher_info:
            return 0

        penalty = 0
        for name, param in self.named_parameters():
            if name in self.saved_params and name in self.fisher_info:
                drift = param - self.saved_params[name]
                penalty = penalty + (self.fisher_info[name] * drift.pow(2)).sum()

        return EWC_LAMBDA * penalty


# PPO rollout buffer
class PPOBuffer:
    """Fixed-size rollout storage for PPO with GAE(lambda) post-processing.

    `dones[t]` records whether the transition stored at step t terminated
    its episode (i.e. the successor state of step t is terminal).
    The buffer wraps circularly; callers are expected to run an update and
    clear() before overwriting old data.
    """

    def __init__(self, buffer_size, state_dim, action_dim):
        self.buffer_size = buffer_size
        self.ptr = 0   # next write index
        self.size = 0  # number of valid entries (capped at buffer_size)

        self.states = np.zeros((buffer_size, SEQUENCE_LENGTH, state_dim), dtype=np.float32)
        self.actions = np.zeros((buffer_size, action_dim), dtype=np.float32)
        self.rewards = np.zeros(buffer_size, dtype=np.float32)
        self.values = np.zeros(buffer_size, dtype=np.float32)
        self.log_probs = np.zeros(buffer_size, dtype=np.float32)
        self.dones = np.zeros(buffer_size, dtype=np.float32)
        self.task_ids = np.zeros(buffer_size, dtype=np.int32)

        self.advantages = np.zeros(buffer_size, dtype=np.float32)
        self.returns = np.zeros(buffer_size, dtype=np.float32)

    def add(self, state, action, reward, value, log_prob, done, task_id):
        """Store one transition at the write pointer (circular)."""
        self.states[self.ptr] = state
        self.actions[self.ptr] = action
        self.rewards[self.ptr] = reward
        self.values[self.ptr] = value
        self.log_probs[self.ptr] = log_prob
        self.dones[self.ptr] = done
        self.task_ids[self.ptr] = task_id

        self.ptr = (self.ptr + 1) % self.buffer_size
        self.size = min(self.size + 1, self.buffer_size)

    def compute_gae(self, last_value):
        """Compute GAE(lambda) advantages and returns in place.

        Bug fix: `dones[t]` flags that the transition at step t ended the
        episode, so the bootstrap term for step t (values[t+1], or
        `last_value` at the buffer tail) must be masked by `dones[t]`
        itself. The previous code masked with `dones[t+1]` (and an
        unconditional 0 at the tail), leaking value estimates across
        episode boundaries.
        """
        self.advantages.fill(0)
        self.returns.fill(0)

        gae = 0.0
        for t in reversed(range(self.size)):
            next_value = last_value if t == self.size - 1 else self.values[t + 1]
            non_terminal = 1.0 - self.dones[t]

            delta = self.rewards[t] + GAMMA * next_value * non_terminal - self.values[t]
            gae = delta + GAMMA * GAE_LAMBDA * non_terminal * gae
            self.advantages[t] = gae
            self.returns[t] = gae + self.values[t]

    def get_batch(self):
        """Return all stored transitions as tensors, with advantages
        normalized over the *valid* entries only (the old code included
        unused zero slots in the mean/std when the buffer was not full)."""
        adv = self.advantages[:self.size]
        self.advantages[:self.size] = (adv - adv.mean()) / (adv.std() + 1e-8)

        return (torch.FloatTensor(self.states[:self.size]),
                torch.FloatTensor(self.actions[:self.size]),
                torch.FloatTensor(self.advantages[:self.size]),
                torch.FloatTensor(self.returns[:self.size]),
                torch.FloatTensor(self.log_probs[:self.size]),
                torch.LongTensor(self.task_ids[:self.size]))

    def clear(self):
        """Reset the buffer indices (arrays are overwritten lazily)."""
        self.ptr = 0
        self.size = 0


# TRAC-PPO main algorithm
class TRAC_PPO:
    """Task-aware PPO with EWC regularization for continual learning.

    Fix vs. the previous version: the clipped value loss computed
    ``values + clamp(values - values, ...)``, which is identically equal
    to ``values`` and made value clipping a no-op. It now clips the new
    value prediction around the value recorded at rollout time, the
    standard PPO value-clipping formulation.
    """

    def __init__(self, state_dim, action_dim, n_tasks=3):
        self.n_tasks = n_tasks
        self.current_task = 0
        self.state_dim = state_dim
        self.action_dim = action_dim

        # Actor/critic share the GRU + task-embedding architecture.
        self.actor = TRACPPOActor(state_dim, action_dim, n_tasks).to(device)
        self.critic = TRACPPOCritic(state_dim, n_tasks).to(device)

        # Separate optimizers (though total_loss.backward() touches both nets).
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        # Rollout storage.
        self.buffer = PPOBuffer(BUFFER_SIZE, state_dim, action_dim)

        # Episode rewards recorded per task, for reporting/plots.
        self.task_performance = {i: [] for i in range(n_tasks)}

    def select_action(self, state, deterministic=False):
        """Return (action, log_prob, value) as flat numpy arrays.

        state: (SEQUENCE_LENGTH, state_dim) or already batched.
        deterministic=True uses the policy mean (log_prob is a dummy 0).
        """
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)

        state = torch.FloatTensor(state).to(device)

        with torch.no_grad():
            if deterministic:
                action_mean, _ = self.actor(state, self.current_task)
                action = action_mean
                log_prob = torch.zeros(1)
                value = self.critic(state, self.current_task)
            else:
                action, log_prob = self.actor.get_action_and_log_prob(state, self.current_task)
                value = self.critic(state, self.current_task)

        return action.cpu().numpy().flatten(), log_prob.cpu().numpy().flatten(), value.cpu().numpy().flatten()

    def switch_task(self, task_id):
        """Switch to a new continual-learning task.

        Before switching, estimate the Fisher information on the outgoing
        task's data and snapshot the parameters for the EWC penalty.
        """
        print(f"\nSwitching to task {task_id}")

        # Fisher/snapshot must use the *old* task's data and task id.
        if task_id > 0 and self.buffer.size > 0:
            self._compute_fisher_information()
            self.actor.save_params_for_ewc()
            self.critic.save_params_for_ewc()

        self.current_task = task_id

        # Start the new task with an empty rollout buffer.
        self.buffer.clear()

    def _compute_fisher_information(self):
        """Estimate the diagonal Fisher information for actor and critic.

        Approximation: squared gradients of a single full-buffer pass
        (mean negative log-prob for the actor, value MSE for the critic).
        """
        if self.buffer.size < MINI_BATCH_SIZE:
            return

        print("Computing Fisher Information Matrix...")

        self.actor.fisher_info = {}
        self.critic.fisher_info = {}

        # Sample the whole current buffer.
        states, actions, _, returns, old_log_probs, task_ids = self.buffer.get_batch()
        states = states.to(device)
        actions = actions.to(device)
        returns = returns.to(device)

        # Actor Fisher: squared gradient of the mean negative log-likelihood.
        self.actor.zero_grad()
        log_probs, _ = self.actor.evaluate_actions(states, actions, self.current_task)
        actor_loss = -log_probs.mean()
        actor_loss.backward()

        for name, param in self.actor.named_parameters():
            if param.grad is not None:
                self.actor.fisher_info[name] = param.grad.data.clone().pow(2)

        # Critic Fisher: squared gradient of the value regression loss.
        self.critic.zero_grad()
        values = self.critic(states, self.current_task)
        critic_loss = F.mse_loss(values, returns)
        critic_loss.backward()

        for name, param in self.critic.named_parameters():
            if param.grad is not None:
                self.critic.fisher_info[name] = param.grad.data.clone().pow(2)

        # Leave both networks with clean gradients.
        self.actor.zero_grad()
        self.critic.zero_grad()

        print("Fisher Information computation completed")

    def update(self):
        """Run one PPO update over a full rollout buffer.

        Returns averaged diagnostics; a no-op (zeros) until the buffer is full.
        """
        if self.buffer.size < BUFFER_SIZE:
            return {"policy_loss": 0, "value_loss": 0, "entropy": 0}

        # Bootstrap value for GAE. NOTE(review): this uses the last *stored*
        # observation rather than its successor state — an approximation,
        # since the successor is not kept in the buffer.
        last_state = torch.FloatTensor(self.buffer.states[self.buffer.size - 1:self.buffer.size]).to(device)
        with torch.no_grad():
            last_value = self.critic(last_state, self.current_task).item()

        # Compute GAE advantages/returns.
        self.buffer.compute_gae(last_value)

        # Rollout-time value estimates: the clipping anchor for the clipped
        # value loss. Read from the buffer before it is cleared below; rows
        # are in the same order as the tensors returned by get_batch().
        old_values = torch.FloatTensor(self.buffer.values[:self.buffer.size]).to(device)

        states, actions, advantages, returns, old_log_probs, task_ids = self.buffer.get_batch()
        states = states.to(device)
        actions = actions.to(device)
        advantages = advantages.to(device)
        returns = returns.to(device)
        old_log_probs = old_log_probs.to(device)

        total_policy_loss = 0
        total_value_loss = 0
        total_entropy = 0

        for epoch in range(PPO_EPOCHS):
            # Fresh shuffle each epoch.
            indices = torch.randperm(states.size(0))

            for start in range(0, states.size(0), MINI_BATCH_SIZE):
                end = start + MINI_BATCH_SIZE
                batch_indices = indices[start:end]

                batch_states = states[batch_indices]
                batch_actions = actions[batch_indices]
                batch_advantages = advantages[batch_indices]
                batch_returns = returns[batch_indices]
                batch_old_log_probs = old_log_probs[batch_indices]
                batch_old_values = old_values[batch_indices]

                # Current policy log-probs and value predictions.
                new_log_probs, entropy = self.actor.evaluate_actions(batch_states, batch_actions, self.current_task)
                values = self.critic(batch_states, self.current_task)

                # Clipped surrogate policy objective.
                ratio = torch.exp(new_log_probs - batch_old_log_probs)
                surr1 = ratio * batch_advantages
                surr2 = torch.clamp(ratio, 1 - CLIP_RATIO, 1 + CLIP_RATIO) * batch_advantages
                policy_loss = -torch.min(surr1, surr2).mean()

                # Clipped value loss (BUG FIX): restrict the new prediction
                # to a band around the rollout-time estimate. The old code
                # clipped `values - values`, i.e. did nothing.
                value_clipped = batch_old_values + torch.clamp(
                    values - batch_old_values, -VALUE_CLIP_RATIO, VALUE_CLIP_RATIO)
                value_loss1 = F.mse_loss(values, batch_returns)
                value_loss2 = F.mse_loss(value_clipped, batch_returns)
                value_loss = torch.max(value_loss1, value_loss2)

                # Entropy bonus (subtracted from the loss to encourage exploration).
                entropy_loss = entropy.mean()

                total_loss = policy_loss + VALUE_COEFF * value_loss - ENTROPY_COEFF * entropy_loss

                # EWC penalty anchors both networks to the previous task's optimum.
                if self.current_task > 0:
                    total_loss += self.actor.compute_ewc_loss() + self.critic.compute_ewc_loss()

                # Joint backward; gradients clipped per network.
                self.actor_optimizer.zero_grad()
                self.critic_optimizer.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), MAX_GRAD_NORM)
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), MAX_GRAD_NORM)
                self.actor_optimizer.step()
                self.critic_optimizer.step()

                total_policy_loss += policy_loss.item()
                total_value_loss += value_loss.item()
                total_entropy += entropy_loss.item()

        # Rollout consumed.
        self.buffer.clear()

        num_updates = PPO_EPOCHS * (states.size(0) // MINI_BATCH_SIZE)
        return {
            "policy_loss": total_policy_loss / num_updates,
            "value_loss": total_value_loss / num_updates,
            "entropy": total_entropy / num_updates
        }

    def record_performance(self, episode_reward):
        """Append an episode reward to the current task's history."""
        self.task_performance[self.current_task].append(episode_reward)


# Baseline PPO algorithm
class BasicPPO:
    """Vanilla PPO baseline: reuses the task-aware networks with a single
    task id (0), no EWC penalty, and a plain (unclipped) value loss."""

    def __init__(self, state_dim, action_dim):
        self.actor = TRACPPOActor(state_dim, action_dim, n_tasks=1).to(device)
        self.critic = TRACPPOCritic(state_dim, n_tasks=1).to(device)

        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)

        self.buffer = PPOBuffer(BUFFER_SIZE, state_dim, action_dim)

    def select_action(self, state, deterministic=False):
        """Return (action, log_prob, value) as flat numpy arrays; with
        deterministic=True the policy mean is used and log_prob is a dummy 0."""
        if len(state.shape) == 2:
            state = np.expand_dims(state, 0)

        state = torch.FloatTensor(state).to(device)

        with torch.no_grad():
            if deterministic:
                action_mean, _ = self.actor(state, 0)
                action = action_mean
                log_prob = torch.zeros(1)
                value = self.critic(state, 0)
            else:
                action, log_prob = self.actor.get_action_and_log_prob(state, 0)
                value = self.critic(state, 0)

        return action.cpu().numpy().flatten(), log_prob.cpu().numpy().flatten(), value.cpu().numpy().flatten()

    def update(self):
        """One PPO update over a full rollout buffer; no-op until full."""
        if self.buffer.size < BUFFER_SIZE:
            return {"policy_loss": 0, "value_loss": 0, "entropy": 0}

        # Bootstrap value for GAE. NOTE(review): uses the last *stored*
        # observation rather than its successor state (the successor is not
        # kept in the buffer) — an approximation.
        last_state = torch.FloatTensor(self.buffer.states[self.buffer.size - 1:self.buffer.size]).to(device)
        with torch.no_grad():
            last_value = self.critic(last_state, 0).item()

        self.buffer.compute_gae(last_value)
        states, actions, advantages, returns, old_log_probs, task_ids = self.buffer.get_batch()
        states = states.to(device)
        actions = actions.to(device)
        advantages = advantages.to(device)
        returns = returns.to(device)
        old_log_probs = old_log_probs.to(device)

        total_policy_loss = 0
        total_value_loss = 0
        total_entropy = 0

        for epoch in range(PPO_EPOCHS):
            indices = torch.randperm(states.size(0))

            for start in range(0, states.size(0), MINI_BATCH_SIZE):
                end = start + MINI_BATCH_SIZE
                batch_indices = indices[start:end]

                batch_states = states[batch_indices]
                batch_actions = actions[batch_indices]
                batch_advantages = advantages[batch_indices]
                batch_returns = returns[batch_indices]
                batch_old_log_probs = old_log_probs[batch_indices]

                new_log_probs, entropy = self.actor.evaluate_actions(batch_states, batch_actions, 0)
                values = self.critic(batch_states, 0)

                # Clipped surrogate policy objective.
                ratio = torch.exp(new_log_probs - batch_old_log_probs)
                surr1 = ratio * batch_advantages
                surr2 = torch.clamp(ratio, 1 - CLIP_RATIO, 1 + CLIP_RATIO) * batch_advantages
                policy_loss = -torch.min(surr1, surr2).mean()

                # Plain value regression (no clipping in the baseline).
                value_loss = F.mse_loss(values, batch_returns)
                entropy_loss = entropy.mean()

                total_loss = policy_loss + VALUE_COEFF * value_loss - ENTROPY_COEFF * entropy_loss

                self.actor_optimizer.zero_grad()
                self.critic_optimizer.zero_grad()
                total_loss.backward()
                torch.nn.utils.clip_grad_norm_(self.actor.parameters(), MAX_GRAD_NORM)
                torch.nn.utils.clip_grad_norm_(self.critic.parameters(), MAX_GRAD_NORM)
                self.actor_optimizer.step()
                self.critic_optimizer.step()

                total_policy_loss += policy_loss.item()
                total_value_loss += value_loss.item()
                total_entropy += entropy_loss.item()

        self.buffer.clear()

        num_updates = PPO_EPOCHS * (states.size(0) // MINI_BATCH_SIZE)
        return {
            "policy_loss": total_policy_loss / num_updates,
            "value_loss": total_value_loss / num_updates,
            "entropy": total_entropy / num_updates
        }


def train():
    """Train the TRAC-PPO agent and the Basic-PPO baseline across 3 phases.

    Both agents share one Environment; each phase changes which users
    generate tasks. Saves best-actor checkpoints and comparison plots
    under results/ and returns (agent_trac, agent_basic, env).
    """
    os.makedirs("results", exist_ok=True)

    env = Environment()
    state_dim = 2 + NUM_USERS * 3 + 1  # must match Environment._get_state layout
    action_dim = 2

    # Initialize agents
    agent_trac = TRAC_PPO(state_dim, action_dim, n_tasks=3)
    agent_basic = BasicPPO(state_dim, action_dim)

    total_episodes = 600
    episodes_per_task = 200
    eval_freq = 50  # plot every eval_freq global episodes

    rewards_history_trac = []
    rewards_history_basic = []
    smoothed_rewards_trac = []
    smoothed_rewards_basic = []
    best_reward_trac = -float('inf')
    best_reward_basic = -float('inf')

    start_time = time.time()

    for phase in range(1, 4):
        env.update_task_generating_users(phase)
        agent_trac.switch_task(phase - 1)

        # Baseline agent gets no continual-learning treatment; its rollout
        # buffer is simply cleared at each phase switch.
        if phase > 1:
            agent_basic.buffer.clear()

        for episode in range(1, episodes_per_task + 1):
            global_episode = (phase - 1) * episodes_per_task + episode

            # --- Run one episode with the TRAC agent ---
            state = env.reset()
            episode_reward_trac = 0

            for step in range(1, MAX_STEPS + 1):
                action, log_prob, value = agent_trac.select_action(state)
                next_state, reward, done, info = env.step(action)

                agent_trac.buffer.add(state, action, reward, value[0], log_prob[0], done, phase - 1)

                state = next_state
                episode_reward_trac += reward
                if done:
                    break

            # PPO update once the rollout buffer is full
            if agent_trac.buffer.size >= BUFFER_SIZE:
                train_info = agent_trac.update()  # diagnostics currently unused

            agent_trac.record_performance(episode_reward_trac)

            # --- Run one episode with the baseline agent (same env/phase) ---
            state = env.reset()
            episode_reward_basic = 0
            for step in range(1, MAX_STEPS + 1):
                action, log_prob, value = agent_basic.select_action(state)
                next_state, reward, done, info = env.step(action)
                agent_basic.buffer.add(state, action, reward, value[0], log_prob[0], done, 0)
                state = next_state
                episode_reward_basic += reward
                if done:
                    break

            if agent_basic.buffer.size >= BUFFER_SIZE:
                agent_basic.update()

            rewards_history_trac.append(episode_reward_trac)
            rewards_history_basic.append(episode_reward_basic)

            # Smoothed rewards: moving average over the last window_size episodes
            window_size = 20
            if len(rewards_history_trac) >= window_size:
                smoothed_rewards_trac.append(np.mean(rewards_history_trac[-window_size:]))
                smoothed_rewards_basic.append(np.mean(rewards_history_basic[-window_size:]))
            else:
                smoothed_rewards_trac.append(episode_reward_trac)
                smoothed_rewards_basic.append(episode_reward_basic)

            # Track best episodes and checkpoint actor weights.
            # NOTE(review): best_reward_* is global across phases while the
            # filename is per-phase, so a later phase with lower rewards may
            # never write its checkpoint — confirm this is intended.
            if episode_reward_trac > best_reward_trac:
                best_reward_trac = episode_reward_trac
                torch.save(agent_trac.actor.state_dict(), f"results/best_actor_trac_ppo_phase_{phase}.pth")

            if episode_reward_basic > best_reward_basic:
                best_reward_basic = episode_reward_basic
                torch.save(agent_basic.actor.state_dict(), f"results/best_actor_basic_ppo_phase_{phase}.pth")

            elapsed_time = time.time() - start_time

            print(f"Phase: {phase} | Episode: {episode}/{episodes_per_task} | "
                  f"Global Episode: {global_episode}/{total_episodes} | "
                  f"Reward TRAC-PPO: {episode_reward_trac:.2f} | "
                  f"Reward Basic-PPO: {episode_reward_basic:.2f} | "
                  f"Advantage: {episode_reward_trac - episode_reward_basic:.2f} | "
                  f"Time: {elapsed_time:.2f}s")

            # Periodically render comparison plots
            if global_episode % eval_freq == 0 or global_episode == total_episodes:
                plt.figure(figsize=(15, 5))

                # Panel 1: smoothed reward comparison with phase boundaries
                plt.subplot(1, 3, 1)
                plt.plot(smoothed_rewards_trac, color='blue', linewidth=2, label='TRAC-PPO', alpha=0.8)
                plt.plot(smoothed_rewards_basic, color='red', linewidth=2, label='Basic-PPO', alpha=0.8)
                plt.axvline(x=episodes_per_task, color='green', linestyle='--', alpha=0.7, label='Phase 1→2')
                plt.axvline(x=2 * episodes_per_task, color='purple', linestyle='--', alpha=0.7, label='Phase 2→3')
                plt.title("奖励对比 (TRAC-PPO vs Basic-PPO)")
                plt.xlabel("Episode")
                plt.ylabel("Smoothed Reward")
                plt.legend()
                plt.grid(True, alpha=0.3)

                # Panel 2: TRAC-PPO advantage over the baseline
                plt.subplot(1, 3, 2)
                if len(smoothed_rewards_trac) > 0:
                    advantage = np.array(smoothed_rewards_trac) - np.array(smoothed_rewards_basic)
                    plt.plot(advantage, color='purple', linewidth=2, alpha=0.8)
                    plt.axhline(y=0, color='black', linestyle='--', alpha=0.5)
                    plt.fill_between(range(len(advantage)), advantage, 0,
                                     where=(advantage >= 0), color='green', alpha=0.3, label='TRAC优势')
                    plt.fill_between(range(len(advantage)), advantage, 0,
                                     where=(advantage < 0), color='red', alpha=0.3, label='TRAC劣势')
                plt.title("TRAC-PPO的性能优势")
                plt.xlabel("Episode")
                plt.ylabel("Reward Difference")
                plt.legend()
                plt.grid(True, alpha=0.3)

                # Panel 3: per-task average performance of TRAC-PPO
                plt.subplot(1, 3, 3)
                task_avg_rewards = []
                for task_id in range(3):
                    if agent_trac.task_performance[task_id]:
                        avg_reward = np.mean(agent_trac.task_performance[task_id][-50:])  # mean over the most recent 50 episodes
                        task_avg_rewards.append(avg_reward)
                    else:
                        task_avg_rewards.append(0)

                colors = ['lightblue', 'lightgreen', 'lightcoral']
                bars = plt.bar(range(3), task_avg_rewards, color=colors, alpha=0.7)
                plt.title("各任务平均性能 (TRAC-PPO)")
                plt.xlabel("Task ID")
                plt.ylabel("Average Reward")
                plt.xticks(range(3), ['Task 1', 'Task 2', 'Task 3'])

                # Numeric labels above each bar
                for i, bar in enumerate(bars):
                    height = bar.get_height()
                    plt.text(bar.get_x() + bar.get_width() / 2., height + 0.5,
                             f'{height:.1f}', ha='center', va='bottom')

                plt.grid(True, alpha=0.3)

                plt.tight_layout()
                plt.savefig(f"results/trac_ppo_comparison_episode_{global_episode}.png",
                            dpi=150, bbox_inches='tight')
                plt.close()

    print(f"\n训练完成!")
    print(f"TRAC-PPO最佳结果: {best_reward_trac:.2f}")
    print(f"Basic-PPO最佳结果: {best_reward_basic:.2f}")
    print(f"最终优势: {best_reward_trac - best_reward_basic:.2f}")

    # Per-phase average performance summary (over smoothed rewards)
    for phase in range(3):
        start_idx = phase * episodes_per_task
        end_idx = (phase + 1) * episodes_per_task

        avg_trac = np.mean(smoothed_rewards_trac[start_idx:end_idx])
        avg_basic = np.mean(smoothed_rewards_basic[start_idx:end_idx])

        print(f"阶段 {phase + 1} - TRAC-PPO: {avg_trac:.2f}, Basic-PPO: {avg_basic:.2f}, "
              f"优势: {avg_trac - avg_basic:.2f}")

    return agent_trac, agent_basic, env


if __name__ == "__main__":
    # Entry point: train both agents across the three phases.
    agent_trac, agent_basic, env = train()