"""
RARL Continuous A2C Agent - 鲁棒对抗强化学习连续动作智能体

本模块实现了完整的RARL训练算法，包含：
1. Protagonist Agent: 主策略，学习控制任务
2. Adversary Agent: 对抗策略，学习生成最优扰动
3. 交替训练机制: 零和博弈框架下的双智能体优化

核心机制：
- Protagonist 最大化 reward
- Adversary 最大化 -reward (等价于最小化 protagonist 的 reward)
- 交替更新形成纳什均衡

关键设计：
- Protagonist 输出控制动作 (actions_num维)
- Adversary 输出扰动 (10维：[fx_1..4, fy_1..4, M_phi, M_theta])
- 两个模型共享observation，但有各自独立的动作空间
"""

from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.algos_torch.a2c_continuous import A2CAgent

from torch import optim
import torch
from torch import nn
import numpy as np
import gym
import copy
import os
import time


def rescale_actions(low, high, action):
    """Map *action* from the normalized range [-1, 1] into [low, high].

    Applies the affine transform action * (high - low)/2 + (high + low)/2,
    elementwise for tensors/arrays as well as plain scalars.
    """
    half_span = (high - low) * 0.5
    midpoint = (high + low) * 0.5
    return action * half_span + midpoint


class RARLAgent(a2c_common.ContinuousA2CBase):
    """
    RARL双智能体训练器
    
    继承自ContinuousA2CBase，实现：
    1. 同时管理protagonist和adversary两个模型
    2. 交替训练机制
    3. 环境扰动注入
    
    关键点：
    - protagonist和adversary有不同的动作空间维度
    - 训练时需要分别处理两者的动作预处理
    """
    
    def __init__(self, base_name, params):
        """
        Initialize the RARL agent.

        Sets up the standard ContinuousA2CBase machinery, then builds two
        independent actor-critic networks (protagonist and adversary), each
        with its own optimizer and PPO dataset.

        Args:
            base_name: base algorithm name.
            params: configuration dict; expects a 'config' sub-dict.
        """
        # Keep a copy of the raw training config.
        self.rarl_config = params['config'].copy()
        
        # RARL-specific parameters: inner iteration counts per global epoch
        # and the scale applied to the adversary's (negated) reward.
        self.n_pro_itr = self.rarl_config.get('n_pro_itr', 1)
        self.n_adv_itr = self.rarl_config.get('n_adv_itr', 1)
        self.adv_reward_scale = self.rarl_config.get('adv_reward_scale', 1.0)
        
        # Adversary action-space bounds (kept in sync with rarl_config.yaml).
        # Disturbance layout: [fx_1..4, fy_1..4, M_phi, M_theta].
        adv_space_config = self.rarl_config.get('adv_action_space', {
            'low': [-150., -150., -150., -150., -150., -150., -150., -150., -60000., -5000.],
            'high': [150., 150., 150., 150., 150., 150., 150., 150., 60000., 5000.],
        })
        self.adv_actions_low_np = np.array(adv_space_config['low'], dtype=np.float32)
        self.adv_actions_high_np = np.array(adv_space_config['high'], dtype=np.float32)
        self.adv_actions_num = len(adv_space_config['low'])
        
        # Parent init: creates the env, obs/action spaces, schedulers, etc.
        a2c_common.ContinuousA2CBase.__init__(self, base_name, params)
        
        # Snapshot the protagonist action bounds already set by the parent.
        self.pro_actions_low = self.actions_low.clone()
        self.pro_actions_high = self.actions_high.clone()
        self.pro_actions_num = self.actions_num
        
        # Adversary action bounds as device tensors.
        self.adv_actions_low = torch.from_numpy(self.adv_actions_low_np).float().to(self.ppo_device)
        self.adv_actions_high = torch.from_numpy(self.adv_actions_high_np).float().to(self.ppo_device)
        
        # Network build config for the protagonist (main policy).
        obs_shape = self.obs_shape
        pro_build_config = {
            'actions_num': self.pro_actions_num,
            'input_shape': obs_shape,
            'num_seqs': self.num_actors * self.num_agents,
            'value_size': self.env_info.get('value_size', 1),
            'normalize_value': self.normalize_value,
            'normalize_input': self.normalize_input,
        }
        
        # Build the protagonist model (main policy).
        self.pro_model = self.network.build(pro_build_config)
        self.pro_model.to(self.ppo_device)
        
        # Build the adversary model with its own (disturbance) action dim.
        adv_build_config = pro_build_config.copy()
        adv_build_config['actions_num'] = self.adv_actions_num  # adversary outputs the disturbance vector
        self.adv_model = self.network.build(adv_build_config)
        self.adv_model.to(self.ppo_device)
        
        # The "active" model starts out pointing at the protagonist.
        self.model = self.pro_model
        
        # RNN hidden states for each policy.
        self.states = None
        self.adv_states = None
        self.init_rnn_from_model(self.pro_model)
        
        # One optimizer per model.
        self.last_lr = float(self.last_lr)
        self.pro_optimizer = optim.Adam(self.pro_model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)
        self.adv_optimizer = optim.Adam(self.adv_model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)
        
        # The active optimizer starts out as the protagonist's.
        self.optimizer = self.pro_optimizer
        
        # Bound loss type ('bound', 'regularisation', or anything else = off).
        self.bound_loss_type = self.config.get('bound_loss_type', 'bound')
        
        # Separate PPO datasets: the two agents have different action dims.
        self.pro_dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
        self.adv_dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
        self.dataset = self.pro_dataset
        
        # Value normalization follows the currently-active model.
        if self.normalize_value:
            self.value_mean_std = self.pro_model.value_mean_std
        
        # Central value
        self.has_value_loss = True
        
        # Global (outer) epoch counter over protagonist+adversary phases.
        self.global_epoch = 0
        
        # Which agent is currently being trained.
        self.is_training_protagonist = True
        
        # Switch the environment into RARL mode if it supports it.
        if hasattr(self.vec_env, 'env') and hasattr(self.vec_env.env, 'enable_rarl_mode'):
            self.vec_env.env.enable_rarl_mode(True)
        
        self.algo_observer.after_init(self)
        
        print("\n" + "=" * 60)
        print("RARL Agent 初始化完成")
        print(f"  Protagonist 动作维度: {self.pro_actions_num}")
        print(f"  Adversary 扰动维度: {self.adv_actions_num}")
        print(f"  每轮 Protagonist 迭代: {self.n_pro_itr}")
        print(f"  每轮 Adversary 迭代: {self.n_adv_itr}")
        print("=" * 60 + "\n")

    def init_tensors(self):
        """
        Override init_tensors to create one experience buffer per agent.

        This is the key to handling the action-dimension mismatch:
        - the protagonist's buffer uses the environment's native action space;
        - the adversary's buffer uses the (10-dim) disturbance space.
        """
        from rl_games.common.experience import ExperienceBuffer
        
        batch_size = self.num_agents * self.num_actors
        algo_info = {
            'num_actors': self.num_actors,
            'horizon_length': self.horizon_length,
            'has_central_value': self.has_central_value,
            'use_action_masks': self.use_action_masks
        }
        
        # Protagonist buffer: uses the original env action space.
        pro_env_info = self.env_info.copy()
        self.pro_experience_buffer = ExperienceBuffer(pro_env_info, algo_info, self.ppo_device)
        
        # Adversary buffer: same env info but with the disturbance action space.
        adv_env_info = self.env_info.copy()
        adv_action_space = gym.spaces.Box(
            low=self.adv_actions_low_np,
            high=self.adv_actions_high_np,
            dtype=np.float32
        )
        adv_env_info['action_space'] = adv_action_space
        self.adv_experience_buffer = ExperienceBuffer(adv_env_info, algo_info, self.ppo_device)
        
        # Default to the protagonist's buffer.
        self.experience_buffer = self.pro_experience_buffer
        
        # Remaining per-step tensors (mirrors the parent implementation).
        val_shape = (self.horizon_length, batch_size, self.value_size)
        current_rewards_shape = (batch_size, self.value_size)
        self.current_rewards = torch.zeros(current_rewards_shape, dtype=torch.float32, device=self.ppo_device)
        self.current_actual_costs = torch.zeros(current_rewards_shape, dtype=torch.float32, device=self.ppo_device)
        self.current_lengths = torch.zeros(batch_size, dtype=torch.float32, device=self.ppo_device)
        self.dones = torch.ones((batch_size,), dtype=torch.uint8, device=self.ppo_device)
        self.infos = {}
        
        # RNN state initialization for both policies.
        if self.is_rnn:
            self.rnn_states = self.pro_model.get_default_rnn_state()
            self.rnn_states = [s.to(self.ppo_device) for s in self.rnn_states]
            
            # Adversary RNN state.
            self.adv_rnn_states = self.adv_model.get_default_rnn_state()
            if self.adv_rnn_states is not None:
                self.adv_rnn_states = [s.to(self.ppo_device) for s in self.adv_rnn_states]
            
            total_agents = self.num_agents * self.num_actors
            num_seqs = self.horizon_length // self.seq_len
            assert((self.horizon_length * total_agents // self.num_minibatches) % self.seq_len == 0)
            self.mb_rnn_states = [torch.zeros((num_seqs, s.size()[0], total_agents, s.size()[2]), 
                                              dtype=torch.float32, device=self.ppo_device) 
                                 for s in self.rnn_states]
        
        # Keys collected each step and the full tensor list
        # (inherited convention from ContinuousA2CBase).
        self.update_list = ['actions', 'neglogpacs', 'values', 'mus', 'sigmas']
        self.tensor_list = self.update_list + ['obses', 'states', 'dones']

    def preprocess_actions(self, actions):
        """
        Scale raw policy outputs into the action range of whichever agent is
        currently being trained.

        The protagonist and the adversary have different action bounds, so
        the bounds pair is chosen from ``self.is_training_protagonist`` before
        the usual clamp-and-rescale step.
        """
        # Select the bounds for the agent currently being trained.
        if self.is_training_protagonist:
            low, high = self.pro_actions_low, self.pro_actions_high
        else:
            low, high = self.adv_actions_low, self.adv_actions_high

        if self.clip_actions:
            result = rescale_actions(low, high, torch.clamp(actions, -1.0, 1.0))
        else:
            result = actions

        # Non-tensor envs expect numpy actions.
        if not self.is_tensor_obses:
            result = result.cpu().numpy()

        return result

    def get_adversary_action(self, obs):
        """
        Run the adversary policy to produce a disturbance for the environment.

        Args:
            obs: current observation batch (batch_size, obs_dim).

        Returns:
            Disturbance tensor (batch_size, adv_actions_num), rescaled from
            the policy's [-1, 1] output into the configured disturbance bounds.
        """
        model_input = {
            'is_train': False,
            'prev_actions': None,
            'obs': self._preproc_obs(obs),
            'rnn_states': self.adv_states,
        }

        with torch.no_grad():
            out = self.adv_model(model_input)
            # Policy outputs live in [-1, 1]; map them onto the physical range.
            bounded = torch.clamp(out['actions'], -1.0, 1.0)
            disturbance = rescale_actions(self.adv_actions_low, self.adv_actions_high, bounded)

            # Carry the adversary's recurrent state forward.
            if self.is_rnn:
                self.adv_states = out.get('rnn_states', None)

        return disturbance

    def update_epoch(self):
        """Advance the (inner) epoch counter by one and return its new value."""
        self.epoch_num = self.epoch_num + 1
        return self.epoch_num
    
    def save(self, fn):
        """Write a checkpoint containing both agents' models and optimizers."""
        state = {'epoch': self.epoch_num, 'global_epoch': self.global_epoch}
        # Serialize the two models, then the two optimizers.
        for tag, module in (('pro_model', self.pro_model), ('adv_model', self.adv_model)):
            state[tag] = module.state_dict()
        for tag, opt in (('pro_optimizer', self.pro_optimizer), ('adv_optimizer', self.adv_optimizer)):
            state[tag] = opt.state_dict()
        state['frame'] = self.frame

        # Normalizer statistics, when enabled.
        if self.normalize_input:
            state['pro_running_mean_std'] = self.pro_model.running_mean_std.state_dict()
            state['adv_running_mean_std'] = self.adv_model.running_mean_std.state_dict()
        if self.normalize_value:
            state['pro_value_mean_std'] = self.pro_model.value_mean_std.state_dict()
            state['adv_value_mean_std'] = self.adv_model.value_mean_std.state_dict()

        torch_ext.save_checkpoint(fn, state)
        print(f"Saved RARL checkpoint to {fn}")
    
    def restore(self, fn):
        """Load a checkpoint produced by :meth:`save` into both agents."""
        checkpoint = torch_ext.load_checkpoint(fn)

        # Counters (missing keys fall back to 0 for older checkpoints).
        self.epoch_num = checkpoint.get('epoch', 0)
        self.global_epoch = checkpoint.get('global_epoch', 0)
        self.frame = checkpoint.get('frame', 0)

        # Models and optimizers share the same load pattern.
        targets = (
            (self.pro_model, 'pro_model'),
            (self.adv_model, 'adv_model'),
            (self.pro_optimizer, 'pro_optimizer'),
            (self.adv_optimizer, 'adv_optimizer'),
        )
        for obj, key in targets:
            obj.load_state_dict(checkpoint[key])

        # Normalizer statistics, when present in the checkpoint.
        if self.normalize_input and 'pro_running_mean_std' in checkpoint:
            self.pro_model.running_mean_std.load_state_dict(checkpoint['pro_running_mean_std'])
            self.adv_model.running_mean_std.load_state_dict(checkpoint['adv_running_mean_std'])
        if self.normalize_value and 'pro_value_mean_std' in checkpoint:
            self.pro_model.value_mean_std.load_state_dict(checkpoint['pro_value_mean_std'])
            self.adv_model.value_mean_std.load_state_dict(checkpoint['adv_value_mean_std'])

        print(f"Restored RARL checkpoint from {fn}")

    def play_steps(self):
        """
        Collect one horizon of environment interaction.

        RARL specifics:
        - Training the protagonist: the adversary generates disturbances that
          are injected into the environment, while the protagonist's actions
          drive the environment and fill the experience buffer.
        - Training the adversary: the (frozen) protagonist still drives the
          environment, but the adversary's actions are what gets recorded for
          the loss, and the reward is negated (zero-sum game).

        Returns:
            batch_dict with flattened rollout tensors, 'returns',
            'played_frames' and 'step_time'.
        """
        update_list = self.update_list
        step_time = 0.0
        
        for n in range(self.horizon_length):
            # Current observation batch.
            obs = self.obs['obs']
            
            # === RARL core: inject the adversary's disturbance ===
            if hasattr(self.vec_env, 'env') and hasattr(self.vec_env.env, 'set_adversary_disturbance'):
                disturbance = self.get_adversary_action(obs)
                self.vec_env.env.set_adversary_disturbance(disturbance)
            
            # Actions come from whichever model is currently active.
            res_dict = self.get_action_values(self.obs, self.infos)
            
            self.experience_buffer.update_data('obses', n, self.obs['obs'])
            self.experience_buffer.update_data('dones', n, self.dones)
            
            for k in update_list:
                self.experience_buffer.update_data(k, n, res_dict[k])
            
            if self.has_central_value:
                self.experience_buffer.update_data('states', n, self.obs['states'])
            
            step_time_start = time.time()
            
            # Key point: only protagonist actions are ever executed by the env.
            if self.is_training_protagonist:
                # Training the protagonist: its actions go to the environment.
                self.obs, rewards, self.dones, infos = self.env_step(res_dict['actions'])
            else:
                # Training the adversary: the frozen protagonist produces the
                # control actions executed by the env, while the adversary's
                # actions (already stored above) are used for the loss.
                with torch.no_grad():
                    pro_input = {
                        'is_train': False,
                        'prev_actions': None,
                        'obs': self._preproc_obs(obs),
                        'rnn_states': self.rnn_states,
                    }
                    pro_res = self.pro_model(pro_input)
                    pro_actions = pro_res['actions']
                
                # Manually preprocess the protagonist's actions here, because
                # preprocess_actions would apply the adversary's bounds
                # while is_training_protagonist is False.
                if self.clip_actions:
                    clamped = torch.clamp(pro_actions, -1.0, 1.0)
                    pro_actions_scaled = rescale_actions(self.pro_actions_low, self.pro_actions_high, clamped)
                else:
                    pro_actions_scaled = pro_actions
                    
                if not self.is_tensor_obses:
                    pro_actions_scaled = pro_actions_scaled.cpu().numpy()
                
                self.obs, rewards, self.dones, infos = self.vec_env.step(pro_actions_scaled)
                
                # Mirror env_step's tensor conversion for both obs styles.
                if self.is_tensor_obses:
                    if self.value_size == 1:
                        rewards = rewards.unsqueeze(1)
                    self.obs = self.obs_to_tensors(self.obs)
                    rewards = rewards.to(self.ppo_device)
                    self.dones = self.dones.to(self.ppo_device)
                else:
                    if self.value_size == 1:
                        rewards = np.expand_dims(rewards, axis=1)
                    self.obs = self.obs_to_tensors(self.obs)
                    rewards = torch.from_numpy(rewards).to(self.ppo_device).float()
                    self.dones = torch.from_numpy(self.dones).to(self.ppo_device)
                
                # Zero-sum: the adversary is rewarded for hurting the protagonist.
                rewards = -rewards * self.adv_reward_scale
            
            self.infos = infos
            step_time_end = time.time()
            step_time += (step_time_end - step_time_start)
            
            shaped_rewards = self.rewards_shaper(rewards)
            # Bootstrap value on time-limit terminations, as in stock rl_games.
            if self.value_bootstrap and 'time_outs' in infos:
                shaped_rewards += self.gamma * res_dict['values'] * self.cast_obs(infos['time_outs']).unsqueeze(1).float()
            
            self.experience_buffer.update_data('rewards', n, shaped_rewards)
            
            self.current_rewards += rewards
            if 'actual_costs' in infos:
                self.current_actual_costs += infos['actual_costs'].unsqueeze(-1)
            self.current_lengths += 1
            all_done_indices = self.dones.nonzero(as_tuple=False)
            env_done_indices = self.dones.view(self.num_actors, self.num_agents).all(dim=1).nonzero(as_tuple=False)
            
            self.game_rewards.update(self.current_rewards[env_done_indices])
            self.step_rewards.weighted_update(
                self.current_rewards[env_done_indices] / self.current_lengths[env_done_indices].unsqueeze(-1),
                self.current_lengths[env_done_indices].unsqueeze(-1)
            )
            self.step_actual_costs.weighted_update(
                self.current_actual_costs[env_done_indices] / self.current_lengths[env_done_indices].unsqueeze(-1),
                self.current_lengths[env_done_indices].unsqueeze(-1)
            )
            self.game_lengths.update(self.current_lengths[env_done_indices])
            self.algo_observer.process_infos(infos, env_done_indices)
            
            # Reset running episode stats for finished envs.
            not_dones = 1.0 - self.dones.bool().float()
            self.current_rewards = self.current_rewards * not_dones.unsqueeze(1)
            self.current_actual_costs = self.current_actual_costs * not_dones.unsqueeze(1)
            self.current_lengths = self.current_lengths * not_dones
        
        last_values = self.get_values(self.obs)
        
        # GAE-style advantage/return computation over the collected horizon.
        fdones = self.dones.float()
        mb_fdones = self.experience_buffer.tensor_dict['dones'].float()
        mb_values = self.experience_buffer.tensor_dict['values']
        mb_rewards = self.experience_buffer.tensor_dict['rewards']
        mb_advs = self.discount_values(fdones, last_values, mb_fdones, mb_values, mb_rewards)
        mb_returns = mb_advs + mb_values
        
        batch_dict = self.experience_buffer.get_transformed_list(a2c_common.swap_and_flatten01, self.tensor_list)
        batch_dict['returns'] = a2c_common.swap_and_flatten01(mb_returns)
        batch_dict['played_frames'] = self.batch_size
        batch_dict['step_time'] = step_time
        
        return batch_dict
    
    def calc_gradients(self, input_dict):
        """
        PPO gradient step for the currently-active model (same logic as the
        stock a2c_continuous agent).

        Reads old values/log-probs/advantages from *input_dict*, computes the
        clipped surrogate, value and bound losses, backpropagates through the
        AMP scaler and stores the statistics tuple in ``self.train_result``.
        """
        value_preds_batch = input_dict['old_values']
        old_action_log_probs_batch = input_dict['old_logp_actions']
        advantage = input_dict['advantages']
        old_mu_batch = input_dict['mu']
        old_sigma_batch = input_dict['sigma']
        return_batch = input_dict['returns']
        actions_batch = input_dict['actions']
        obs_batch = input_dict['obs']
        obs_batch = self._preproc_obs(obs_batch)
        
        lr_mul = 1.0
        curr_e_clip = self.e_clip
        
        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs': obs_batch,
        }
        
        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len
            batch_dict['dones'] = input_dict['dones']
        
        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = self.model(batch_dict)
            action_log_probs = res_dict['prev_neglogp']
            values = res_dict['values']
            entropy = res_dict['entropy']
            mu = res_dict['mus']
            sigma = res_dict['sigmas']
            
            a_loss = self.actor_loss_func(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)
            
            if self.has_value_loss:
                c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
            else:
                c_loss = torch.zeros(1, device=self.ppo_device)
            
            # Bound loss keeps the policy mean near the normalized action range.
            if self.bound_loss_type == 'regularisation':
                b_loss = self.reg_loss(mu)
            elif self.bound_loss_type == 'bound':
                b_loss = self.bound_loss(mu)
            else:
                b_loss = torch.zeros(1, device=self.ppo_device)
            
            loss_terms = [a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)]
            losses, sum_mask = torch_ext.apply_masks(loss_terms, rnn_masks)
            a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]
            
            loss = a_loss + 0.5 * c_loss * self.critic_coef - entropy * self.entropy_coef + b_loss * self.bounds_loss_coef
            
            if self.multi_gpu:
                self.optimizer.zero_grad()
            else:
                # Setting grads to None is cheaper than zeroing the buffers.
                for param in self.model.parameters():
                    param.grad = None
        
        self.scaler.scale(loss).backward()
        self.trancate_gradients_and_step()
        
        with torch.no_grad():
            # KL between old and new action distributions (drives LR schedule).
            reduce_kl = rnn_masks is None
            kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
            if rnn_masks is not None:
                kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel()
        
        self.diagnostics.mini_batch(self, {
            'values': value_preds_batch,
            'returns': return_batch,
            'new_neglogp': action_log_probs,
            'old_neglogp': old_action_log_probs_batch,
            'masks': rnn_masks
        }, curr_e_clip, 0)
        
        self.train_result = (a_loss, c_loss, entropy, kl_dist, self.last_lr, lr_mul, mu.detach(), sigma.detach(), b_loss)
    
    def train_actor_critic(self, input_dict):
        """Run one minibatch update on the active model and return its stats."""
        self.calc_gradients(input_dict)
        return self.train_result
    
    def train(self):
        """
        Main RARL training loop.

        Alternates between training the protagonist and the adversary each
        global epoch, switching the active model/optimizer/dataset/buffer
        before every phase.

        Returns:
            (best mean step reward, final global epoch).
        """
        self.init_tensors()
        self.last_mean_rewards = -100500
        self.last_mean_step_rewards = -100500
        start_time = time.time()
        total_time = 0
        self.obs = self.env_reset()
        self.curr_frames = self.batch_size_envs
        
        max_epochs = self.config.get('max_epochs', 1000)
        
        print("\n" + "=" * 60)
        print("开始 RARL 训练")
        print("=" * 60 + "\n")
        
        while True:
            # ============ RARL alternating training ============
            self.global_epoch += 1
            
            # Phase 1: train the protagonist.
            print(f"\n--- Global Epoch {self.global_epoch}: Training Protagonist ---")
            self.is_training_protagonist = True
            self.model = self.pro_model
            self.optimizer = self.pro_optimizer
            self.dataset = self.pro_dataset
            self.experience_buffer = self.pro_experience_buffer  # switch to the protagonist's buffer
            if self.normalize_value:
                self.value_mean_std = self.pro_model.value_mean_std
            self.model.train()
            self.adv_model.eval()  # freeze the adversary
            
            for _ in range(self.n_pro_itr):
                epoch_num = self.update_epoch()
                train_result = self._train_one_epoch()
                total_time += train_result['sum_time']
            
            # Phase 2: train the adversary.
            print(f"\n--- Global Epoch {self.global_epoch}: Training Adversary ---")
            self.is_training_protagonist = False
            self.model = self.adv_model
            self.optimizer = self.adv_optimizer
            self.dataset = self.adv_dataset
            self.experience_buffer = self.adv_experience_buffer  # switch to the adversary's buffer
            if self.normalize_value:
                self.value_mean_std = self.adv_model.value_mean_std
            self.model.train()
            self.pro_model.eval()  # freeze the protagonist
            
            for _ in range(self.n_adv_itr):
                epoch_num = self.update_epoch()
                train_result = self._train_one_epoch()
                total_time += train_result['sum_time']
            
            # Restore the protagonist as the active model.
            self.model = self.pro_model
            self.optimizer = self.pro_optimizer
            self.dataset = self.pro_dataset
            self.experience_buffer = self.pro_experience_buffer  # back to the protagonist's buffer
            self.is_training_protagonist = True
            if self.normalize_value:
                self.value_mean_std = self.pro_model.value_mean_std
            
            # Exit checks and bookkeeping.
            should_exit = False
            frame = self.frame // self.num_agents
            
            if self.print_stats and self.game_rewards.current_size > 0:
                mean_rewards = self.game_rewards.get_mean()
                mean_step_rewards = self.step_rewards.get_mean()
                print(f"\nGlobal Epoch {self.global_epoch}: mean_reward={mean_rewards[0]:.2f}, mean_step_reward={mean_step_rewards[0]:.4f}")
                
                # Track and save the best model so far.
                if mean_step_rewards[0] > self.last_mean_step_rewards and epoch_num >= self.save_best_after:
                    print(f'Saving best model with reward: {mean_step_rewards[0]}')
                    self.last_mean_step_rewards = mean_step_rewards[0]
                    self.save(os.path.join(self.nn_dir, self.config['name']))
            
            # Periodic checkpoint.
            if self.save_freq > 0 and self.global_epoch % self.save_freq == 0:
                self.save(os.path.join(self.nn_dir, f"rarl_epoch_{self.global_epoch}"))
            
            if self.global_epoch >= max_epochs or self.frame >= self.max_frames:
                self.save(os.path.join(self.nn_dir, f"rarl_final_epoch_{self.global_epoch}"))
                print('\nRARL训练完成!')
                should_exit = True
            
            if should_exit:
                return self.last_mean_step_rewards, self.global_epoch
    
    def _train_one_epoch(self):
        """
        Run one PPO epoch for the currently-active agent: collect a horizon
        of experience, then optimize over the dataset for mini_epochs_num
        passes.

        Returns:
            dict with 'sum_time' and the collected actor/critic loss lists.
        """
        self.set_eval()
        play_time_start = time.time()
        
        with torch.no_grad():
            if self.is_rnn:
                batch_dict = self.play_steps_rnn()
            else:
                batch_dict = self.play_steps()
        
        play_time_end = time.time()
        update_time_start = time.time()
        
        self.set_train()
        self.curr_frames = batch_dict.pop('played_frames')
        self.prepare_dataset(batch_dict)
        self.algo_observer.after_steps()
        
        a_losses = []
        c_losses = []
        b_losses = []
        entropies = []
        kls = []
        
        for mini_ep in range(0, self.mini_epochs_num):
            ep_kls = []
            for i in range(len(self.dataset)):
                train_results = self.train_actor_critic(self.dataset[i])
                a_loss, c_loss, entropy, kl, last_lr, lr_mul, cmu, csigma, b_loss = train_results
                
                a_losses.append(a_loss)
                c_losses.append(c_loss)
                ep_kls.append(kl)
                entropies.append(entropy)
                b_losses.append(b_loss)
                
                self.dataset.update_mu_sigma(cmu, csigma)
                
                if self.schedule_type == 'legacy':
                    # Legacy schedule adapts the LR after every minibatch.
                    av_kls = kl
                    self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
                    self.update_lr(self.last_lr)
            
            av_kls = torch_ext.mean_list(ep_kls)
            if self.schedule_type == 'standard':
                # Standard schedule adapts the LR once per mini-epoch.
                self.last_lr, self.entropy_coef = self.scheduler.update(self.last_lr, self.entropy_coef, self.epoch_num, 0, av_kls.item())
                self.update_lr(self.last_lr)
            
            kls.append(av_kls)
            self.diagnostics.mini_epoch(self, mini_ep)
            if self.normalize_input:
                self.model.running_mean_std.eval()
        
        update_time_end = time.time()
        play_time = play_time_end - play_time_start
        update_time = update_time_end - update_time_start
        sum_time = update_time_end - play_time_start
        
        # Frame accounting.
        curr_frames = self.curr_frames
        self.frame += curr_frames
        
        # Progress printout for the active agent.
        agent_name = "Protagonist" if self.is_training_protagonist else "Adversary"
        a_loss_mean = torch_ext.mean_list(a_losses).item()
        c_loss_mean = torch_ext.mean_list(c_losses).item()
        print(f"  {agent_name} - a_loss: {a_loss_mean:.4f}, c_loss: {c_loss_mean:.4f}, frames: {self.frame}")
        
        # Drop references to the large batch tensors.
        self.dataset.update_values_dict(None)
        
        return {
            'sum_time': sum_time,
            'a_losses': a_losses,
            'c_losses': c_losses,
        }
    
    def reg_loss(self, mu):
        """
        L2 regularisation penalty on the policy mean.

        Args:
            mu: action-mean tensor of shape (batch, actions_num).

        Returns:
            Per-sample penalty tensor of shape (batch,); zeros when the bound
            loss is disabled (bounds_loss_coef is None).
        """
        if self.bounds_loss_coef is not None:
            reg_loss = (mu * mu).sum(axis=-1)
        else:
            # Return a tensor rather than the int 0: calc_gradients calls
            # `b_loss.unsqueeze(1)` on this value, which would raise
            # AttributeError on a plain int.
            reg_loss = torch.zeros(mu.size()[0], device=mu.device)
        return reg_loss
    
    def bound_loss(self, mu):
        """
        Soft bound penalty pushing the policy mean back inside [-1.1, 1.1].

        Args:
            mu: action-mean tensor of shape (batch, actions_num).

        Returns:
            Per-sample penalty tensor of shape (batch,); zeros when the bound
            loss is disabled (bounds_loss_coef is None).
        """
        if self.bounds_loss_coef is not None:
            soft_bound = 1.1
            # Quadratic penalty on the part of mu that lies outside +/- soft_bound.
            mu_loss_high = torch.clamp_min(mu - soft_bound, 0.0) ** 2
            mu_loss_low = torch.clamp_max(mu + soft_bound, 0.0) ** 2
            b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
        else:
            # Tensor zeros instead of the int 0: calc_gradients calls
            # `b_loss.unsqueeze(1)` on this value, which would raise
            # AttributeError on a plain int.
            b_loss = torch.zeros(mu.size()[0], device=mu.device)
        return b_loss
    
    def get_masked_action_values(self, obs, action_masks):
        """
        Action masks are not supported for this continuous-action agent.

        Raises:
            NotImplementedError: always. The previous `assert False` guard is
            stripped under `python -O` and would then silently return None.
        """
        raise NotImplementedError('RARLAgent does not support action masks')


# Alias for compatibility with the existing Runner registration.
# NOTE: this intentionally shadows the A2CAgent imported from
# rl_games.algos_torch.a2c_continuous at the top of the file, so any code
# constructing `A2CAgent` from this module gets the RARL trainer instead.
A2CAgent = RARLAgent
