"""
RARL (Robust Adversarial Reinforcement Learning) 双智能体训练算法

核心思想：
- 主策略(protagonist)：学习控制任务，最大化reward
- 对抗策略(adversary)：学习生成扰动，最小化主策略reward（即最大化-reward）
- 两个策略交替训练，形成零和博弈

训练流程：
1. 固定adversary，训练protagonist n_pro_itr次
2. 固定protagonist，训练adversary n_adv_itr次
3. 重复上述过程
"""

from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.algos_torch.a2c_continuous import A2CAgent

from torch import optim
import torch
from torch import nn
import numpy as np
import gym
import copy
import os


class RARLAgent:
    """
    RARL dual-agent training manager.

    Manages a protagonist and an adversary A2CAgent and alternates their
    training:
    - protagonist: main control policy; maps observations to control actions.
    - adversary: disturbance policy; maps observations to disturbance values.
    """

    def __init__(self, base_name, params):
        """
        Initialize the RARL trainer.

        Args:
            base_name: base name for the algorithm/run.
            params: parameter dict holding the shared config used to derive
                both the protagonist's and the adversary's configs.
        """
        self.base_name = base_name
        self.params = params
        self.config = params['config']

        # RARL-specific hyperparameters.
        self.n_pro_itr = self.config.get('n_pro_itr', 1)  # protagonist iterations per round
        self.n_adv_itr = self.config.get('n_adv_itr', 1)  # adversary iterations per round
        self.adv_reward_scale = self.config.get('adv_reward_scale', 1.0)  # adversary reward scaling

        # Device configuration.
        self.ppo_device = self.config.get('device', 'cuda:0')

        # Protagonist config: a deep copy of the original params.
        pro_params = self._create_pro_params(params)

        # Adversary config: same copy, plus the disturbance action space.
        adv_params = self._create_adv_params(params)

        # Instantiate both A2C agents.
        print("=" * 50)
        print("初始化 Protagonist Agent...")
        print("=" * 50)
        self.protagonist = A2CAgent(base_name + "_pro", pro_params)

        print("=" * 50)
        print("初始化 Adversary Agent...")
        print("=" * 50)
        self.adversary = A2CAgent(base_name + "_adv", adv_params)

        # Both agents must interact with the same vectorized environment.
        self.vec_env = self.protagonist.vec_env
        self.adversary.vec_env = self.vec_env

        # Training state.
        self.global_epoch = 0
        self.frame = 0

        # Log directories (reuse the protagonist's).
        self.experiment_dir = self.protagonist.experiment_dir
        self.nn_dir = self.protagonist.nn_dir

    def _create_pro_params(self, params):
        """Build the protagonist's parameter dict (deep copy of *params*)."""
        pro_params = copy.deepcopy(params)
        pro_params['config']['name'] = params['config']['name'] + '_protagonist'
        pro_params['config']['is_protagonist'] = True
        return pro_params

    def _create_adv_params(self, params):
        """
        Build the adversary's parameter dict.

        Key difference from the protagonist's: the config carries the
        disturbance action space (10-dim), taken from the config
        (kept in sync with rarl_config.yaml).
        """
        adv_params = copy.deepcopy(params)
        adv_params['config']['name'] = params['config']['name'] + '_adversary'
        adv_params['config']['is_protagonist'] = False

        # Disturbance action space for the adversary, from env_config.
        adv_action_space = self.config.get('adv_action_space', {
            'low': [-150., -150., -150., -150., -150., -150., -150., -150., -60000., -5000.],
            'high': [150., 150., 150., 150., 150., 150., 150., 150., 60000., 5000.],
        })
        adv_params['config']['adv_action_space'] = adv_action_space

        return adv_params

    def train(self):
        """
        Main RARL training loop.

        Alternates training the two agents each global epoch:
        1. The adversary supplies disturbances but is frozen while the
           protagonist trains.
        2. The protagonist supplies actions but is frozen while the
           adversary trains.

        Returns:
            Tuple of (protagonist's last mean step rewards, final global epoch).
        """
        print("\n" + "=" * 60)
        print("开始 RARL 训练")
        print(f"Protagonist迭代数/轮: {self.n_pro_itr}")
        print(f"Adversary迭代数/轮: {self.n_adv_itr}")
        print("=" * 60 + "\n")

        max_epochs = self.config.get('max_epochs', 1000)

        # Allocate rollout buffers for both agents.
        self.protagonist.init_tensors()
        self.adversary.init_tensors()

        # Reset the shared environment and fetch the initial observation.
        self.protagonist.obs = self.protagonist.env_reset()
        self.adversary.obs = self.protagonist.obs  # shared observation

        while self.global_epoch < max_epochs:
            print(f"\n{'='*40}")
            print(f"RARL Global Epoch: {self.global_epoch}")
            print(f"{'='*40}")

            # ============ Phase 1: train the protagonist ============
            print(f"\n--- Phase 1: Training Protagonist ({self.n_pro_itr} iters) ---")
            self.adversary.model.eval()  # freeze the adversary

            for pro_itr in range(self.n_pro_itr):
                # Adversary generates disturbances; only the protagonist updates.
                self._train_protagonist_epoch()

            # ============ Phase 2: train the adversary ============
            print(f"\n--- Phase 2: Training Adversary ({self.n_adv_itr} iters) ---")
            self.protagonist.model.eval()  # freeze the protagonist
            self.adversary.model.train()

            for adv_itr in range(self.n_adv_itr):
                # Protagonist executes actions; adversary trains to maximize -reward.
                self._train_adversary_epoch()

            self.protagonist.model.train()  # restore protagonist training mode

            self.global_epoch += 1

            # Periodic checkpointing.
            if self.global_epoch % self.config.get('save_frequency', 100) == 0:
                self.save(os.path.join(self.nn_dir, f'rarl_epoch_{self.global_epoch}'))

        print("\n" + "=" * 60)
        print("RARL 训练完成!")
        print("=" * 60)

        return self.protagonist.last_mean_step_rewards, self.global_epoch

    def _train_protagonist_epoch(self):
        """Run one protagonist training epoch with adversary disturbances active."""
        # Route adversary-generated disturbances into the environment.
        self.vec_env.set_adversary_policy(self.adversary.model, self.ppo_device)
        self.vec_env.set_adversary_mode(True)

        # Execute one full training epoch.
        step_time, play_time, update_time, sum_time, a_losses, c_losses, \
            b_losses, s_losses, auto_losses, entropies, kls, last_lr, lr_mul = \
            self.protagonist.train_epoch()

        self.frame += self.protagonist.curr_frames
        self._print_stats("Protagonist", a_losses, c_losses, entropies)

    def _train_adversary_epoch(self):
        """Run one adversary training epoch with the protagonist acting."""
        # Route protagonist-generated actions into the environment.
        self.vec_env.set_protagonist_policy(self.protagonist.model, self.ppo_device)
        self.vec_env.set_adversary_mode(True)

        # The adversary's reward is the negative of the protagonist's;
        # that negation is handled inside the env's step.
        # NOTE(review): self.frame only advances during protagonist epochs —
        # confirm adversary frames are intentionally excluded from the count.

        step_time, play_time, update_time, sum_time, a_losses, c_losses, \
            b_losses, s_losses, auto_losses, entropies, kls, last_lr, lr_mul = \
            self.adversary.train_epoch()

        self._print_stats("Adversary", a_losses, c_losses, entropies)

    def _print_stats(self, agent_name, a_losses, c_losses, entropies):
        """Print mean actor/critic loss and entropy for one agent's epoch."""
        a_loss = torch_ext.mean_list(a_losses).item() if a_losses else 0
        c_loss = torch_ext.mean_list(c_losses).item() if c_losses else 0
        entropy = torch_ext.mean_list(entropies).item() if entropies else 0
        print(f"  {agent_name} - a_loss: {a_loss:.4f}, c_loss: {c_loss:.4f}, entropy: {entropy:.4f}")

    def save(self, filename):
        """Save a combined checkpoint holding both agents' full state."""
        state = {
            'global_epoch': self.global_epoch,
            'frame': self.frame,
            'protagonist': self.protagonist.get_full_state_weights(),
            'adversary': self.adversary.get_full_state_weights(),
        }
        torch_ext.save_checkpoint(filename, state)
        # Fixed: the original f-string had no placeholder, so the path was lost.
        print(f"Saved RARL checkpoint to {filename}")

    def restore(self, filename):
        """Restore both agents (and training counters) from a combined checkpoint."""
        checkpoint = torch_ext.load_checkpoint(filename)
        self.global_epoch = checkpoint.get('global_epoch', 0)
        self.frame = checkpoint.get('frame', 0)
        self.protagonist.set_full_state_weights(checkpoint['protagonist'])
        self.adversary.set_full_state_weights(checkpoint['adversary'])
        # Fixed: the original f-string had no placeholder, so the path was lost.
        print(f"Restored RARL checkpoint from {filename}")


class AdversaryA2CAgent(A2CAgent):
    """
    A2CAgent specialized for the adversary policy.

    Differences from the standard A2CAgent:
    1. Its action space is the disturbance space (10-dim by default).
    2. Rollout rewards are negated, so maximizing return minimizes the
       protagonist's reward.
    """

    def __init__(self, base_name, params):
        # Build the disturbance action space before the base class consumes params.
        self._setup_adversary_action_space(params)
        super().__init__(base_name, params)
        self.is_adversary = True

    def _setup_adversary_action_space(self, params):
        """Construct the disturbance gym.Box from config (with fallback bounds)."""
        space_cfg = params['config'].get('adv_action_space', {})
        default_low = [-50.] * 8 + [-200., -300.]
        default_high = [50.] * 8 + [200., 300.]
        lo = np.array(space_cfg.get('low', default_low)).astype(np.float32)
        hi = np.array(space_cfg.get('high', default_high)).astype(np.float32)

        # NOTE(review): this only stores the Box on self; nothing visible here
        # writes it into env_info/action_space — confirm the base class or the
        # env wrapper actually consumes self.adv_action_space.
        self.adv_action_space = gym.spaces.Box(low=lo, high=hi, dtype=np.float32)

    def play_steps(self):
        """Collect a rollout via the base class, then flip the sign of rewards."""
        rollout = super().play_steps()

        # Zero-sum objective: the adversary's goal is to minimize the
        # protagonist's reward, i.e. maximize -reward.
        if 'rewards' in rollout:
            rollout['rewards'] = -rollout['rewards']

        return rollout


class RARLVecEnvWrapper:
    """
    Environment wrapper for RARL.

    Wraps the underlying vectorized environment and supports:
    1. Accepting protagonist actions alongside adversary disturbances.
    2. Injecting disturbances into the dynamics before each step.
    3. Returning negated reward for the adversary (handled by the env).
    """

    def __init__(self, vec_env, adversary_action_space):
        self.vec_env = vec_env
        self.adversary_action_space = adversary_action_space

        # Policy handles; populated lazily via the setters below.
        self.protagonist_policy = None
        self.adversary_policy = None
        self.adversary_mode = False

        # Fall back to 'cuda:0' when the inner env exposes no device attribute.
        self.device = getattr(vec_env.env, 'device', 'cuda:0')

    def set_protagonist_policy(self, policy, device):
        """Register the protagonist policy (used while the adversary trains)."""
        self.protagonist_policy = policy
        self.device = device

    def set_adversary_policy(self, policy, device):
        """Register the adversary policy (used while the protagonist trains)."""
        self.adversary_policy = policy
        self.device = device

    def set_adversary_mode(self, enabled):
        """Toggle adversarial disturbance injection on or off."""
        self.adversary_mode = enabled

    def step(self, actions):
        """
        Advance the wrapped environment by one step.

        When adversary mode is active and an adversary policy is registered:
        1. Query the adversary for a disturbance on the current observation.
        2. Hand the disturbance to the environment before stepping.
        """
        inject_disturbance = self.adversary_mode and self.adversary_policy is not None
        if inject_disturbance:
            # Current observation for the adversary's forward pass.
            current_obs = self.vec_env.env.obs()

            # Inference only — no gradients flow through the adversary here.
            with torch.no_grad():
                result = self.adversary_policy({'obs': current_obs, 'is_train': False})

            # NOTE(review): assumes the env applies this disturbance on its
            # next step() — confirm against set_adversary_disturbance().
            self.vec_env.env.set_adversary_disturbance(result['actions'])

        return self.vec_env.step(actions)

    def reset(self):
        """Reset the wrapped environment and return its initial observations."""
        return self.vec_env.reset()

    def __getattr__(self, name):
        """Fall through to the wrapped vec_env for any attribute not defined here."""
        return getattr(self.vec_env, name)

