"""
RARL (Robust Adversarial Reinforcement Learning) 训练入口

使用方法:
    训练: python run_rarl.py train <env_name> [options]
    测试: python run_rarl.py test <env_name> [options]

示例:
    python run_rarl.py train fixed_circle_iwd --epochs 2000 --num-parallel 10000
    python run_rarl.py test fixed_circle_iwd --checkpoint runs/rarl_xxx/nn/rarl_epoch_1000.pth
"""

import os
import sys
file_path = os.path.dirname(__file__)
sys.path.append(os.path.join(file_path, "rl_games"))
sys.path.append(os.path.join(file_path, "xcar-simulation"))

from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
from utils.rlgame_utils import RLGPUEnv, RLGPUAlgoObserver
from envs.fixed_circle_iwd import FixedCircleIWDEnv
from envs.eight_drift_iwd import EightDriftIWDEnv
from envs.state_tracker_iwd import StateTrackerIWDEnv
from envs.continuous_drift_iwd import ContinuousDriftIWDEnv
from envs.singlelane import SingleLaneChangeStabilityIWDEnv
from envs.lane import LaneChangeStabilityIWDEnv
from envs.moose import MooseIWDEnv

import yaml
import argparse
import glob
import pprint
from datetime import datetime
from icecream import ic

ic.configureOutput(argToStringFunction=lambda x: pprint.pformat(x, sort_dicts=False))

# ============ Command-line arguments ============
parser = argparse.ArgumentParser(description="RARL训练与测试")
# Positional arguments: run mode and environment key (must match the `envs` dict below).
parser.add_argument("train_or_test", type=str, help="train 或 test")
parser.add_argument("env", type=str, help="环境名称")
# Simulation / hardware setup.
parser.add_argument("--car-preset", type=str, default="tesla_model_3")
parser.add_argument("--device", type=str, default='cuda:0')
parser.add_argument("--dt", type=float, default=0.01)
parser.add_argument("--rnn", action='store_true', help="使用RNN网络")
parser.add_argument("--seed", type=int, default=2025)
# Experiment bookkeeping and PPO hyper-parameters forwarded into rarl_config.yaml.
parser.add_argument("--exp-name", type=str, default="default")
parser.add_argument("--epochs", type=int, default=6000)
parser.add_argument("--num-parallel", type=int, default=70000)
parser.add_argument("--lr-schedule", type=str, default="adaptive")
parser.add_argument("--mini-epochs", type=int, default=5)
parser.add_argument("--gamma", type=float, default=0.99)
parser.add_argument("--horizon", type=int, default=200)
parser.add_argument("--save-freq", type=int, default=50)
# Checkpoint handling: --checkpoint gives an explicit path, --resume falls back
# to the default path under runs/<experiment>/nn.
parser.add_argument("--checkpoint", type=str, default=None)
parser.add_argument("--resume", action='store_true')

# RARL-specific parameters
parser.add_argument("--n-pro-itr", type=int, default=1, help="每轮protagonist训练迭代数")
parser.add_argument("--n-adv-itr", type=int, default=1, help="每轮adversary训练迭代数")
parser.add_argument("--adv-scale", type=float, default=1.0, help="对抗奖励缩放")

args = parser.parse_args()

# ============ Environment configuration ============
def get_num_parallel():
    """Return the number of parallel simulated environments.

    Training uses the full --num-parallel count; testing runs a small
    fixed batch of 20 environments.
    """
    return args.num_parallel if args.train_or_test == "train" else 20

# Mapping from CLI environment name to a factory building that vectorized env.
# Each factory injects the shared CLI settings (car preset, parallel env count,
# device) and forwards extra keyword arguments supplied by the runner config.
envs = {
    "fixed_circle_iwd": lambda **kwargs: FixedCircleIWDEnv(args.car_preset, get_num_parallel(), args.device, **kwargs),
    "eight_drift_iwd": lambda **kwargs: EightDriftIWDEnv(args.car_preset, get_num_parallel(), args.device, **kwargs),
    "state_tracker_iwd": lambda **kwargs: StateTrackerIWDEnv(args.car_preset, get_num_parallel(), args.device, **kwargs),
    # continuous_drift additionally pins the drift controller mode to "hybrid".
    "continuous_drift_iwd": lambda **kwargs: ContinuousDriftIWDEnv(args.car_preset, get_num_parallel(), args.device, "hybrid", **kwargs),
    "singlelane": lambda **kwargs: SingleLaneChangeStabilityIWDEnv(args.car_preset, get_num_parallel(), args.device, **kwargs),
    "lane": lambda **kwargs: LaneChangeStabilityIWDEnv(args.car_preset, get_num_parallel(), args.device, **kwargs),
    "moose": lambda **kwargs: MooseIWDEnv(args.car_preset, get_num_parallel(), args.device, **kwargs),
}

# Default environment configuration, merged with per-run overrides in env_creator.
default_env_config = {
    "dt": args.dt,
    "disturbance_param": None,  # no random disturbance in RARL mode (adversary provides it)
    "randomize_param": {},
    "random_seed": args.seed,
    "train": (args.train_or_test == "train"),
    "rarl_mode": True,  # enable RARL mode
}

# ============ Environment registration ============
def blacklist_keys(d, blacklist):
    """Return a shallow copy of ``d`` without the keys listed in ``blacklist``.

    Used below to drop default env-config entries that are explicitly
    overridden by the runner's env_config.
    """
    # dict-comprehension over items() avoids the second lookup of d[k]
    return {k: v for k, v in d.items() if k not in blacklist}
# Register the GPU-vectorized environment wrapper with rl_games.
vecenv.register('RLGPU',
                lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
# env_creator merges default_env_config with per-run overrides; keys already
# present in env_config are removed from the defaults first, so explicit
# overrides always win.
env_configurations.register('rlgpu', {
    'vecenv_type': 'RLGPU',
    'env_creator': lambda **env_config: envs[args.env](
        **blacklist_keys(default_env_config, env_config.keys()),
        **env_config,
    ),
})

# ============ Load the RARL configuration ============
runner = Runner(RLGPUAlgoObserver())
config_path = os.path.join(file_path, "rarl_config.yaml")
with open(config_path) as f:
    runner_config = yaml.safe_load(f)

# Build the experiment name; training runs get a timestamp suffix so that
# repeated runs do not overwrite each other.
full_experiment_name = f"rarl_{args.env}_{args.exp_name}"
if args.train_or_test == "train":
    full_experiment_name += datetime.now().strftime("_%m%d_%H%M")

# Apply CLI overrides on top of the YAML configuration.
runner_config["params"]["seed"] = args.seed
cfg = runner_config["params"]["config"]
cfg.update({
    "num_actors": args.num_parallel,
    "max_epochs": args.epochs,
    "minibatch_size": args.num_parallel,
    "mini_epochs": args.mini_epochs,
    "lr_schedule": args.lr_schedule,
    "gamma": args.gamma,
    "horizon_length": args.horizon,
    "name": args.env,
    "full_experiment_name": full_experiment_name,
    "save_frequency": args.save_freq,
    "device_name": args.device,
    "device": args.device,
    # RARL-specific settings
    "n_pro_itr": args.n_pro_itr,
    "n_adv_itr": args.n_adv_itr,
    "adv_reward_scale": args.adv_scale,
    "rarl_mode": True,
})

# Make the experiment name visible to the environment as well.
default_env_config["full_experiment_name"] = full_experiment_name

# Drop the RNN sub-network unless explicitly requested via --rnn.
if not args.rnn:
    runner_config["params"]["network"].pop("rnn", None)

runner.load(runner_config)

# ============ Main program ============
def _resolve_checkpoint():
    """Resolve the checkpoint path to load.

    An explicit ``--checkpoint`` argument wins; otherwise fall back to the
    default location inside this experiment's run directory.
    """
    if args.checkpoint:
        return args.checkpoint
    return f"runs/{full_experiment_name}/nn/{args.env}.pth"


if __name__ == "__main__":
    # Banner summarizing the run configuration.
    print("\n" + "=" * 60)
    print("RARL (Robust Adversarial Reinforcement Learning)")
    print("=" * 60)
    print(f"环境: {args.env}")
    print(f"模式: {args.train_or_test}")
    print(f"Protagonist迭代数/轮: {args.n_pro_itr}")
    print(f"Adversary迭代数/轮: {args.n_adv_itr}")
    print(f"并行环境数: {args.num_parallel}")
    print("=" * 60 + "\n")

    if args.train_or_test == "train":
        train_config = {'train': True}

        # Optionally resume: either from an explicit checkpoint or from the
        # run directory's default checkpoint when --resume is given.
        if args.resume or args.checkpoint:
            checkpoint_name = _resolve_checkpoint()
            print(f"从checkpoint恢复训练: {checkpoint_name}")
            train_config['checkpoint'] = checkpoint_name

        runner.run(train_config)

    elif args.train_or_test == "test":
        checkpoint_name = _resolve_checkpoint()
        print(f"加载checkpoint: {checkpoint_name}")
        runner.run({
            'train': False,
            'play': True,
            'checkpoint': checkpoint_name,
        })

    else:
        # Previously an unknown mode fell through silently; fail loudly instead.
        parser.error(f"unknown mode: {args.train_or_test!r} (expected 'train' or 'test')")
