import gymnasium as gym
import torch
import ppo
# Hardware acceleration setup: prefer the GPU when torch can see one.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
# Let cuDNN auto-tune convolution algorithms for fixed-size inputs.
torch.backends.cudnn.benchmark = True

# Hyper-parameter configuration (stable-baselines3-style PPO keys).
config = {
    "n_envs": 12,                  # number of parallel environments (suggest: CPU core count)
    # Total training steps. The comment required at least 5 million steps, but
    # the original literal 5_000_00 was 500,000 — misplaced digit grouping.
    "total_timesteps": 5_000_000,
    "policy_kwargs": {
        "net_arch": {
            "pi": [512, 512],      # policy network layer sizes
            "vf": [512, 512],      # value-function network layer sizes
        },
        "activation_fn": torch.nn.ReLU,
    },
    "learning_rate": 3e-4,
    "batch_size": 4096,            # sized to fit GPU memory
    "n_steps": 2048,               # rollout steps collected per environment
    "gamma": 0.99,                 # discount factor
    "gae_lambda": 0.95,            # GAE smoothing factor
    "clip_range": 0.2,             # PPO clipping range
    "ent_coef": 0.001,             # mild entropy bonus to encourage exploration
    "target_kl": 0.05,             # early-stop threshold on KL divergence
    "max_grad_norm": 0.5,          # gradient clipping
}

# Single training environment.
# NOTE(review): config["n_envs"] = 12 suggests a vectorized setup (e.g. a
# VecEnv + VecNormalize); only one plain env is created here — confirm intent.
env = gym.make('Humanoid-v5', ctrl_cost_weight=0.1)

# NOTE(review): ppo.PPO is a project-local class whose constructor signature is
# not visible from this file. Presumably it should receive `env` and the
# hyper-parameters in `config` — verify against the ppo module.
model = ppo.PPO()

# Training loop: always persist the model (and normalization stats, if any)
# even when training is interrupted.
try:
    # Was learn() with no arguments; pass the configured step budget.
    model.learn(total_timesteps=config["total_timesteps"])
finally:
    model.save("humanoid_walk_ppo")
    # A plain gymnasium Env has no .save(); only wrappers such as VecNormalize
    # persist normalization state. Guard so an AttributeError here cannot mask
    # the real training outcome inside this finally block.
    if hasattr(env, "save"):
        env.save("humanoid_vecnormalize.pkl")
    env.close()