{
"help": false,
"algo": "APPO",
"env": "nethack_challenge",
"experiment": "default_experiment",
"train_dir": "/net/tscratch/people/plgbartekcupial/mrunner_scratch/sample_factory_nethack/03_01-17_00-vigilant_hodgkin/2023-12-28-monk-appo_4m9d_2/train_dir",
"restart_behavior": "resume",
"device": "gpu",
"seed": 2,
"num_policies": 1,
"async_rl": true,
"serial_mode": false,
"batched_sampling": false,
"num_batches_to_accumulate": 2,
"worker_num_splits": 2,
"policy_workers_per_policy": 1,
"max_policy_lag": 1000,
"num_workers": 16,
"num_envs_per_worker": 32,
"batch_size": 4096,
"num_batches_per_epoch": 1,
"num_epochs": 1,
"rollout": 32,
"recurrence": 32,
"shuffle_minibatches": false,
"gamma": 0.999,
"reward_scale": 1.0,
"reward_clip": 10.0,
"value_bootstrap": false,
"normalize_returns": true,
"exploration_loss_coeff": 0.001,
"value_loss_coeff": 1.0,
"kl_loss_coeff": 0.0,
"exploration_loss": "entropy",
"gae_lambda": 1.0,
"ppo_clip_ratio": 0.1,
"ppo_clip_value": 1.0,
"with_vtrace": false,
"vtrace_rho": 1.0,
"vtrace_c": 1.0,
"optimizer": "adam",
"adam_eps": 1e-07,
"adam_beta1": 0.9,
"adam_beta2": 0.999,
"max_grad_norm": 4,
"learning_rate": 0.0001,
"lr_schedule": "constant",
"lr_schedule_kl_threshold": 0.008,
"lr_adaptive_min": 1e-06,
"lr_adaptive_max": 0.01,
"obs_subtract_mean": 0.0,
"obs_scale": 1.0,
"normalize_input": false,
"normalize_input_keys": null,
"decorrelate_experience_max_seconds": 0,
"decorrelate_envs_on_one_worker": true,
"actor_worker_gpus": [],
"set_workers_cpu_affinity": true,
"force_envs_single_thread": false,
"default_niceness": 0,
"log_to_file": true,
"experiment_summaries_interval": 50,
"flush_summaries_interval": 30,
"stats_avg": 100,
"summaries_use_frameskip": true,
"heartbeat_interval": 20,
"heartbeat_reporting_interval": 180,
"train_for_env_steps": 2000000000,
"train_for_seconds": 10000000000,
"save_every_sec": 120,
"keep_checkpoints": 2,
"load_checkpoint_kind": "latest",
"save_milestones_sec": -1,
"save_best_every_sec": 5,
"save_best_metric": "reward",
"save_best_after": 100000,
"benchmark": false,
"encoder_mlp_layers": [
512,
512
],
"encoder_conv_architecture": "convnet_simple",
"encoder_conv_mlp_layers": [
512
],
"use_rnn": true,
"rnn_size": 512,
"rnn_type": "lstm",
"rnn_num_layers": 1,
"decoder_mlp_layers": [],
"nonlinearity": "relu",
"policy_initialization": "orthogonal",
"policy_init_gain": 1.0,
"actor_critic_share_weights": true,
"adaptive_stddev": false,
"continuous_tanh_scale": 0.0,
"initial_stddev": 1.0,
"use_env_info_cache": false,
"env_gpu_actions": false,
"env_gpu_observations": true,
"env_frameskip": 1,
"env_framestack": 1,
"pixel_format": "CHW",
"use_record_episode_statistics": false,
"episode_counter": false,
"with_wandb": true,
"wandb_user": "bartekcupial",
"wandb_project": "sample_factory_nethack",
"wandb_group": "gmum",
"wandb_job_type": "SF",
"wandb_tags": [],
"with_pbt": false,
"pbt_mix_policies_in_one_env": true,
"pbt_period_env_steps": 5000000,
"pbt_start_mutation": 20000000,
"pbt_replace_fraction": 0.3,
"pbt_mutation_rate": 0.15,
"pbt_replace_reward_gap": 0.1,
"pbt_replace_reward_gap_absolute": 1e-06,
"pbt_optimize_gamma": false,
"pbt_target_objective": "true_objective",
"pbt_perturb_min": 1.1,
"pbt_perturb_max": 1.5,
"character": "mon-hum-neu-mal",
"max_episode_steps": 100000,
"penalty_step": 0.0,
"penalty_time": 0.0,
"fn_penalty_step": "constant",
"savedir": null,
"save_ttyrec_every": 0,
"add_image_observation": true,
"crop_dim": 18,
"pixel_size": 6,
"use_prev_action": true,
"use_tty_only": true,
"model": "ChaoticDwarvenGPT5",
"add_stats_to_info": true,
"command_line": "--env=nethack_challenge --train_for_env_steps=2000000000 --character=mon-hum-neu-mal --num_workers=16 --num_envs_per_worker=32 --worker_num_splits=2 --rollout=32 --batch_size=4096 --async_rl=True --serial_mode=False --wandb_user=bartekcupial --wandb_project=sample_factory_nethack --wandb_group=gmum --with_wandb=True --use_prev_action=True --model=ChaoticDwarvenGPT5 --rnn_size=512 --seed=2",
"cli_args": {
"env": "nethack_challenge",
"seed": 2,
"async_rl": true,
"serial_mode": false,
"worker_num_splits": 2,
"num_workers": 16,
"num_envs_per_worker": 32,
"batch_size": 4096,
"rollout": 32,
"train_for_env_steps": 2000000000,
"rnn_size": 512,
"with_wandb": true,
"wandb_user": "bartekcupial",
"wandb_project": "sample_factory_nethack",
"wandb_group": "gmum",
"character": "mon-hum-neu-mal",
"use_prev_action": true,
"model": "ChaoticDwarvenGPT5"
},
"git_hash": "unknown",
"git_repo_name": "not a git repository",
"wandb_unique_id": "default_experiment_20240103_191716_696528"
}