{
"help": false,
"algo": "APPO",
"env": "peg-unplug-side-v2",
"experiment": "peg-unplug-side-v2",
"train_dir": "/home/qgallouedec/gia/data/envs/metaworld/train_dir",
"restart_behavior": "restart",
"device": "cpu",
"seed": null,
"num_policies": 1,
"async_rl": false,
"serial_mode": false,
"batched_sampling": false,
"num_batches_to_accumulate": 2,
"worker_num_splits": 2,
"policy_workers_per_policy": 1,
"max_policy_lag": 1000,
"num_workers": 8,
"num_envs_per_worker": 8,
"batch_size": 1024,
"num_batches_per_epoch": 4,
"num_epochs": 2,
"rollout": 64,
"recurrence": 1,
"shuffle_minibatches": false,
"gamma": 0.99,
"reward_scale": 0.1,
"reward_clip": 1000.0,
"value_bootstrap": true,
"normalize_returns": true,
"exploration_loss_coeff": 0.0,
"value_loss_coeff": 1.3,
"kl_loss_coeff": 0.1,
"exploration_loss": "entropy",
"gae_lambda": 0.95,
"ppo_clip_ratio": 0.2,
"ppo_clip_value": 1.0,
"with_vtrace": false,
"vtrace_rho": 1.0,
"vtrace_c": 1.0,
"optimizer": "adam",
"adam_eps": 1e-06,
"adam_beta1": 0.9,
"adam_beta2": 0.999,
"max_grad_norm": 3.5,
"learning_rate": 0.00295,
"lr_schedule": "linear_decay",
"lr_schedule_kl_threshold": 0.008,
"lr_adaptive_min": 1e-06,
"lr_adaptive_max": 0.01,
"obs_subtract_mean": 0.0,
"obs_scale": 1.0,
"normalize_input": true,
"normalize_input_keys": null,
"decorrelate_experience_max_seconds": 0,
"decorrelate_envs_on_one_worker": true,
"actor_worker_gpus": [],
"set_workers_cpu_affinity": true,
"force_envs_single_thread": false,
"default_niceness": 0,
"log_to_file": true,
"experiment_summaries_interval": 3,
"flush_summaries_interval": 30,
"stats_avg": 100,
"summaries_use_frameskip": true,
"heartbeat_interval": 20,
"heartbeat_reporting_interval": 180,
"train_for_env_steps": 100000000,
"train_for_seconds": 10000000000,
"save_every_sec": 15,
"keep_checkpoints": 2,
"load_checkpoint_kind": "latest",
"save_milestones_sec": -1,
"save_best_every_sec": 5,
"save_best_metric": "reward",
"save_best_after": 100000,
"benchmark": false,
"encoder_mlp_layers": [
64,
64
],
"encoder_conv_architecture": "convnet_simple",
"encoder_conv_mlp_layers": [
512
],
"use_rnn": false,
"rnn_size": 512,
"rnn_type": "gru",
"rnn_num_layers": 1,
"decoder_mlp_layers": [],
"nonlinearity": "tanh",
"policy_initialization": "torch_default",
"policy_init_gain": 1.0,
"actor_critic_share_weights": true,
"adaptive_stddev": false,
"continuous_tanh_scale": 0.0,
"initial_stddev": 1.0,
"use_env_info_cache": false,
"env_gpu_actions": false,
"env_gpu_observations": true,
"env_frameskip": 1,
"env_framestack": 1,
"pixel_format": "CHW",
"use_record_episode_statistics": false,
"with_wandb": true,
"wandb_user": "qgallouedec",
"wandb_project": "sample_facotry_metaworld",
"wandb_group": null,
"wandb_job_type": "SF",
"wandb_tags": [],
"with_pbt": false,
"pbt_mix_policies_in_one_env": true,
"pbt_period_env_steps": 5000000,
"pbt_start_mutation": 20000000,
"pbt_replace_fraction": 0.3,
"pbt_mutation_rate": 0.15,
"pbt_replace_reward_gap": 0.1,
"pbt_replace_reward_gap_absolute": 1e-06,
"pbt_optimize_gamma": false,
"pbt_target_objective": "true_objective",
"pbt_perturb_min": 1.1,
"pbt_perturb_max": 1.5,
"command_line": "--env peg-unplug-side-v2 --experiment peg-unplug-side-v2 --with_wandb True --wandb_user qgallouedec --wandb_project sample_facotry_metaworld",
"cli_args": {
"env": "peg-unplug-side-v2",
"experiment": "peg-unplug-side-v2",
"with_wandb": true,
"wandb_user": "qgallouedec",
"wandb_project": "sample_facotry_metaworld"
},
"git_hash": "2bb8d8c9cd813ffeafaad42038b1e71364092d36",
"git_repo_name": "https://github.com/huggingface/gia",
"wandb_unique_id": "peg-unplug-side-v2_20230309_112700_413212"
}