{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7846235036849976,
"min": 0.7618783116340637,
"max": 2.864053964614868,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7508.8466796875,
"min": 7508.8466796875,
"max": 29362.28125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.84020709991455,
"min": 0.3779557943344116,
"max": 12.84020709991455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2503.84033203125,
"min": 73.32342529296875,
"max": 2608.18115234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06473177885017517,
"min": 0.06276025597146466,
"max": 0.07261159207315787,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2589271154007007,
"min": 0.25104102388585864,
"max": 0.358244339856088,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17273082971280695,
"min": 0.13480677596731658,
"max": 0.28861139422538235,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6909233188512278,
"min": 0.5392271038692663,
"max": 1.4430569711269117,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.204545454545453,
"min": 3.8863636363636362,
"max": 25.204545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1109.0,
"min": 171.0,
"max": 1385.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.204545454545453,
"min": 3.8863636363636362,
"max": 25.204545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1109.0,
"min": 171.0,
"max": 1385.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715860075",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715860630"
},
"total": 555.0220534779996,
"count": 1,
"self": 0.536615528999846,
"children": {
"run_training.setup": {
"total": 0.07452605299977222,
"count": 1,
"self": 0.07452605299977222
},
"TrainerController.start_learning": {
"total": 554.410911896,
"count": 1,
"self": 0.7744667189949723,
"children": {
"TrainerController._reset_env": {
"total": 3.7246600450002916,
"count": 1,
"self": 3.7246600450002916
},
"TrainerController.advance": {
"total": 549.8179748370048,
"count": 18201,
"self": 0.393810120966009,
"children": {
"env_step": {
"total": 549.4241647160388,
"count": 18201,
"self": 426.3384825630601,
"children": {
"SubprocessEnvManager._take_step": {
"total": 122.69548642500013,
"count": 18201,
"self": 2.0682653260596453,
"children": {
"TorchPolicy.evaluate": {
"total": 120.62722109894048,
"count": 18201,
"self": 120.62722109894048
}
}
},
"workers": {
"total": 0.39019572797860747,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 552.6499505940651,
"count": 18201,
"is_parallel": true,
"self": 258.8022968720443,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00972464200003742,
"count": 1,
"is_parallel": true,
"self": 0.0071664559995952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025581860004422197,
"count": 10,
"is_parallel": true,
"self": 0.0025581860004422197
}
}
},
"UnityEnvironment.step": {
"total": 0.05377156999975341,
"count": 1,
"is_parallel": true,
"self": 0.0008719060001567414,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004811069998140738,
"count": 1,
"is_parallel": true,
"self": 0.0004811069998140738
},
"communicator.exchange": {
"total": 0.04167233299995132,
"count": 1,
"is_parallel": true,
"self": 0.04167233299995132
},
"steps_from_proto": {
"total": 0.010746223999831273,
"count": 1,
"is_parallel": true,
"self": 0.004298016999655374,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006448207000175898,
"count": 10,
"is_parallel": true,
"self": 0.006448207000175898
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 293.8476537220208,
"count": 18200,
"is_parallel": true,
"self": 13.97859301295648,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.039085694009373,
"count": 18200,
"is_parallel": true,
"self": 7.039085694009373
},
"communicator.exchange": {
"total": 230.8799974560311,
"count": 18200,
"is_parallel": true,
"self": 230.8799974560311
},
"steps_from_proto": {
"total": 41.949977559023864,
"count": 18200,
"is_parallel": true,
"self": 8.3272029401096,
"children": {
"_process_rank_one_or_two_observation": {
"total": 33.622774618914264,
"count": 182000,
"is_parallel": true,
"self": 33.622774618914264
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016208299985009944,
"count": 1,
"self": 0.00016208299985009944,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 543.2737291329095,
"count": 738559,
"is_parallel": true,
"self": 16.253693395718983,
"children": {
"process_trajectory": {
"total": 295.62095216218904,
"count": 738559,
"is_parallel": true,
"self": 294.55958947318913,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0613626889999068,
"count": 4,
"is_parallel": true,
"self": 1.0613626889999068
}
}
},
"_update_policy": {
"total": 231.39908357500144,
"count": 90,
"is_parallel": true,
"self": 66.3568457880042,
"children": {
"TorchPPOOptimizer.update": {
"total": 165.04223778699725,
"count": 4587,
"is_parallel": true,
"self": 165.04223778699725
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.0936482120000619,
"count": 1,
"self": 0.0011828290002995345,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09246538299976237,
"count": 1,
"self": 0.09246538299976237
}
}
}
}
}
}
}