First Push (commit 4c16504)
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1321903467178345,
"min": 1.1321903467178345,
"max": 2.873920440673828,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10872.423828125,
"min": 10872.423828125,
"max": 29431.8203125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.436102867126465,
"min": 0.45559248328208923,
"max": 12.436102867126465,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2425.0400390625,
"min": 88.38494110107422,
"max": 2497.45263671875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06429133673744075,
"min": 0.06429133673744075,
"max": 0.07345170655963895,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.257165346949763,
"min": 0.257165346949763,
"max": 0.3648258062283846,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20680761183885968,
"min": 0.10510232823663482,
"max": 0.27600525378012186,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8272304473554387,
"min": 0.42040931294653927,
"max": 1.3800262689006093,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.363636363636363,
"min": 3.1363636363636362,
"max": 25.363636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1116.0,
"min": 138.0,
"max": 1355.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.363636363636363,
"min": 3.1363636363636362,
"max": 25.363636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1116.0,
"min": 138.0,
"max": 1355.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695331264",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1695331828"
},
"total": 563.335585581,
"count": 1,
"self": 0.8869743089999247,
"children": {
"run_training.setup": {
"total": 0.07341583400000218,
"count": 1,
"self": 0.07341583400000218
},
"TrainerController.start_learning": {
"total": 562.3751954380001,
"count": 1,
"self": 0.7276764309995087,
"children": {
"TrainerController._reset_env": {
"total": 4.314881501999935,
"count": 1,
"self": 4.314881501999935
},
"TrainerController.advance": {
"total": 557.0009233960005,
"count": 18213,
"self": 0.3981985970002597,
"children": {
"env_step": {
"total": 556.6027247990003,
"count": 18213,
"self": 407.93795377498463,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.298126798015,
"count": 18213,
"self": 2.29144132401791,
"children": {
"TorchPolicy.evaluate": {
"total": 146.0066854739971,
"count": 18213,
"self": 146.0066854739971
}
}
},
"workers": {
"total": 0.3666442260006306,
"count": 18213,
"self": 0.0,
"children": {
"worker_root": {
"total": 560.0245748469985,
"count": 18213,
"is_parallel": true,
"self": 257.94107450198874,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006042525999987447,
"count": 1,
"is_parallel": true,
"self": 0.004329981000068983,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017125449999184639,
"count": 10,
"is_parallel": true,
"self": 0.0017125449999184639
}
}
},
"UnityEnvironment.step": {
"total": 0.04576094699996247,
"count": 1,
"is_parallel": true,
"self": 0.0005194730000539494,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000602358000037384,
"count": 1,
"is_parallel": true,
"self": 0.000602358000037384
},
"communicator.exchange": {
"total": 0.041051537999919674,
"count": 1,
"is_parallel": true,
"self": 0.041051537999919674
},
"steps_from_proto": {
"total": 0.0035875779999514634,
"count": 1,
"is_parallel": true,
"self": 0.000461780999671646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0031257970002798174,
"count": 10,
"is_parallel": true,
"self": 0.0031257970002798174
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 302.0835003450097,
"count": 18212,
"is_parallel": true,
"self": 12.230056430024888,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.234582470983696,
"count": 18212,
"is_parallel": true,
"self": 6.234582470983696
},
"communicator.exchange": {
"total": 239.2423680279992,
"count": 18212,
"is_parallel": true,
"self": 239.2423680279992
},
"steps_from_proto": {
"total": 44.37649341600195,
"count": 18212,
"is_parallel": true,
"self": 8.751798726948323,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.62469468905363,
"count": 182120,
"is_parallel": true,
"self": 35.62469468905363
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001961150001079659,
"count": 1,
"self": 0.0001961150001079659,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 552.297722449933,
"count": 529372,
"is_parallel": true,
"self": 13.296027070877471,
"children": {
"process_trajectory": {
"total": 305.4635409730554,
"count": 529372,
"is_parallel": true,
"self": 304.3317492590554,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1317917139999736,
"count": 4,
"is_parallel": true,
"self": 1.1317917139999736
}
}
},
"_update_policy": {
"total": 233.5381544060001,
"count": 90,
"is_parallel": true,
"self": 93.90035019499669,
"children": {
"TorchPPOOptimizer.update": {
"total": 139.6378042110034,
"count": 4587,
"is_parallel": true,
"self": 139.6378042110034
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.33151799400002346,
"count": 1,
"self": 0.001615313000002061,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3299026810000214,
"count": 1,
"self": 0.3299026810000214
}
}
}
}
}
}
}
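
For anyone who wants to inspect this log offline rather than read the raw JSON, here is a minimal sketch using only the Python standard library. The file path is an assumption (ML-Agents conventionally writes this log to `results/<run-id>/run_logs/timers.json`; adjust it to wherever the JSON above is stored in this repo), and the `walk` helper is illustrative rather than part of the ML-Agents API. The exact `mlagents-learn` command that produced the run is recorded under `metadata.command_line_arguments`.

```python
import json

# NOTE: assumed path -- ML-Agents conventionally writes this file to
# results/<run-id>/run_logs/timers.json; point it at wherever the JSON above lives.
TIMERS_PATH = "run_logs/timers.json"

with open(TIMERS_PATH) as f:
    root = json.load(f)

# "gauges" holds one entry per tracked statistic, each with value/min/max/count.
gauges = root["gauges"]
for key in (
    "SnowballTarget.Environment.CumulativeReward.mean",
    "SnowballTarget.Policy.Entropy.mean",
):
    g = gauges[key]
    print(f"{key}: last={g['value']:.3f} min={g['min']:.3f} "
          f"max={g['max']:.3f} (n={g['count']})")

# The rest of the file is a nested timer tree: every block records total wall-clock
# seconds, a call count, self time, and optional children.
def walk(node, name="root", depth=0):
    """Print each timer block's cumulative time and call count, indented by depth."""
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(root)
```

Reading the gauges directly: the mean cumulative reward rises from about 3.1 to 25.4 across the 20 recorded summaries, and the timer tree shows that `communicator.exchange` (the Unity environment round-trips) accounts for roughly 239 s of the ~562 s spent in `TrainerController.start_learning`.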