{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9016520380973816,
"min": 0.9016520380973816,
"max": 2.8693056106567383,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8608.9736328125,
"min": 8608.9736328125,
"max": 29416.12109375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.826777458190918,
"min": 0.3271358013153076,
"max": 12.826777458190918,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2501.2216796875,
"min": 63.46434783935547,
"max": 2589.490234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07645784874534464,
"min": 0.06286412527734486,
"max": 0.07645784874534464,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.30583139498137857,
"min": 0.2637896755911118,
"max": 0.3669677871172704,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1878687548725044,
"min": 0.0973890304859892,
"max": 0.29223428169886273,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7514750194900176,
"min": 0.3895561219439568,
"max": 1.419357762909403,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.40909090909091,
"min": 2.840909090909091,
"max": 25.40909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1118.0,
"min": 125.0,
"max": 1379.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.40909090909091,
"min": 2.840909090909091,
"max": 25.40909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1118.0,
"min": 125.0,
"max": 1379.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678793473",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678793941"
},
"total": 468.58006868100006,
"count": 1,
"self": 0.3866525820000106,
"children": {
"run_training.setup": {
"total": 0.10240821000002143,
"count": 1,
"self": 0.10240821000002143
},
"TrainerController.start_learning": {
"total": 468.091007889,
"count": 1,
"self": 0.5264990209993243,
"children": {
"TrainerController._reset_env": {
"total": 9.646890308000025,
"count": 1,
"self": 9.646890308000025
},
"TrainerController.advance": {
"total": 457.7695896250007,
"count": 18201,
"self": 0.28168129699594147,
"children": {
"env_step": {
"total": 457.4879083280048,
"count": 18201,
"self": 326.40866774399575,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.8090000780017,
"count": 18201,
"self": 2.332553960997245,
"children": {
"TorchPolicy.evaluate": {
"total": 128.47644611700446,
"count": 18201,
"self": 128.47644611700446
}
}
},
"workers": {
"total": 0.2702405060073261,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 466.68930709701107,
"count": 18201,
"is_parallel": true,
"self": 224.62918766201454,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0049110740000060105,
"count": 1,
"is_parallel": true,
"self": 0.003349884000044767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015611899999612433,
"count": 10,
"is_parallel": true,
"self": 0.0015611899999612433
}
}
},
"UnityEnvironment.step": {
"total": 0.048371696000003794,
"count": 1,
"is_parallel": true,
"self": 0.0005747390000578889,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031268800000816555,
"count": 1,
"is_parallel": true,
"self": 0.00031268800000816555
},
"communicator.exchange": {
"total": 0.045541315999969356,
"count": 1,
"is_parallel": true,
"self": 0.045541315999969356
},
"steps_from_proto": {
"total": 0.0019429529999683837,
"count": 1,
"is_parallel": true,
"self": 0.0003946899999505149,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015482630000178688,
"count": 10,
"is_parallel": true,
"self": 0.0015482630000178688
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 242.06011943499652,
"count": 18200,
"is_parallel": true,
"self": 9.736031111000557,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.315822823995461,
"count": 18200,
"is_parallel": true,
"self": 5.315822823995461
},
"communicator.exchange": {
"total": 194.61940928600114,
"count": 18200,
"is_parallel": true,
"self": 194.61940928600114
},
"steps_from_proto": {
"total": 32.38885621399936,
"count": 18200,
"is_parallel": true,
"self": 6.374146542021492,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.01470967197787,
"count": 182000,
"is_parallel": true,
"self": 26.01470967197787
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011934899998777837,
"count": 1,
"self": 0.00011934899998777837,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 454.07933884606075,
"count": 411966,
"is_parallel": true,
"self": 10.63137609008686,
"children": {
"process_trajectory": {
"total": 254.94461030497376,
"count": 411966,
"is_parallel": true,
"self": 254.19926587497366,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7453444300001024,
"count": 4,
"is_parallel": true,
"self": 0.7453444300001024
}
}
},
"_update_policy": {
"total": 188.50335245100013,
"count": 90,
"is_parallel": true,
"self": 63.16454122299808,
"children": {
"TorchPPOOptimizer.update": {
"total": 125.33881122800204,
"count": 4587,
"is_parallel": true,
"self": 125.33881122800204
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14790958599996884,
"count": 1,
"self": 0.0009885880000410907,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14692099799992775,
"count": 1,
"self": 0.14692099799992775
}
}
}
}
}
}
}
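The dump above is the `timers.json` run log that ML-Agents writes alongside a training run. `gauges` summarizes each tracked statistic (its value at the last summary write, plus its min, max, and update count over the run), `metadata` records how and where the run was launched, and the remaining tree is a hierarchical wall-clock profile with `total` seconds, call `count`, `self` time, and `children` per node. The snippet below is a minimal sketch of how one might pull a few headline numbers out of this file; it assumes the default ML-Agents output location `results/<run-id>/run_logs/timers.json` for the run-id used in `command_line_arguments`, so adjust the path to wherever your copy lives.

```python
import json

# Assumed path: ML-Agents usually writes this file per run under
# results/<run-id>/run_logs/; adjust as needed for your setup.
PATH = "results/SnowballTarget1/run_logs/timers.json"

with open(PATH) as f:
    root = json.load(f)

# Each gauge keeps the most recent value plus running min/max and count.
gauges = root["gauges"]
reward = gauges["SnowballTarget.Environment.CumulativeReward.mean"]
print(
    f"final mean reward: {reward['value']:.2f} "
    f"(min {reward['min']:.2f}, max {reward['max']:.2f}, "
    f"{reward['count']} summary writes)"
)

# The rest of the tree is a wall-clock profile of the training loop.
print(f"total training time: {root['total']:.1f}s over {root['count']} run(s)")
env_step = (
    root["children"]["TrainerController.start_learning"]
        ["children"]["TrainerController.advance"]
        ["children"]["env_step"]
)
print(f"time spent stepping the environment: {env_step['total']:.1f}s")
```

For this particular log, the sketch would report a final mean reward of about 25.4 over roughly 468.6 seconds of training, with most of that time spent inside `env_step`.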