{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0354397296905518,
"min": 1.0354397296905518,
"max": 2.872195243835449,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9920.5478515625,
"min": 9920.5478515625,
"max": 29508.93359375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.881795883178711,
"min": 0.30320730805397034,
"max": 12.881795883178711,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2511.9501953125,
"min": 58.82221984863281,
"max": 2596.1640625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07007416399731073,
"min": 0.06162457402074234,
"max": 0.07567545332715905,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2802966559892429,
"min": 0.24649829608296936,
"max": 0.36234031834322317,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2051412756828701,
"min": 0.12759438287877642,
"max": 0.2860072107876049,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8205651027314804,
"min": 0.5103775315151057,
"max": 1.4300360539380244,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.75,
"min": 3.5681818181818183,
"max": 25.75,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1133.0,
"min": 157.0,
"max": 1386.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.75,
"min": 3.5681818181818183,
"max": 25.75,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1133.0,
"min": 157.0,
"max": 1386.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700312461",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700312839"
},
"total": 377.41094918799996,
"count": 1,
"self": 0.32144988599998214,
"children": {
"run_training.setup": {
"total": 0.049707328000010875,
"count": 1,
"self": 0.049707328000010875
},
"TrainerController.start_learning": {
"total": 377.03979197399997,
"count": 1,
"self": 0.4800627259990051,
"children": {
"TrainerController._reset_env": {
"total": 9.467420834999984,
"count": 1,
"self": 9.467420834999984
},
"TrainerController.advance": {
"total": 367.00959616700106,
"count": 18203,
"self": 0.21976895799548402,
"children": {
"env_step": {
"total": 366.7898272090056,
"count": 18203,
"self": 249.92038940700155,
"children": {
"SubprocessEnvManager._take_step": {
"total": 116.64752890500364,
"count": 18203,
"self": 1.2135913309909938,
"children": {
"TorchPolicy.evaluate": {
"total": 115.43393757401265,
"count": 18203,
"self": 115.43393757401265
}
}
},
"workers": {
"total": 0.22190889700038952,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 376.3803241360046,
"count": 18203,
"is_parallel": true,
"self": 192.84537577300688,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00488973299997042,
"count": 1,
"is_parallel": true,
"self": 0.0033515049999550683,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015382280000153514,
"count": 10,
"is_parallel": true,
"self": 0.0015382280000153514
}
}
},
"UnityEnvironment.step": {
"total": 0.026894056999935856,
"count": 1,
"is_parallel": true,
"self": 0.0003840449999188422,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003190590000485827,
"count": 1,
"is_parallel": true,
"self": 0.0003190590000485827
},
"communicator.exchange": {
"total": 0.024967053999944255,
"count": 1,
"is_parallel": true,
"self": 0.024967053999944255
},
"steps_from_proto": {
"total": 0.0012238990000241756,
"count": 1,
"is_parallel": true,
"self": 0.00025634999974499806,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009675490002791776,
"count": 10,
"is_parallel": true,
"self": 0.0009675490002791776
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 183.53494836299774,
"count": 18202,
"is_parallel": true,
"self": 7.071432302009384,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.691474815994411,
"count": 18202,
"is_parallel": true,
"self": 3.691474815994411
},
"communicator.exchange": {
"total": 150.05216554099195,
"count": 18202,
"is_parallel": true,
"self": 150.05216554099195
},
"steps_from_proto": {
"total": 22.719875704001993,
"count": 18202,
"is_parallel": true,
"self": 4.393616855944515,
"children": {
"_process_rank_one_or_two_observation": {
"total": 18.326258848057478,
"count": 182020,
"is_parallel": true,
"self": 18.326258848057478
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001539309999998295,
"count": 1,
"self": 0.0001539309999998295,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 366.1313267459508,
"count": 238297,
"is_parallel": true,
"self": 3.1432667489453934,
"children": {
"process_trajectory": {
"total": 209.04659103500546,
"count": 238297,
"is_parallel": true,
"self": 208.50515498500556,
"children": {
"RLTrainer._checkpoint": {
"total": 0.541436049999902,
"count": 4,
"is_parallel": true,
"self": 0.541436049999902
}
}
},
"_update_policy": {
"total": 153.94146896199993,
"count": 90,
"is_parallel": true,
"self": 35.80477726899801,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.13669169300192,
"count": 4587,
"is_parallel": true,
"self": 118.13669169300192
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08255831499991473,
"count": 1,
"self": 0.000824247999958061,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08173406699995667,
"count": 1,
"self": 0.08173406699995667
}
}
}
}
}
}
}
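
A minimal sketch of how the gauge data above can be inspected, assuming the dump is saved in the repository as run_logs/timers.json (that path is an assumption, not stated in the file itself); it only relies on the top-level "gauges" and "total" keys visible above:

import json

# Load the ML-Agents metrics/timer dump shown above
with open("run_logs/timers.json") as f:
    stats = json.load(f)

# Each gauge records the latest value plus min/max/count over the summaries
for name, gauge in stats["gauges"].items():
    print(f"{name}: value={gauge['value']}, min={gauge['min']}, "
          f"max={gauge['max']}, count={gauge['count']}")

# Total wall-clock time of the run, in seconds
print("total seconds:", stats["total"])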