{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9059309363365173,
"min": 0.9059309363365173,
"max": 2.8218741416931152,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 17927.466796875,
"min": 17927.466796875,
"max": 58045.953125,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 19992.0,
"max": 199984.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 19992.0,
"max": 199984.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.104545593261719,
"min": 0.6539127826690674,
"max": 13.104545593261719,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 5228.7138671875,
"min": 260.91119384765625,
"max": 5228.7138671875,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 19701.0,
"min": 19701.0,
"max": 19701.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06959990624930357,
"min": 0.06547574243621271,
"max": 0.07339285991189422,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.6263991562437321,
"min": 0.5892816819259143,
"max": 0.660535739207048,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20983186302491522,
"min": 0.15744856274524432,
"max": 0.281030580758529,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.888486767224237,
"min": 1.4170370647071988,
"max": 2.529275226826761,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6332094556000002e-05,
"min": 1.6332094556000002e-05,
"max": 0.00028363200545599993,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.000146988851004,
"min": 0.000146988851004,
"max": 0.0025526880491039995,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10544400000000001,
"min": 0.10544400000000001,
"max": 0.19454400000000002,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.9489960000000001,
"min": 0.9489960000000001,
"max": 1.7508960000000002,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00028165560000000005,
"min": 0.00028165560000000005,
"max": 0.0047277456,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0025349004000000007,
"min": 0.0025349004000000007,
"max": 0.0425497104,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.606060606060606,
"min": 4.808080808080808,
"max": 25.828282828282827,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 2535.0,
"min": 476.0,
"max": 2557.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.606060606060606,
"min": 4.808080808080808,
"max": 25.828282828282827,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 2535.0,
"min": 476.0,
"max": 2557.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678693800",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678694498"
},
"total": 698.486242081,
"count": 1,
"self": 0.7064504140000736,
"children": {
"run_training.setup": {
"total": 0.2220008109999867,
"count": 1,
"self": 0.2220008109999867
},
"TrainerController.start_learning": {
"total": 697.557790856,
"count": 1,
"self": 1.0026535199973523,
"children": {
"TrainerController._reset_env": {
"total": 6.826679038000009,
"count": 1,
"self": 6.826679038000009
},
"TrainerController.advance": {
"total": 689.5578237660028,
"count": 18202,
"self": 0.5272575480104251,
"children": {
"env_step": {
"total": 689.0305662179924,
"count": 18202,
"self": 556.3805058329876,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.1503171209991,
"count": 18202,
"self": 5.1227913849939455,
"children": {
"TorchPolicy.evaluate": {
"total": 127.02752573600515,
"count": 18202,
"self": 127.02752573600515
}
}
},
"workers": {
"total": 0.4997432640056445,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 694.8355583180095,
"count": 18202,
"is_parallel": true,
"self": 308.9843949229972,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007334834999994655,
"count": 1,
"is_parallel": true,
"self": 0.0054120850001027065,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019227499998919484,
"count": 10,
"is_parallel": true,
"self": 0.0019227499998919484
}
}
},
"UnityEnvironment.step": {
"total": 0.09686668399996279,
"count": 1,
"is_parallel": true,
"self": 0.0009361209999951825,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005157519999556825,
"count": 1,
"is_parallel": true,
"self": 0.0005157519999556825
},
"communicator.exchange": {
"total": 0.09276901800001269,
"count": 1,
"is_parallel": true,
"self": 0.09276901800001269
},
"steps_from_proto": {
"total": 0.0026457929999992302,
"count": 1,
"is_parallel": true,
"self": 0.000743652000039674,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019021409999595562,
"count": 10,
"is_parallel": true,
"self": 0.0019021409999595562
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 385.8511633950123,
"count": 18201,
"is_parallel": true,
"self": 15.955175952008574,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.179617947002725,
"count": 18201,
"is_parallel": true,
"self": 8.179617947002725
},
"communicator.exchange": {
"total": 311.6394226209971,
"count": 18201,
"is_parallel": true,
"self": 311.6394226209971
},
"steps_from_proto": {
"total": 50.0769468750039,
"count": 18201,
"is_parallel": true,
"self": 10.864940795009488,
"children": {
"_process_rank_one_or_two_observation": {
"total": 39.21200607999441,
"count": 182010,
"is_parallel": true,
"self": 39.21200607999441
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002027819999739222,
"count": 1,
"self": 0.0002027819999739222,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 682.479013100006,
"count": 706317,
"is_parallel": true,
"self": 20.226990178014034,
"children": {
"process_trajectory": {
"total": 373.54394379999155,
"count": 706317,
"is_parallel": true,
"self": 371.38178160099153,
"children": {
"RLTrainer._checkpoint": {
"total": 2.162162199000022,
"count": 4,
"is_parallel": true,
"self": 2.162162199000022
}
}
},
"_update_policy": {
"total": 288.7080791220004,
"count": 90,
"is_parallel": true,
"self": 102.54170750700115,
"children": {
"TorchPPOOptimizer.update": {
"total": 186.16637161499926,
"count": 4587,
"is_parallel": true,
"self": 186.16637161499926
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1704317499999206,
"count": 1,
"self": 0.001631586999906176,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16880016300001444,
"count": 1,
"self": 0.16880016300001444
}
}
}
}
}
}
}