{
"name": "root",
"gauges": {
"3DBall.Policy.Entropy.mean": {
"value": 1.2530262470245361,
"min": 1.249975562095642,
"max": 1.4189382791519165,
"count": 41
},
"3DBall.Policy.Entropy.sum": {
"value": 15036.3154296875,
"min": 14999.70703125,
"max": 19505.73046875,
"count": 41
},
"3DBall.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 22.092130518234164,
"max": 999.0,
"count": 41
},
"3DBall.Environment.EpisodeLength.sum": {
"value": 11988.0,
"min": 11422.0,
"max": 12761.0,
"count": 41
},
"3DBall.Step.mean": {
"value": 491224.0,
"min": 11989.0,
"max": 491224.0,
"count": 41
},
"3DBall.Step.sum": {
"value": 491224.0,
"min": 11989.0,
"max": 491224.0,
"count": 41
},
"3DBall.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.998326301574707,
"min": -0.21952161192893982,
"max": 10.004788398742676,
"count": 41
},
"3DBall.Policy.ExtrinsicValueEstimate.sum": {
"value": 119.97991180419922,
"min": -114.15123748779297,
"max": 221.3089599609375,
"count": 41
},
"3DBall.Environment.CumulativeReward.mean": {
"value": 100.00001525878906,
"min": 1.205576880161579,
"max": 100.00001525878906,
"count": 41
},
"3DBall.Environment.CumulativeReward.sum": {
"value": 1200.0001831054688,
"min": 626.899977684021,
"max": 1243.4001913070679,
"count": 41
},
"3DBall.Policy.ExtrinsicReward.mean": {
"value": 100.00001525878906,
"min": 1.205576880161579,
"max": 100.00001525878906,
"count": 41
},
"3DBall.Policy.ExtrinsicReward.sum": {
"value": 1200.0001831054688,
"min": 626.899977684021,
"max": 1243.4001913070679,
"count": 41
},
"3DBall.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 41
},
"3DBall.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 41
},
"3DBall.Losses.PolicyLoss.mean": {
"value": 0.0875553363431031,
"min": 0.0853438666777715,
"max": 0.10285655303845986,
"count": 38
},
"3DBall.Losses.PolicyLoss.sum": {
"value": 0.0875553363431031,
"min": 0.0853438666777715,
"max": 0.10285655303845986,
"count": 38
},
"3DBall.Losses.ValueLoss.mean": {
"value": 0.003986369480576965,
"min": 0.0011322325467241375,
"max": 3.11412584240732,
"count": 38
},
"3DBall.Losses.ValueLoss.sum": {
"value": 0.003986369480576965,
"min": 0.0011322325467241375,
"max": 3.11412584240732,
"count": 38
},
"3DBall.Policy.LearningRate.mean": {
"value": 1.006569664480001e-05,
"min": 1.006569664480001e-05,
"max": 0.0002927814024062,
"count": 38
},
"3DBall.Policy.LearningRate.sum": {
"value": 1.006569664480001e-05,
"min": 1.006569664480001e-05,
"max": 0.0002927814024062,
"count": 38
},
"3DBall.Policy.Epsilon.mean": {
"value": 0.10335519999999998,
"min": 0.10335519999999998,
"max": 0.19759379999999996,
"count": 38
},
"3DBall.Policy.Epsilon.sum": {
"value": 0.10335519999999998,
"min": 0.10335519999999998,
"max": 0.19759379999999996,
"count": 38
},
"3DBall.Policy.Beta.mean": {
"value": 4.321648000000003e-05,
"min": 4.321648000000003e-05,
"max": 0.0009761786200000003,
"count": 38
},
"3DBall.Policy.Beta.sum": {
"value": 4.321648000000003e-05,
"min": 4.321648000000003e-05,
"max": 0.0009761786200000003,
"count": 38
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1657904946",
"python_version": "3.8.13 (default, Mar 28 2022, 06:16:26) \n[Clang 12.0.0 ]",
"command_line_arguments": "/usr/local/Caskroom/miniconda/base/envs/ml-agents/bin/mlagents-learn ./config/ppo/3DBall.yaml --run-id=first3DBallRun3 --force --env=./trained-envs-executables/macos/3d_ball/3d ball --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1",
"numpy_version": "1.22.3",
"end_time_seconds": "1657905343"
},
"total": 397.450924047,
"count": 1,
"self": 0.34027323800000886,
"children": {
"run_training.setup": {
"total": 0.04273055499999989,
"count": 1,
"self": 0.04273055499999989
},
"TrainerController.start_learning": {
"total": 397.067920254,
"count": 1,
"self": 0.7700594520004529,
"children": {
"TrainerController._reset_env": {
"total": 2.5437515289999997,
"count": 1,
"self": 2.5437515289999997
},
"TrainerController.advance": {
"total": 393.69847658399954,
"count": 43985,
"self": 0.7830804549955701,
"children": {
"env_step": {
"total": 230.3137608409983,
"count": 43985,
"self": 202.2693348190025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 27.511717070997804,
"count": 43985,
"self": 2.4751557829968895,
"children": {
"TorchPolicy.evaluate": {
"total": 25.036561288000915,
"count": 42155,
"self": 4.847578280001574,
"children": {
"TorchPolicy.sample_actions": {
"total": 20.18898300799934,
"count": 42155,
"self": 20.18898300799934
}
}
}
}
},
"workers": {
"total": 0.532708950998015,
"count": 43985,
"self": 0.0,
"children": {
"worker_root": {
"total": 393.1404451730054,
"count": 43985,
"is_parallel": true,
"self": 237.20454446299738,
"children": {
"steps_from_proto": {
"total": 0.000473571000000117,
"count": 1,
"is_parallel": true,
"self": 0.00017897400000022046,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0002945969999998965,
"count": 2,
"is_parallel": true,
"self": 0.0002945969999998965
}
}
},
"UnityEnvironment.step": {
"total": 155.93542713900803,
"count": 43985,
"is_parallel": true,
"self": 5.6048235150036305,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.386330972004274,
"count": 43985,
"is_parallel": true,
"self": 7.386330972004274
},
"communicator.exchange": {
"total": 132.06233277000166,
"count": 43985,
"is_parallel": true,
"self": 132.06233277000166
},
"steps_from_proto": {
"total": 10.88193988199847,
"count": 43985,
"is_parallel": true,
"self": 5.169313770000414,
"children": {
"_process_rank_one_or_two_observation": {
"total": 5.7126261119980555,
"count": 87970,
"is_parallel": true,
"self": 5.7126261119980555
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 162.60163528800567,
"count": 43985,
"self": 1.2461401930077045,
"children": {
"process_trajectory": {
"total": 22.86444937999798,
"count": 43985,
"self": 22.749369502998007,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11507987699997102,
"count": 1,
"self": 0.11507987699997102
}
}
},
"_update_policy": {
"total": 138.49104571499998,
"count": 39,
"self": 67.82378533699823,
"children": {
"TorchPPOOptimizer.update": {
"total": 70.66726037800176,
"count": 23229,
"self": 70.66726037800176
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0870000437535055e-06,
"count": 1,
"self": 1.0870000437535055e-06
},
"TrainerController._save_models": {
"total": 0.05563160199994854,
"count": 1,
"self": 0.0015699369999424562,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05406166500000609,
"count": 1,
"self": 0.05406166500000609
}
}
}
}
}
}
}