{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8651920557022095,
"min": 0.8523612022399902,
"max": 2.8740010261535645,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8279.8876953125,
"min": 8279.8876953125,
"max": 29401.029296875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.522505760192871,
"min": 0.3638897240161896,
"max": 12.52877426147461,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2441.888671875,
"min": 70.5946044921875,
"max": 2555.869873046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06259346365439734,
"min": 0.06255198740380401,
"max": 0.07434596892277363,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2503738546175894,
"min": 0.25020794961521603,
"max": 0.36708153742844896,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20776893717108985,
"min": 0.12567587055530693,
"max": 0.3196186223158649,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8310757486843594,
"min": 0.5027034822212277,
"max": 1.5016098129289117,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.477272727272727,
"min": 3.6363636363636362,
"max": 24.90909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1077.0,
"min": 160.0,
"max": 1370.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.477272727272727,
"min": 3.6363636363636362,
"max": 24.90909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1077.0,
"min": 160.0,
"max": 1370.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688700820",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688701311"
},
"total": 490.800143774,
"count": 1,
"self": 0.4316293919999339,
"children": {
"run_training.setup": {
"total": 0.04252697400002603,
"count": 1,
"self": 0.04252697400002603
},
"TrainerController.start_learning": {
"total": 490.325987408,
"count": 1,
"self": 0.6520620690052965,
"children": {
"TrainerController._reset_env": {
"total": 4.192583184,
"count": 1,
"self": 4.192583184
},
"TrainerController.advance": {
"total": 485.33756374199464,
"count": 18204,
"self": 0.3217913209924177,
"children": {
"env_step": {
"total": 485.0157724210022,
"count": 18204,
"self": 354.5796261739994,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.12360594000555,
"count": 18204,
"self": 2.0692647820065133,
"children": {
"TorchPolicy.evaluate": {
"total": 128.05434115799903,
"count": 18204,
"self": 128.05434115799903
}
}
},
"workers": {
"total": 0.31254030699730606,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 488.4018560430036,
"count": 18204,
"is_parallel": true,
"self": 224.17367479400622,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005478624000005539,
"count": 1,
"is_parallel": true,
"self": 0.003760557000020981,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017180669999845577,
"count": 10,
"is_parallel": true,
"self": 0.0017180669999845577
}
}
},
"UnityEnvironment.step": {
"total": 0.04340403500000889,
"count": 1,
"is_parallel": true,
"self": 0.00045465100004093983,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032256599999413993,
"count": 1,
"is_parallel": true,
"self": 0.00032256599999413993
},
"communicator.exchange": {
"total": 0.04061275599997316,
"count": 1,
"is_parallel": true,
"self": 0.04061275599997316
},
"steps_from_proto": {
"total": 0.0020140620000006493,
"count": 1,
"is_parallel": true,
"self": 0.0003402859999823704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016737760000182789,
"count": 10,
"is_parallel": true,
"self": 0.0016737760000182789
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 264.2281812489974,
"count": 18203,
"is_parallel": true,
"self": 10.932735793989508,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.466403400007152,
"count": 18203,
"is_parallel": true,
"self": 5.466403400007152
},
"communicator.exchange": {
"total": 211.65751243200293,
"count": 18203,
"is_parallel": true,
"self": 211.65751243200293
},
"steps_from_proto": {
"total": 36.17152962299778,
"count": 18203,
"is_parallel": true,
"self": 6.735939079007807,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.435590543989974,
"count": 182030,
"is_parallel": true,
"self": 29.435590543989974
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015533200007666892,
"count": 1,
"self": 0.00015533200007666892,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 481.74200125804015,
"count": 475801,
"is_parallel": true,
"self": 10.128154182054175,
"children": {
"process_trajectory": {
"total": 267.8717545559862,
"count": 475801,
"is_parallel": true,
"self": 266.3726373989862,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4991171569999437,
"count": 4,
"is_parallel": true,
"self": 1.4991171569999437
}
}
},
"_update_policy": {
"total": 203.74209251999977,
"count": 90,
"is_parallel": true,
"self": 75.32750616000109,
"children": {
"TorchPPOOptimizer.update": {
"total": 128.41458635999868,
"count": 4584,
"is_parallel": true,
"self": 128.41458635999868
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.143623080999987,
"count": 1,
"self": 0.0009553270000424163,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14266775399994458,
"count": 1,
"self": 0.14266775399994458
}
}
}
}
}
}
}