{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9958349466323853,
"min": 0.9748709201812744,
"max": 2.867983818054199,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9573.95703125,
"min": 9573.95703125,
"max": 29434.119140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.898480415344238,
"min": 0.4154200255870819,
"max": 13.015299797058105,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2515.20361328125,
"min": 80.59148406982422,
"max": 2655.12109375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06922505580273448,
"min": 0.0620533174683349,
"max": 0.07566631524041798,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27690022321093793,
"min": 0.2482132698733396,
"max": 0.36086392742272083,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20471748819246013,
"min": 0.12787280657999767,
"max": 0.27367511964311786,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8188699527698405,
"min": 0.5114912263199907,
"max": 1.3683755982155892,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.295454545454547,
"min": 3.272727272727273,
"max": 25.59090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1113.0,
"min": 144.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.295454545454547,
"min": 3.272727272727273,
"max": 25.59090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1113.0,
"min": 144.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678982043",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678982515"
},
"total": 471.98232890500003,
"count": 1,
"self": 0.38574213299989424,
"children": {
"run_training.setup": {
"total": 0.1089158640002097,
"count": 1,
"self": 0.1089158640002097
},
"TrainerController.start_learning": {
"total": 471.4876709079999,
"count": 1,
"self": 0.576619139041668,
"children": {
"TrainerController._reset_env": {
"total": 9.3491990120001,
"count": 1,
"self": 9.3491990120001
},
"TrainerController.advance": {
"total": 461.40660660695767,
"count": 18219,
"self": 0.2812070029417555,
"children": {
"env_step": {
"total": 461.1253996040159,
"count": 18219,
"self": 323.143738624,
"children": {
"SubprocessEnvManager._take_step": {
"total": 137.69975390102127,
"count": 18219,
"self": 2.020776693944299,
"children": {
"TorchPolicy.evaluate": {
"total": 135.67897720707697,
"count": 18219,
"self": 135.67897720707697
}
}
},
"workers": {
"total": 0.2819070789946636,
"count": 18219,
"self": 0.0,
"children": {
"worker_root": {
"total": 469.7219479880132,
"count": 18219,
"is_parallel": true,
"self": 225.537891905979,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020936370001436444,
"count": 1,
"is_parallel": true,
"self": 0.0007797979992574255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013138390008862189,
"count": 10,
"is_parallel": true,
"self": 0.0013138390008862189
}
}
},
"UnityEnvironment.step": {
"total": 0.05417842199994993,
"count": 1,
"is_parallel": true,
"self": 0.0005442090000542521,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003774369997699978,
"count": 1,
"is_parallel": true,
"self": 0.0003774369997699978
},
"communicator.exchange": {
"total": 0.051378301000113424,
"count": 1,
"is_parallel": true,
"self": 0.051378301000113424
},
"steps_from_proto": {
"total": 0.0018784750000122585,
"count": 1,
"is_parallel": true,
"self": 0.000408111000979261,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014703639990329975,
"count": 10,
"is_parallel": true,
"self": 0.0014703639990329975
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 244.1840560820342,
"count": 18218,
"is_parallel": true,
"self": 9.573762101021202,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.375444526972842,
"count": 18218,
"is_parallel": true,
"self": 5.375444526972842
},
"communicator.exchange": {
"total": 196.44589237201217,
"count": 18218,
"is_parallel": true,
"self": 196.44589237201217
},
"steps_from_proto": {
"total": 32.78895708202799,
"count": 18218,
"is_parallel": true,
"self": 6.6323903570933,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.15656672493469,
"count": 182180,
"is_parallel": true,
"self": 26.15656672493469
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001503560001765436,
"count": 1,
"self": 0.0001503560001765436,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 457.8077600360225,
"count": 426762,
"is_parallel": true,
"self": 10.482762268136867,
"children": {
"process_trajectory": {
"total": 258.0722363588857,
"count": 426762,
"is_parallel": true,
"self": 256.88915257688586,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1830837819998123,
"count": 4,
"is_parallel": true,
"self": 1.1830837819998123
}
}
},
"_update_policy": {
"total": 189.25276140899996,
"count": 90,
"is_parallel": true,
"self": 68.11220825399869,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.14055315500127,
"count": 4587,
"is_parallel": true,
"self": 121.14055315500127
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15509579400031726,
"count": 1,
"self": 0.0008356340003956575,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1542601599999216,
"count": 1,
"self": 0.1542601599999216
}
}
}
}
}
}
}
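
For reference, the block above is the timer/gauge summary that ML-Agents writes at the end of a training run (the metadata records mlagents_version 0.31.0.dev0 and the exact mlagents-learn command used). Below is a minimal sketch, not part of the original repo, of how such a file could be inspected; the path run_logs/timers.json is an assumption based on the usual ML-Agents output layout, so adjust it to wherever this JSON actually lives.

import json

# Assumption: the JSON above is saved as run_logs/timers.json, the usual
# ML-Agents output location; change the path if your copy lives elsewhere.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge keeps the latest value plus min/max/count across the run,
# e.g. SnowballTarget.Environment.CumulativeReward.mean ends at ~25.3 here.
for name, gauge in timers["gauges"].items():
    print(f"{name}: value={gauge['value']:.4f} "
          f"(min={gauge['min']:.4f}, max={gauge['max']:.4f}, count={gauge['count']})")

# The profiling data is a tree of timer nodes: each node reports total seconds,
# a call count, and nested "children". Walking it shows where wall-clock time
# went (communicator.exchange accounts for ~196 s of the ~472 s total in this run).
def print_timer_tree(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.2f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        print_timer_tree(child, child_name, depth + 1)

print_timer_tree(timers)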