{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0006742477416992,
"min": 1.0006742477416992,
"max": 2.8659210205078125,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9554.4375,
"min": 9554.4375,
"max": 29381.421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.567713737487793,
"min": 0.38466447591781616,
"max": 12.567713737487793,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2450.7041015625,
"min": 74.62490844726562,
"max": 2533.6982421875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06775474859008968,
"min": 0.06289641794983433,
"max": 0.07432345583488224,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2710189943603587,
"min": 0.26051240516872165,
"max": 0.37161727917441123,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1975701829674197,
"min": 0.12026640623305723,
"max": 0.2932920032856511,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7902807318696788,
"min": 0.48106562493222893,
"max": 1.4664600164282555,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.977272727272727,
"min": 3.3181818181818183,
"max": 24.977272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1099.0,
"min": 146.0,
"max": 1353.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.977272727272727,
"min": 3.3181818181818183,
"max": 24.977272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1099.0,
"min": 146.0,
"max": 1353.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678985289",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678985748"
},
"total": 458.461173326,
"count": 1,
"self": 0.4278694219999579,
"children": {
"run_training.setup": {
"total": 0.10860596500000952,
"count": 1,
"self": 0.10860596500000952
},
"TrainerController.start_learning": {
"total": 457.92469793900005,
"count": 1,
"self": 0.5454323040013378,
"children": {
"TrainerController._reset_env": {
"total": 9.369324665000022,
"count": 1,
"self": 9.369324665000022
},
"TrainerController.advance": {
"total": 447.8787338629987,
"count": 18201,
"self": 0.2808378079914178,
"children": {
"env_step": {
"total": 447.59789605500725,
"count": 18201,
"self": 324.1301853760099,
"children": {
"SubprocessEnvManager._take_step": {
"total": 123.20400422200365,
"count": 18201,
"self": 2.0306372500032808,
"children": {
"TorchPolicy.evaluate": {
"total": 121.17336697200037,
"count": 18201,
"self": 121.17336697200037
}
}
},
"workers": {
"total": 0.2637064569937593,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 456.52092677299925,
"count": 18201,
"is_parallel": true,
"self": 217.6389669730009,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005976062999991427,
"count": 1,
"is_parallel": true,
"self": 0.004442185000016252,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001533877999975175,
"count": 10,
"is_parallel": true,
"self": 0.001533877999975175
}
}
},
"UnityEnvironment.step": {
"total": 0.10477444100001776,
"count": 1,
"is_parallel": true,
"self": 0.006530690999994704,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045363500001371904,
"count": 1,
"is_parallel": true,
"self": 0.00045363500001371904
},
"communicator.exchange": {
"total": 0.09050651399999765,
"count": 1,
"is_parallel": true,
"self": 0.09050651399999765
},
"steps_from_proto": {
"total": 0.007283601000011686,
"count": 1,
"is_parallel": true,
"self": 0.0003864870000143128,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006897113999997373,
"count": 10,
"is_parallel": true,
"self": 0.006897113999997373
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 238.88195979999836,
"count": 18200,
"is_parallel": true,
"self": 9.570402731990612,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.103622965006878,
"count": 18200,
"is_parallel": true,
"self": 5.103622965006878
},
"communicator.exchange": {
"total": 193.73055856699838,
"count": 18200,
"is_parallel": true,
"self": 193.73055856699838
},
"steps_from_proto": {
"total": 30.477375536002484,
"count": 18200,
"is_parallel": true,
"self": 6.032038180009039,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.445337355993445,
"count": 182000,
"is_parallel": true,
"self": 24.445337355993445
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001157409999450465,
"count": 1,
"self": 0.0001157409999450465,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 444.78629745999166,
"count": 392109,
"is_parallel": true,
"self": 9.240268402990125,
"children": {
"process_trajectory": {
"total": 244.3337443390014,
"count": 392109,
"is_parallel": true,
"self": 243.60354508400135,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7301992550000591,
"count": 4,
"is_parallel": true,
"self": 0.7301992550000591
}
}
},
"_update_policy": {
"total": 191.21228471800012,
"count": 90,
"is_parallel": true,
"self": 73.44514242400163,
"children": {
"TorchPPOOptimizer.update": {
"total": 117.76714229399849,
"count": 4587,
"is_parallel": true,
"self": 117.76714229399849
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13109136600007787,
"count": 1,
"self": 0.0008577900000545924,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13023357600002328,
"count": 1,
"self": 0.13023357600002328
}
}
}
}
}
}
}
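
The JSON above appears to be the run_logs/timers.json that ML-Agents writes for this SnowballTarget run (launched via mlagents-learn, per command_line_arguments in the metadata): "gauges" holds per-metric summaries (value, min, max, count), and the nested timer tree under "children" records where wall-clock time was spent. Below is a minimal sketch, assuming the file is saved locally as run_logs/timers.json, of how it could be inspected with Python's standard json module; the file path and the walk helper are illustrative and not part of the original run.

import json

# Minimal sketch (not part of the original log): the path below is an
# assumption about where the log lives locally.
with open("run_logs/timers.json") as f:
    root = json.load(f)

# Each gauge stores the latest value plus the min/max/count seen over the run.
reward = root["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
print(f"final mean reward: {reward['value']:.2f} "
      f"(min {reward['min']:.2f}, max {reward['max']:.2f}, count {reward['count']})")

# Timer nodes nest under "children"; walk them to see where wall-clock time went.
def walk(name, node, depth=0):
    print(f"{'    ' * depth}{name}: {node.get('total', 0.0):.1f}s x{node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk(root["name"], root)

For this run, the first print would report a final mean reward of roughly 24.98 (min 3.32, max 24.98) over 20 summaries, and the timer walk would show communicator.exchange (about 193.7 s of the 458.5 s total) dominating the environment-step time.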