{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5554143786430359,
"min": 0.5431214570999146,
"max": 0.8691041469573975,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5351.97314453125,
"min": 5228.943359375,
"max": 8976.9765625,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 499952.0,
"min": 209936.0,
"max": 499952.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 499952.0,
"min": 209936.0,
"max": 499952.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.531689643859863,
"min": 12.990665435791016,
"max": 13.649582862854004,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2760.464599609375,
"min": 2494.838134765625,
"max": 2784.514892578125,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0689360275408855,
"min": 0.06196786984219216,
"max": 0.08477714681004156,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3446801377044275,
"min": 0.24787147936876863,
"max": 0.3776190985734894,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17166311223717298,
"min": 0.1591150899801184,
"max": 0.21358135138072218,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8583155611858649,
"min": 0.6364603599204736,
"max": 0.9760537848753088,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.1056989647999945e-06,
"min": 3.1056989647999945e-06,
"max": 0.00017668564110480002,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5528494823999972e-05,
"min": 1.5528494823999972e-05,
"max": 0.000853728215424,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10103520000000002,
"min": 0.10103520000000002,
"max": 0.15889520000000001,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5051760000000001,
"min": 0.4120608,
"max": 0.7845760000000002,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.165647999999992e-05,
"min": 6.165647999999992e-05,
"max": 0.002948870480000001,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003082823999999996,
"min": 0.0003082823999999996,
"max": 0.0142503424,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.254545454545454,
"min": 25.072727272727274,
"max": 26.818181818181817,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1444.0,
"min": 1137.0,
"max": 1472.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.254545454545454,
"min": 25.072727272727274,
"max": 26.818181818181817,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1444.0,
"min": 1137.0,
"max": 1472.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688361359",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688362084"
},
"total": 725.1287801450001,
"count": 1,
"self": 0.42301487899976564,
"children": {
"run_training.setup": {
"total": 0.03936998600011066,
"count": 1,
"self": 0.03936998600011066
},
"TrainerController.start_learning": {
"total": 724.6663952800002,
"count": 1,
"self": 0.8414607440045074,
"children": {
"TrainerController._reset_env": {
"total": 3.9242372219998742,
"count": 1,
"self": 3.9242372219998742
},
"TrainerController.advance": {
"total": 719.7430740599957,
"count": 27274,
"self": 0.42077153500281383,
"children": {
"env_step": {
"total": 719.3223025249929,
"count": 27274,
"self": 524.3226615199812,
"children": {
"SubprocessEnvManager._take_step": {
"total": 194.58223836600496,
"count": 27274,
"self": 2.794133769002201,
"children": {
"TorchPolicy.evaluate": {
"total": 191.78810459700276,
"count": 27274,
"self": 191.78810459700276
}
}
},
"workers": {
"total": 0.4174026390066956,
"count": 27274,
"self": 0.0,
"children": {
"worker_root": {
"total": 722.2940654699773,
"count": 27274,
"is_parallel": true,
"self": 337.9235702160188,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001969485000017812,
"count": 1,
"is_parallel": true,
"self": 0.0005988259999867296,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013706590000310825,
"count": 10,
"is_parallel": true,
"self": 0.0013706590000310825
}
}
},
"UnityEnvironment.step": {
"total": 0.08242344599989337,
"count": 1,
"is_parallel": true,
"self": 0.0006884039999022207,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045308199992177833,
"count": 1,
"is_parallel": true,
"self": 0.00045308199992177833
},
"communicator.exchange": {
"total": 0.07630878300005861,
"count": 1,
"is_parallel": true,
"self": 0.07630878300005861
},
"steps_from_proto": {
"total": 0.0049731770000107645,
"count": 1,
"is_parallel": true,
"self": 0.002791672000057588,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021815049999531766,
"count": 10,
"is_parallel": true,
"self": 0.0021815049999531766
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 384.37049525395855,
"count": 27273,
"is_parallel": true,
"self": 15.486524079959281,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.425367698016771,
"count": 27273,
"is_parallel": true,
"self": 8.425367698016771
},
"communicator.exchange": {
"total": 305.2477889279937,
"count": 27273,
"is_parallel": true,
"self": 305.2477889279937
},
"steps_from_proto": {
"total": 55.210814547988775,
"count": 27273,
"is_parallel": true,
"self": 10.211360998963528,
"children": {
"_process_rank_one_or_two_observation": {
"total": 44.99945354902525,
"count": 272730,
"is_parallel": true,
"self": 44.99945354902525
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.70650000149908e-05,
"count": 1,
"self": 3.70650000149908e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 714.1417267301904,
"count": 684441,
"is_parallel": true,
"self": 15.42709771122577,
"children": {
"process_trajectory": {
"total": 390.330625886965,
"count": 684441,
"is_parallel": true,
"self": 388.8141920999649,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5164337870000963,
"count": 6,
"is_parallel": true,
"self": 1.5164337870000963
}
}
},
"_update_policy": {
"total": 308.38400313199963,
"count": 136,
"is_parallel": true,
"self": 119.26334611800644,
"children": {
"TorchPPOOptimizer.update": {
"total": 189.1206570139932,
"count": 6933,
"is_parallel": true,
"self": 189.1206570139932
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15758618900008514,
"count": 1,
"self": 0.0010900650001985923,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15649612399988655,
"count": 1,
"self": 0.15649612399988655
}
}
}
}
}
}
}