{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9737669825553894,
"min": 0.9737669825553894,
"max": 2.8477532863616943,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9297.52734375,
"min": 9297.52734375,
"max": 29226.4921875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.822077751159668,
"min": 0.5429990291595459,
"max": 12.82219409942627,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2500.30517578125,
"min": 105.34181213378906,
"max": 2615.7275390625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06676845516581215,
"min": 0.06237705047975646,
"max": 0.07499224423384065,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2670738206632486,
"min": 0.24950820191902584,
"max": 0.3749612211692033,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20973283483409416,
"min": 0.1315800077989515,
"max": 0.2725131223160846,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8389313393363766,
"min": 0.526320031195806,
"max": 1.3206256099191367,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.09090909090909,
"min": 3.840909090909091,
"max": 25.295454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1104.0,
"min": 169.0,
"max": 1381.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.09090909090909,
"min": 3.840909090909091,
"max": 25.295454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1104.0,
"min": 169.0,
"max": 1381.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679436159",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679436613"
},
"total": 454.27794730799997,
"count": 1,
"self": 0.4367535389998807,
"children": {
"run_training.setup": {
"total": 0.10426161500004127,
"count": 1,
"self": 0.10426161500004127
},
"TrainerController.start_learning": {
"total": 453.73693215400004,
"count": 1,
"self": 0.521177101003218,
"children": {
"TrainerController._reset_env": {
"total": 8.786569208000003,
"count": 1,
"self": 8.786569208000003
},
"TrainerController.advance": {
"total": 444.2956438649969,
"count": 18202,
"self": 0.2697542229965961,
"children": {
"env_step": {
"total": 444.0258896420003,
"count": 18202,
"self": 322.0301683309988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 121.72784948800086,
"count": 18202,
"self": 2.409746105998181,
"children": {
"TorchPolicy.evaluate": {
"total": 119.31810338200268,
"count": 18202,
"self": 119.31810338200268
}
}
},
"workers": {
"total": 0.26787182300068935,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 452.32907189398935,
"count": 18202,
"is_parallel": true,
"self": 217.96029296899678,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0049242930000445995,
"count": 1,
"is_parallel": true,
"self": 0.003434133999974165,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014901590000704346,
"count": 10,
"is_parallel": true,
"self": 0.0014901590000704346
}
}
},
"UnityEnvironment.step": {
"total": 0.044138550000013765,
"count": 1,
"is_parallel": true,
"self": 0.0005396269999664582,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038351900002453476,
"count": 1,
"is_parallel": true,
"self": 0.00038351900002453476
},
"communicator.exchange": {
"total": 0.041466677000016716,
"count": 1,
"is_parallel": true,
"self": 0.041466677000016716
},
"steps_from_proto": {
"total": 0.0017487270000060562,
"count": 1,
"is_parallel": true,
"self": 0.0003549569999563573,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013937700000496989,
"count": 10,
"is_parallel": true,
"self": 0.0013937700000496989
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 234.36877892499257,
"count": 18201,
"is_parallel": true,
"self": 9.382024527993565,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.00912664301012,
"count": 18201,
"is_parallel": true,
"self": 5.00912664301012
},
"communicator.exchange": {
"total": 189.66076142998884,
"count": 18201,
"is_parallel": true,
"self": 189.66076142998884
},
"steps_from_proto": {
"total": 30.316866324000046,
"count": 18201,
"is_parallel": true,
"self": 5.869759810998914,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.44710651300113,
"count": 182010,
"is_parallel": true,
"self": 24.44710651300113
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010714899997310567,
"count": 1,
"self": 0.00010714899997310567,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 441.24087976604307,
"count": 388424,
"is_parallel": true,
"self": 9.065434973059553,
"children": {
"process_trajectory": {
"total": 243.60303965398356,
"count": 388424,
"is_parallel": true,
"self": 242.15874137898362,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4442982749999373,
"count": 4,
"is_parallel": true,
"self": 1.4442982749999373
}
}
},
"_update_policy": {
"total": 188.57240513899995,
"count": 90,
"is_parallel": true,
"self": 71.15227853000607,
"children": {
"TorchPPOOptimizer.update": {
"total": 117.42012660899388,
"count": 4587,
"is_parallel": true,
"self": 117.42012660899388
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13343483099993136,
"count": 1,
"self": 0.0008533029998716302,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13258152800005973,
"count": 1,
"self": 0.13258152800005973
}
}
}
}
}
}