{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.42006388306617737,
"min": 0.4001881778240204,
"max": 1.0226112604141235,
"count": 180
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4334.21923828125,
"min": 3899.380126953125,
"max": 10461.3134765625,
"count": 180
},
"SnowballTarget.Step.mean": {
"value": 1999968.0,
"min": 209936.0,
"max": 1999968.0,
"count": 180
},
"SnowballTarget.Step.sum": {
"value": 1999968.0,
"min": 209936.0,
"max": 1999968.0,
"count": 180
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.270292282104492,
"min": 11.798823356628418,
"max": 14.552634239196777,
"count": 180
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2911.1396484375,
"min": 2303.447998046875,
"max": 2983.2900390625,
"count": 180
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 180
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 180
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07010941528098877,
"min": 0.05930819391627949,
"max": 0.07843405551815807,
"count": 180
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.35054707640494387,
"min": 0.2377243166510929,
"max": 0.3921702775907904,
"count": 180
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.16052074736239863,
"min": 0.132761103849785,
"max": 0.22644827617149726,
"count": 180
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8026037368119932,
"min": 0.53104441539914,
"max": 1.1322413808574863,
"count": 180
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.16499761199997e-07,
"min": 7.16499761199997e-07,
"max": 0.00026917141027619996,
"count": 180
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.582498805999985e-06,
"min": 3.582498805999985e-06,
"max": 0.0013384320538559998,
"count": 180
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1002388,
"min": 0.1002388,
"max": 0.1897238,
"count": 180
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.501194,
"min": 0.40293520000000005,
"max": 0.9461439999999999,
"count": 180
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.1916119999999956e-05,
"min": 2.1916119999999956e-05,
"max": 0.00448721762,
"count": 180
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00010958059999999978,
"min": 0.00010958059999999978,
"max": 0.022312585599999997,
"count": 180
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.272727272727273,
"min": 22.522727272727273,
"max": 28.618181818181817,
"count": 180
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1555.0,
"min": 991.0,
"max": 1574.0,
"count": 180
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.272727272727273,
"min": 22.522727272727273,
"max": 28.618181818181817,
"count": 180
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1555.0,
"min": 991.0,
"max": 1574.0,
"count": 180
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 180
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 180
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679201223",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget.x86_64 --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679205238"
},
"total": 4015.240720519,
"count": 1,
"self": 0.43275698100023874,
"children": {
"run_training.setup": {
"total": 0.09888846699959686,
"count": 1,
"self": 0.09888846699959686
},
"TrainerController.start_learning": {
"total": 4014.7090750710004,
"count": 1,
"self": 4.71832054284323,
"children": {
"TrainerController._reset_env": {
"total": 5.659111906999897,
"count": 1,
"self": 5.659111906999897
},
"TrainerController.advance": {
"total": 4004.1981672151574,
"count": 163674,
"self": 2.526422040025409,
"children": {
"env_step": {
"total": 4001.671745175132,
"count": 163674,
"self": 2892.840898169175,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1106.4984949000418,
"count": 163674,
"self": 18.84952482197332,
"children": {
"TorchPolicy.evaluate": {
"total": 1087.6489700780685,
"count": 163674,
"self": 1087.6489700780685
}
}
},
"workers": {
"total": 2.332352105915106,
"count": 163674,
"self": 0.0,
"children": {
"worker_root": {
"total": 4003.2350350009165,
"count": 163674,
"is_parallel": true,
"self": 1890.5377339919942,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018879219996961183,
"count": 1,
"is_parallel": true,
"self": 0.000592881999182282,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012950400005138363,
"count": 10,
"is_parallel": true,
"self": 0.0012950400005138363
}
}
},
"UnityEnvironment.step": {
"total": 0.034174099000210845,
"count": 1,
"is_parallel": true,
"self": 0.0006893470003888069,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038115799998195143,
"count": 1,
"is_parallel": true,
"self": 0.00038115799998195143
},
"communicator.exchange": {
"total": 0.030933258999993996,
"count": 1,
"is_parallel": true,
"self": 0.030933258999993996
},
"steps_from_proto": {
"total": 0.002170334999846091,
"count": 1,
"is_parallel": true,
"self": 0.0004783879990100104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016919470008360804,
"count": 10,
"is_parallel": true,
"self": 0.0016919470008360804
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2112.6973010089223,
"count": 163673,
"is_parallel": true,
"self": 83.50783148883147,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 44.78645162088287,
"count": 163673,
"is_parallel": true,
"self": 44.78645162088287
},
"communicator.exchange": {
"total": 1713.159306039065,
"count": 163673,
"is_parallel": true,
"self": 1713.159306039065
},
"steps_from_proto": {
"total": 271.24371186014287,
"count": 163673,
"is_parallel": true,
"self": 52.68720711913238,
"children": {
"_process_rank_one_or_two_observation": {
"total": 218.55650474101049,
"count": 1636730,
"is_parallel": true,
"self": 218.55650474101049
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001982789999601664,
"count": 1,
"self": 0.0001982789999601664,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 3974.8836340395733,
"count": 3568782,
"is_parallel": true,
"self": 85.5665882126641,
"children": {
"process_trajectory": {
"total": 2183.3365064108993,
"count": 3568782,
"is_parallel": true,
"self": 2174.2487073949014,
"children": {
"RLTrainer._checkpoint": {
"total": 9.087799015997916,
"count": 36,
"is_parallel": true,
"self": 9.087799015997916
}
}
},
"_update_policy": {
"total": 1705.9805394160098,
"count": 818,
"is_parallel": true,
"self": 640.6093947930644,
"children": {
"TorchPPOOptimizer.update": {
"total": 1065.3711446229454,
"count": 41715,
"is_parallel": true,
"self": 1065.3711446229454
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13327712699992844,
"count": 1,
"self": 0.001997130999370711,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13127999600055773,
"count": 1,
"self": 0.13127999600055773
}
}
}
}
}
}
}