{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.260896921157837,
"min": 2.260896921157837,
"max": 2.8903067111968994,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 21487.564453125,
"min": 21487.564453125,
"max": 29758.59765625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.7147886753082275,
"min": 0.10848310589790344,
"max": 2.7147886753082275,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 529.3837890625,
"min": 21.04572296142578,
"max": 529.3837890625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 10.25,
"min": 2.8636363636363638,
"max": 10.25,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 451.0,
"min": 126.0,
"max": 544.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 10.25,
"min": 2.8636363636363638,
"max": 10.25,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 451.0,
"min": 126.0,
"max": 544.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.03119076028803344,
"min": 0.030324644064724755,
"max": 0.03982431573725383,
"count": 9
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.03119076028803344,
"min": 0.030324644064724755,
"max": 0.03982431573725383,
"count": 9
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2643603929628929,
"min": 0.10406896757582823,
"max": 0.2783568767209848,
"count": 9
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.2643603929628929,
"min": 0.10406896757582823,
"max": 0.2783568767209848,
"count": 9
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.293209235599999e-05,
"min": 2.293209235599999e-05,
"max": 0.00026924401025199996,
"count": 9
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.293209235599999e-05,
"min": 2.293209235599999e-05,
"max": 0.00026924401025199996,
"count": 9
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10764400000000002,
"min": 0.10764400000000002,
"max": 0.18974799999999997,
"count": 9
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10764400000000002,
"min": 0.10764400000000002,
"max": 0.18974799999999997,
"count": 9
},
"SnowballTarget.Policy.Beta.mean": {
"value": 8.567559999999996e-05,
"min": 8.567559999999996e-05,
"max": 0.0008985052,
"count": 9
},
"SnowballTarget.Policy.Beta.sum": {
"value": 8.567559999999996e-05,
"min": 8.567559999999996e-05,
"max": 0.0008985052,
"count": 9
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710361533",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710361904"
},
"total": 371.05637807000016,
"count": 1,
"self": 0.32254712899998594,
"children": {
"run_training.setup": {
"total": 0.058175157000050604,
"count": 1,
"self": 0.058175157000050604
},
"TrainerController.start_learning": {
"total": 370.6756557840001,
"count": 1,
"self": 0.4335986389955906,
"children": {
"TrainerController._reset_env": {
"total": 2.9190766350000104,
"count": 1,
"self": 2.9190766350000104
},
"TrainerController.advance": {
"total": 367.2436087220044,
"count": 18192,
"self": 0.4408980120283559,
"children": {
"env_step": {
"total": 289.1463464069957,
"count": 18192,
"self": 226.09859325400635,
"children": {
"SubprocessEnvManager._take_step": {
"total": 62.78247694999186,
"count": 18192,
"self": 1.3891813849954815,
"children": {
"TorchPolicy.evaluate": {
"total": 61.39329556499638,
"count": 18192,
"self": 61.39329556499638
}
}
},
"workers": {
"total": 0.2652762029974838,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 369.83431145800455,
"count": 18192,
"is_parallel": true,
"self": 173.0308666539902,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002532737000137786,
"count": 1,
"is_parallel": true,
"self": 0.0007632859999375796,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017694510002002062,
"count": 10,
"is_parallel": true,
"self": 0.0017694510002002062
}
}
},
"UnityEnvironment.step": {
"total": 0.02826898400007849,
"count": 1,
"is_parallel": true,
"self": 0.0005571279998548562,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003967160000684089,
"count": 1,
"is_parallel": true,
"self": 0.0003967160000684089
},
"communicator.exchange": {
"total": 0.025833587000079206,
"count": 1,
"is_parallel": true,
"self": 0.025833587000079206
},
"steps_from_proto": {
"total": 0.001481553000076019,
"count": 1,
"is_parallel": true,
"self": 0.0003035020001789235,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011780509998970956,
"count": 10,
"is_parallel": true,
"self": 0.0011780509998970956
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 196.80344480401436,
"count": 18191,
"is_parallel": true,
"self": 7.200884225004529,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.8926841530171714,
"count": 18191,
"is_parallel": true,
"self": 3.8926841530171714
},
"communicator.exchange": {
"total": 161.70457059099363,
"count": 18191,
"is_parallel": true,
"self": 161.70457059099363
},
"steps_from_proto": {
"total": 24.005305834999035,
"count": 18191,
"is_parallel": true,
"self": 4.705077781013415,
"children": {
"_process_rank_one_or_two_observation": {
"total": 19.30022805398562,
"count": 181910,
"is_parallel": true,
"self": 19.30022805398562
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 77.65636430298036,
"count": 18192,
"self": 0.540954147968705,
"children": {
"process_trajectory": {
"total": 25.865606511011265,
"count": 18192,
"self": 25.446275068011346,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4193314429999191,
"count": 4,
"self": 0.4193314429999191
}
}
},
"_update_policy": {
"total": 51.249803644000394,
"count": 9,
"self": 36.293784923995645,
"children": {
"TorchPPOOptimizer.update": {
"total": 14.95601872000475,
"count": 1080,
"self": 14.95601872000475
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.710001904750243e-07,
"count": 1,
"self": 9.710001904750243e-07
},
"TrainerController._save_models": {
"total": 0.07937081699992632,
"count": 1,
"self": 0.00047476599979745515,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07889605100012886,
"count": 1,
"self": 0.07889605100012886
}
}
}
}
}
}
}