{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.46688005328178406,
"min": 0.46145811676979065,
"max": 0.548062801361084,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4791.58984375,
"min": 4390.77392578125,
"max": 5577.51953125,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 1999944.0,
"min": 1509984.0,
"max": 1999944.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 1999944.0,
"min": 1509984.0,
"max": 1999944.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.30431079864502,
"min": 13.924019813537598,
"max": 14.351710319519043,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2918.079345703125,
"min": 2711.312744140625,
"max": 2942.005615234375,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06890000453008813,
"min": 0.06153105939784105,
"max": 0.07687349683150316,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.34450002265044066,
"min": 0.25084291392019675,
"max": 0.364112547010996,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.14710586433317147,
"min": 0.13593446288038702,
"max": 0.18358633409975572,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7355293216658574,
"min": 0.5437378515215481,
"max": 0.835933457548712,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.296997567999969e-07,
"min": 7.296997567999969e-07,
"max": 7.415467528179998e-05,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.648498783999985e-06,
"min": 3.648498783999985e-06,
"max": 0.00036334837888399996,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10024320000000002,
"min": 0.10024320000000002,
"max": 0.1247182,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5012160000000001,
"min": 0.40295280000000006,
"max": 0.621116,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.213567999999995e-05,
"min": 2.213567999999995e-05,
"max": 0.00124343818,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00011067839999999976,
"min": 0.00011067839999999976,
"max": 0.006093688400000002,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.327272727272728,
"min": 27.020833333333332,
"max": 28.477272727272727,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1558.0,
"min": 1198.0,
"max": 1559.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.327272727272728,
"min": 27.020833333333332,
"max": 28.477272727272727,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1558.0,
"min": 1198.0,
"max": 1559.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692965776",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692967032"
},
"total": 1256.3228092959998,
"count": 1,
"self": 0.47509459699995205,
"children": {
"run_training.setup": {
"total": 0.0447613159994944,
"count": 1,
"self": 0.0447613159994944
},
"TrainerController.start_learning": {
"total": 1255.8029533830004,
"count": 1,
"self": 1.442056101005619,
"children": {
"TrainerController._reset_env": {
"total": 4.730609146000461,
"count": 1,
"self": 4.730609146000461
},
"TrainerController.advance": {
"total": 1249.4823890559937,
"count": 45477,
"self": 0.7374466800038135,
"children": {
"env_step": {
"total": 1248.74494237599,
"count": 45477,
"self": 911.1786683529326,
"children": {
"SubprocessEnvManager._take_step": {
"total": 336.8326590630595,
"count": 45477,
"self": 4.573453362008877,
"children": {
"TorchPolicy.evaluate": {
"total": 332.25920570105063,
"count": 45477,
"self": 332.25920570105063
}
}
},
"workers": {
"total": 0.7336149599977944,
"count": 45477,
"self": 0.0,
"children": {
"worker_root": {
"total": 1251.7938582959005,
"count": 45477,
"is_parallel": true,
"self": 585.271636632955,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019754929999180604,
"count": 1,
"is_parallel": true,
"self": 0.0005514640006367699,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014240289992812905,
"count": 10,
"is_parallel": true,
"self": 0.0014240289992812905
}
}
},
"UnityEnvironment.step": {
"total": 0.07299540999974852,
"count": 1,
"is_parallel": true,
"self": 0.0006748169989805319,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004447620003702468,
"count": 1,
"is_parallel": true,
"self": 0.0004447620003702468
},
"communicator.exchange": {
"total": 0.06937806499990984,
"count": 1,
"is_parallel": true,
"self": 0.06937806499990984
},
"steps_from_proto": {
"total": 0.0024977660004879,
"count": 1,
"is_parallel": true,
"self": 0.0006389070013028686,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018588589991850313,
"count": 10,
"is_parallel": true,
"self": 0.0018588589991850313
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 666.5222216629454,
"count": 45476,
"is_parallel": true,
"self": 27.84803703185571,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.8533773930194,
"count": 45476,
"is_parallel": true,
"self": 13.8533773930194
},
"communicator.exchange": {
"total": 527.5904366399864,
"count": 45476,
"is_parallel": true,
"self": 527.5904366399864
},
"steps_from_proto": {
"total": 97.23037059808394,
"count": 45476,
"is_parallel": true,
"self": 17.93557078792037,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.29479981016357,
"count": 454760,
"is_parallel": true,
"self": 79.29479981016357
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00020477300040511182,
"count": 1,
"self": 0.00020477300040511182,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1238.3293930730097,
"count": 1159049,
"is_parallel": true,
"self": 27.88234454603662,
"children": {
"process_trajectory": {
"total": 673.0375864319767,
"count": 1159049,
"is_parallel": true,
"self": 669.9807408189754,
"children": {
"RLTrainer._checkpoint": {
"total": 3.0568456130013146,
"count": 10,
"is_parallel": true,
"self": 3.0568456130013146
}
}
},
"_update_policy": {
"total": 537.4094620949963,
"count": 227,
"is_parallel": true,
"self": 216.54626846698739,
"children": {
"TorchPPOOptimizer.update": {
"total": 320.86319362800896,
"count": 11574,
"is_parallel": true,
"self": 320.86319362800896
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14769430700016528,
"count": 1,
"self": 0.0011035859997718944,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1465907210003934,
"count": 1,
"self": 0.1465907210003934
}
}
}
}
}
}
}