{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": -3.576278402306343e-07,
"min": -3.576278402306343e-07,
"max": 2.853149175643921,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": -0.0034185645636171103,
"min": -0.0037214753683656454,
"max": 29250.486328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5494749546051025,
"min": 0.23019982874393463,
"max": 1.8132851123809814,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 107.14762115478516,
"min": 44.65876770019531,
"max": 371.72344970703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 6.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 272.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 6.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 272.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04698769247761091,
"min": 0.045914910217377876,
"max": 0.17927464174830487,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.09397538495522181,
"min": 0.09182982043475575,
"max": 0.5378239252449146,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.00022483321179107388,
"min": 0.00014167892710318877,
"max": 0.21930968309227633,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.00044966642358214775,
"min": 0.00028335785420637754,
"max": 0.43861936618455266,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.432009785600003e-05,
"min": 6.432009785600003e-05,
"max": 0.002902320003256,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00012864019571200006,
"min": 0.00012864019571200006,
"max": 0.007419960052668,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102144,
"min": 0.102144,
"max": 0.196744,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.204288,
"min": 0.204288,
"max": 0.547332,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001169856000000001,
"min": 0.0001169856000000001,
"max": 0.0048375256,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0002339712000000002,
"min": 0.0002339712000000002,
"max": 0.012371866799999999,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713945534",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713946610"
},
"total": 1075.6014756499999,
"count": 1,
"self": 0.43132703399987804,
"children": {
"run_training.setup": {
"total": 0.06342404599996598,
"count": 1,
"self": 0.06342404599996598
},
"TrainerController.start_learning": {
"total": 1075.10672457,
"count": 1,
"self": 0.5901515229929828,
"children": {
"TrainerController._reset_env": {
"total": 2.6713747379999973,
"count": 1,
"self": 2.6713747379999973
},
"TrainerController.advance": {
"total": 1071.3782294240073,
"count": 18203,
"self": 0.28343075201655665,
"children": {
"env_step": {
"total": 1071.0947986719907,
"count": 18203,
"self": 916.2595843850149,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.5302365419803,
"count": 18203,
"self": 1.5045670279858427,
"children": {
"TorchPolicy.evaluate": {
"total": 153.02566951399444,
"count": 18203,
"self": 153.02566951399444
}
}
},
"workers": {
"total": 0.30497774499548314,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 1073.5785019270015,
"count": 18203,
"is_parallel": true,
"self": 863.7435115420095,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004702709000014238,
"count": 1,
"is_parallel": true,
"self": 0.003213532000017949,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001489176999996289,
"count": 10,
"is_parallel": true,
"self": 0.001489176999996289
}
}
},
"UnityEnvironment.step": {
"total": 0.04085419099999399,
"count": 1,
"is_parallel": true,
"self": 0.0006584569999290579,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003934070000468637,
"count": 1,
"is_parallel": true,
"self": 0.0003934070000468637
},
"communicator.exchange": {
"total": 0.037712424999995164,
"count": 1,
"is_parallel": true,
"self": 0.037712424999995164
},
"steps_from_proto": {
"total": 0.002089902000022903,
"count": 1,
"is_parallel": true,
"self": 0.0004004000001032182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016895019999196847,
"count": 10,
"is_parallel": true,
"self": 0.0016895019999196847
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 209.834990384992,
"count": 18202,
"is_parallel": true,
"self": 10.63633701399283,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.761099173005675,
"count": 18202,
"is_parallel": true,
"self": 5.761099173005675
},
"communicator.exchange": {
"total": 157.45708275600202,
"count": 18202,
"is_parallel": true,
"self": 157.45708275600202
},
"steps_from_proto": {
"total": 35.98047144199148,
"count": 18202,
"is_parallel": true,
"self": 6.818430501001103,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.16204094099038,
"count": 182020,
"is_parallel": true,
"self": 29.16204094099038
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019592099988585687,
"count": 1,
"self": 0.00019592099988585687,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1064.1910759700527,
"count": 826578,
"is_parallel": true,
"self": 18.191081803151974,
"children": {
"process_trajectory": {
"total": 292.7390154439006,
"count": 826578,
"is_parallel": true,
"self": 289.66322821290066,
"children": {
"RLTrainer._checkpoint": {
"total": 3.0757872309999357,
"count": 4,
"is_parallel": true,
"self": 3.0757872309999357
}
}
},
"_update_policy": {
"total": 753.2609787230001,
"count": 45,
"is_parallel": true,
"self": 415.6952496390069,
"children": {
"TorchPPOOptimizer.update": {
"total": 337.56572908399323,
"count": 22920,
"is_parallel": true,
"self": 337.56572908399323
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.4667729640000289,
"count": 1,
"self": 0.017649950000077297,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4491230139999516,
"count": 1,
"self": 0.4491230139999516
}
}
}
}
}
}
}