{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.45312342047691345,
"min": 0.45312342047691345,
"max": 1.4126359224319458,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13542.953125,
"min": 13542.953125,
"max": 42853.72265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989941.0,
"min": 29960.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989941.0,
"min": 29960.0,
"max": 989941.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5085486769676208,
"min": -0.08976323157548904,
"max": 0.5085486769676208,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 138.8337860107422,
"min": -21.543174743652344,
"max": 138.8337860107422,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.6106587648391724,
"min": -0.05523677170276642,
"max": 1.6106587648391724,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 439.7098388671875,
"min": -14.748218536376953,
"max": 439.7098388671875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07124645535244556,
"min": 0.06496397565171633,
"max": 0.07214075917216407,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9974503749342378,
"min": 0.5296473797198225,
"max": 1.0367239441062945,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.2241313061709529,
"min": 0.00020159684056382426,
"max": 0.2241313061709529,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 3.1378382863933405,
"min": 0.0028223557678935395,
"max": 3.1378382863933405,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.535411773942864e-06,
"min": 7.535411773942864e-06,
"max": 0.00029485350171549996,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010549576483520009,
"min": 0.00010549576483520009,
"max": 0.0035081471306177,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251177142857144,
"min": 0.10251177142857144,
"max": 0.1982845,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351648000000001,
"min": 1.4351648000000001,
"max": 2.5693823000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002609259657142859,
"min": 0.0002609259657142859,
"max": 0.00982862155,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003652963520000002,
"min": 0.003652963520000002,
"max": 0.11696129176999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013752668164670467,
"min": 0.013752668164670467,
"max": 0.5091304779052734,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1925373524427414,
"min": 0.1925373524427414,
"max": 4.0730438232421875,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 361.7922077922078,
"min": 361.7922077922078,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27858.0,
"min": 16375.0,
"max": 32861.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5862467312193536,
"min": -0.9997226321889509,
"max": 1.5862467312193536,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 122.14099830389023,
"min": -30.991401597857475,
"max": 122.14099830389023,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5862467312193536,
"min": -0.9997226321889509,
"max": 1.5862467312193536,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 122.14099830389023,
"min": -30.991401597857475,
"max": 122.14099830389023,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0522890016300507,
"min": 0.0522890016300507,
"max": 9.974960602600785,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.026253125513904,
"min": 4.026253125513904,
"max": 169.57433024421334,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691544975",
"python_version": "3.9.17 (main, Jul 5 2023, 21:05:34) \n[GCC 11.2.0]",
"command_line_arguments": "/home/renatostrianese/anaconda3/envs/RenatoTF/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691546497"
},
"total": 1522.3088262740002,
"count": 1,
"self": 0.3206392110005254,
"children": {
"run_training.setup": {
"total": 0.02925035000043863,
"count": 1,
"self": 0.02925035000043863
},
"TrainerController.start_learning": {
"total": 1521.9589367129993,
"count": 1,
"self": 1.3761018438472092,
"children": {
"TrainerController._reset_env": {
"total": 3.3895987670002796,
"count": 1,
"self": 3.3895987670002796
},
"TrainerController.advance": {
"total": 1517.096650718151,
"count": 63550,
"self": 1.361308352134074,
"children": {
"env_step": {
"total": 1000.8401751188358,
"count": 63550,
"self": 902.1975040446923,
"children": {
"SubprocessEnvManager._take_step": {
"total": 97.77462586208821,
"count": 63550,
"self": 3.8350899726901844,
"children": {
"TorchPolicy.evaluate": {
"total": 93.93953588939803,
"count": 62551,
"self": 93.93953588939803
}
}
},
"workers": {
"total": 0.8680452120552218,
"count": 63550,
"self": 0.0,
"children": {
"worker_root": {
"total": 1519.5600681218966,
"count": 63550,
"is_parallel": true,
"self": 712.2250033952678,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022115169995231554,
"count": 1,
"is_parallel": true,
"self": 0.0008310689991049003,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001380448000418255,
"count": 8,
"is_parallel": true,
"self": 0.001380448000418255
}
}
},
"UnityEnvironment.step": {
"total": 0.0322420560005412,
"count": 1,
"is_parallel": true,
"self": 0.00025998999808507506,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023164000049291644,
"count": 1,
"is_parallel": true,
"self": 0.00023164000049291644
},
"communicator.exchange": {
"total": 0.03082764000100724,
"count": 1,
"is_parallel": true,
"self": 0.03082764000100724
},
"steps_from_proto": {
"total": 0.0009227860009559663,
"count": 1,
"is_parallel": true,
"self": 0.00023912800133985002,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006836579996161163,
"count": 8,
"is_parallel": true,
"self": 0.0006836579996161163
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 807.3350647266288,
"count": 63549,
"is_parallel": true,
"self": 26.45920644857688,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.637679546878644,
"count": 63549,
"is_parallel": true,
"self": 15.637679546878644
},
"communicator.exchange": {
"total": 693.5519352910087,
"count": 63549,
"is_parallel": true,
"self": 693.5519352910087
},
"steps_from_proto": {
"total": 71.68624344016462,
"count": 63549,
"is_parallel": true,
"self": 15.637165626963906,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.04907781320071,
"count": 508392,
"is_parallel": true,
"self": 56.04907781320071
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 514.8951672471812,
"count": 63550,
"self": 2.3717744022887928,
"children": {
"process_trajectory": {
"total": 85.97077450689903,
"count": 63550,
"self": 85.78830737389944,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18246713299959083,
"count": 2,
"self": 0.18246713299959083
}
}
},
"_update_policy": {
"total": 426.5526183379934,
"count": 455,
"self": 266.21230887886304,
"children": {
"TorchPPOOptimizer.update": {
"total": 160.34030945913037,
"count": 22749,
"self": 160.34030945913037
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.899998308857903e-07,
"count": 1,
"self": 7.899998308857903e-07
},
"TrainerController._save_models": {
"total": 0.09658459400088759,
"count": 1,
"self": 0.0012960020012542373,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09528859199963335,
"count": 1,
"self": 0.09528859199963335
}
}
}
}
}
}
}