{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4190213084220886,
"min": 0.4190213084220886,
"max": 1.505911111831665,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12490.1875,
"min": 12490.1875,
"max": 45683.3203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989872.0,
"min": 29952.0,
"max": 989872.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989872.0,
"min": 29952.0,
"max": 989872.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.48404812812805176,
"min": -0.08189662545919418,
"max": 0.6003912687301636,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 132.1451416015625,
"min": -19.81898307800293,
"max": 169.31033325195312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006385415326803923,
"min": 0.0049592116847634315,
"max": 0.273568332195282,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.7432184219360352,
"min": 1.3340280055999756,
"max": 65.92996978759766,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06757521334102853,
"min": 0.06461229398908992,
"max": 0.0746009397633585,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.47302649338719976,
"min": 0.298403759053434,
"max": 0.5734980662281018,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.019140020994133384,
"min": 2.424145398540661e-05,
"max": 0.019140020994133384,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.13398014695893368,
"min": 0.00014544872391243966,
"max": 0.13863446111402786,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.378854683271426e-06,
"min": 7.378854683271426e-06,
"max": 0.00029544960151679995,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 5.165198278289998e-05,
"min": 5.165198278289998e-05,
"max": 0.0019438587520470996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245958571428572,
"min": 0.10245958571428572,
"max": 0.1984832,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.7172171,
"min": 0.7172171,
"max": 1.3479529000000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025571261285714286,
"min": 0.00025571261285714286,
"max": 0.009848471680000002,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0017899882899999999,
"min": 0.0017899882899999999,
"max": 0.06480049471,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00799519382417202,
"min": 0.00799519382417202,
"max": 0.40666455030441284,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.05596635490655899,
"min": 0.05596635490655899,
"max": 1.6266582012176514,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 336.9318181818182,
"min": 308.70526315789476,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29650.0,
"min": 15984.0,
"max": 34298.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.617602243850177,
"min": -1.0000000521540642,
"max": 1.6912947250039954,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 142.34899745881557,
"min": -31.996001660823822,
"max": 160.67299887537956,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.617602243850177,
"min": -1.0000000521540642,
"max": 1.6912947250039954,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 142.34899745881557,
"min": -31.996001660823822,
"max": 160.67299887537956,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02770611367271737,
"min": 0.026123846878947102,
"max": 8.458508744835854,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4381380031991284,
"min": 2.422532507684082,
"max": 135.33613991737366,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681316970",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681320051"
},
"total": 3081.14246312,
"count": 1,
"self": 0.5552134929998829,
"children": {
"run_training.setup": {
"total": 0.21130279400000518,
"count": 1,
"self": 0.21130279400000518
},
"TrainerController.start_learning": {
"total": 3080.375946833,
"count": 1,
"self": 1.8684813209770255,
"children": {
"TrainerController._reset_env": {
"total": 1.504006961000016,
"count": 1,
"self": 1.504006961000016
},
"TrainerController.advance": {
"total": 3076.8727257960227,
"count": 63750,
"self": 2.057924364039536,
"children": {
"env_step": {
"total": 1795.2252732590068,
"count": 63750,
"self": 1667.1199278410438,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.97075461996718,
"count": 63750,
"self": 5.4964485800822445,
"children": {
"TorchPolicy.evaluate": {
"total": 121.47430603988494,
"count": 62562,
"self": 121.47430603988494
}
}
},
"workers": {
"total": 1.1345907979958838,
"count": 63750,
"self": 0.0,
"children": {
"worker_root": {
"total": 3074.806628449011,
"count": 63750,
"is_parallel": true,
"self": 1545.4351803830114,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005472853000014766,
"count": 1,
"is_parallel": true,
"self": 0.003205018000016935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002267834999997831,
"count": 8,
"is_parallel": true,
"self": 0.002267834999997831
}
}
},
"UnityEnvironment.step": {
"total": 0.10589841000000888,
"count": 1,
"is_parallel": true,
"self": 0.0006653920000303515,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005119489999856341,
"count": 1,
"is_parallel": true,
"self": 0.0005119489999856341
},
"communicator.exchange": {
"total": 0.10285576899997295,
"count": 1,
"is_parallel": true,
"self": 0.10285576899997295
},
"steps_from_proto": {
"total": 0.0018653000000199427,
"count": 1,
"is_parallel": true,
"self": 0.00041095699998550117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014543430000344415,
"count": 8,
"is_parallel": true,
"self": 0.0014543430000344415
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1529.3714480659996,
"count": 63749,
"is_parallel": true,
"self": 39.96980703793838,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.094747244009113,
"count": 63749,
"is_parallel": true,
"self": 22.094747244009113
},
"communicator.exchange": {
"total": 1351.842619247059,
"count": 63749,
"is_parallel": true,
"self": 1351.842619247059
},
"steps_from_proto": {
"total": 115.46427453699306,
"count": 63749,
"is_parallel": true,
"self": 25.150309940280692,
"children": {
"_process_rank_one_or_two_observation": {
"total": 90.31396459671237,
"count": 509992,
"is_parallel": true,
"self": 90.31396459671237
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1279.5895281729763,
"count": 63750,
"self": 3.306275357964296,
"children": {
"process_trajectory": {
"total": 135.4343795760168,
"count": 63750,
"self": 135.16082114701692,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2735584289998769,
"count": 2,
"self": 0.2735584289998769
}
}
},
"_update_policy": {
"total": 1140.848873238995,
"count": 233,
"self": 380.7742060779501,
"children": {
"TorchPPOOptimizer.update": {
"total": 760.074667161045,
"count": 23016,
"self": 760.074667161045
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1050001376133878e-06,
"count": 1,
"self": 1.1050001376133878e-06
},
"TrainerController._save_models": {
"total": 0.13073165000014342,
"count": 1,
"self": 0.0023674980002397206,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1283641519999037,
"count": 1,
"self": 0.1283641519999037
}
}
}
}
}
}
}