{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4059933125972748,
"min": 0.39715439081192017,
"max": 1.391348123550415,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12205.783203125,
"min": 11901.9228515625,
"max": 42207.9375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989972.0,
"min": 29896.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989972.0,
"min": 29896.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.46790915727615356,
"min": -0.1095510721206665,
"max": 0.4824177026748657,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 127.73919677734375,
"min": -26.40180778503418,
"max": 132.6648712158203,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.052653852850198746,
"min": -0.06417358666658401,
"max": 0.36108410358428955,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -14.374502182006836,
"min": -16.492610931396484,
"max": 85.57693481445312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07295300758033547,
"min": 0.06457576945716803,
"max": 0.07571827622368271,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0213421061246966,
"min": 0.6814644860131444,
"max": 1.0510492202204964,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01587568756804103,
"min": 0.00016397854359953543,
"max": 0.01587568756804103,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22225962595257442,
"min": 0.0021317210667939606,
"max": 0.22225962595257442,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.366597544500002e-06,
"min": 7.366597544500002e-06,
"max": 0.0002948651017116333,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010313236562300003,
"min": 0.00010313236562300003,
"max": 0.0033821882726039997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024555,
"min": 0.1024555,
"max": 0.1982883666666667,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434377,
"min": 1.434377,
"max": 2.527396,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002553044500000001,
"min": 0.0002553044500000001,
"max": 0.009829007829999998,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003574262300000001,
"min": 0.003574262300000001,
"max": 0.11276686039999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016512218862771988,
"min": 0.016512218862771988,
"max": 0.4444085359573364,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23117107152938843,
"min": 0.23117107152938843,
"max": 3.9996767044067383,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 396.82894736842104,
"min": 382.13157894736844,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30159.0,
"min": 16439.0,
"max": 32427.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5242026057094336,
"min": -0.9999125516042113,
"max": 1.5242026057094336,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 115.83939803391695,
"min": -31.997201651334763,
"max": 115.83939803391695,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5242026057094336,
"min": -0.9999125516042113,
"max": 1.5242026057094336,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 115.83939803391695,
"min": -31.997201651334763,
"max": 115.83939803391695,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06795989735186786,
"min": 0.06590360357720208,
"max": 9.615516344414038,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.1649521987419575,
"min": 5.008673871867359,
"max": 163.46377785503864,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680692242",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680694300"
},
"total": 2058.175234011,
"count": 1,
"self": 0.4242503720001878,
"children": {
"run_training.setup": {
"total": 0.10575246900043567,
"count": 1,
"self": 0.10575246900043567
},
"TrainerController.start_learning": {
"total": 2057.6452311699995,
"count": 1,
"self": 1.393002608034294,
"children": {
"TrainerController._reset_env": {
"total": 4.364087276999726,
"count": 1,
"self": 4.364087276999726
},
"TrainerController.advance": {
"total": 2051.796258510965,
"count": 63608,
"self": 1.397841679100111,
"children": {
"env_step": {
"total": 1450.4963841819126,
"count": 63608,
"self": 1345.9276786417827,
"children": {
"SubprocessEnvManager._take_step": {
"total": 103.76707985005169,
"count": 63608,
"self": 4.65658583204231,
"children": {
"TorchPolicy.evaluate": {
"total": 99.11049401800938,
"count": 62559,
"self": 99.11049401800938
}
}
},
"workers": {
"total": 0.8016256900782537,
"count": 63608,
"self": 0.0,
"children": {
"worker_root": {
"total": 2052.885664612976,
"count": 63608,
"is_parallel": true,
"self": 813.1550638510248,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018020560000877595,
"count": 1,
"is_parallel": true,
"self": 0.0005594770004790917,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012425789996086678,
"count": 8,
"is_parallel": true,
"self": 0.0012425789996086678
}
}
},
"UnityEnvironment.step": {
"total": 0.11026811200008524,
"count": 1,
"is_parallel": true,
"self": 0.0005845370001225092,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042601399991326616,
"count": 1,
"is_parallel": true,
"self": 0.00042601399991326616
},
"communicator.exchange": {
"total": 0.09954681099998197,
"count": 1,
"is_parallel": true,
"self": 0.09954681099998197
},
"steps_from_proto": {
"total": 0.009710750000067492,
"count": 1,
"is_parallel": true,
"self": 0.008496582000589115,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012141679994783772,
"count": 8,
"is_parallel": true,
"self": 0.0012141679994783772
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1239.7306007619513,
"count": 63607,
"is_parallel": true,
"self": 31.617140868037495,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.3351962899701,
"count": 63607,
"is_parallel": true,
"self": 22.3351962899701
},
"communicator.exchange": {
"total": 1095.3404195850026,
"count": 63607,
"is_parallel": true,
"self": 1095.3404195850026
},
"steps_from_proto": {
"total": 90.43784401894118,
"count": 63607,
"is_parallel": true,
"self": 19.14610923491864,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.29173478402254,
"count": 508856,
"is_parallel": true,
"self": 71.29173478402254
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 599.9020326499522,
"count": 63608,
"self": 2.503271927971582,
"children": {
"process_trajectory": {
"total": 101.13825509498338,
"count": 63608,
"self": 100.93733062598358,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20092446899980132,
"count": 2,
"self": 0.20092446899980132
}
}
},
"_update_policy": {
"total": 496.2605056269972,
"count": 452,
"self": 316.4680698309776,
"children": {
"TorchPPOOptimizer.update": {
"total": 179.79243579601962,
"count": 22770,
"self": 179.79243579601962
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.770002750679851e-07,
"count": 1,
"self": 8.770002750679851e-07
},
"TrainerController._save_models": {
"total": 0.09188189700034854,
"count": 1,
"self": 0.0014045910011191154,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09047730599922943,
"count": 1,
"self": 0.09047730599922943
}
}
}
}
}
}
}