{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6608625650405884,
"min": 0.6520163416862488,
"max": 1.4733320474624634,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19836.451171875,
"min": 19435.302734375,
"max": 44695.0,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989996.0,
"min": 29952.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989996.0,
"min": 29952.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.0531952828168869,
"min": -0.1175866425037384,
"max": 0.09910526871681213,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 13.13923454284668,
"min": -28.338380813598633,
"max": 24.578105926513672,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0028643980622291565,
"min": -0.014672333374619484,
"max": 0.20042133331298828,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.7075062990188599,
"min": -3.6387386322021484,
"max": 48.10111999511719,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06941136405464127,
"min": 0.06301330679923657,
"max": 0.0718804516339432,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9717590967649777,
"min": 0.5024250880249739,
"max": 1.0543235874541934,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00750516156693709,
"min": 0.00021090192975234916,
"max": 0.008246933069215116,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10507226193711926,
"min": 0.002952627016532888,
"max": 0.11545706296901162,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.279319002164286e-06,
"min": 7.279319002164286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010191046603030001,
"min": 0.00010191046603030001,
"max": 0.0035073002308999997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242640714285715,
"min": 0.10242640714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339697000000002,
"min": 1.3886848,
"max": 2.5691,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002523980735714286,
"min": 0.0002523980735714286,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035335730300000002,
"min": 0.0035335730300000002,
"max": 0.11693309,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013306832872331142,
"min": 0.013306832872331142,
"max": 0.426264226436615,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18629565834999084,
"min": 0.18629565834999084,
"max": 2.98384952545166,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 798.6756756756756,
"min": 769.8780487804878,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29551.0,
"min": 15984.0,
"max": 33183.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.22809185369594678,
"min": -1.0000000521540642,
"max": 0.3604423859805772,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 8.43939858675003,
"min": -31.996801674365997,
"max": 12.423798389732838,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.22809185369594678,
"min": -1.0000000521540642,
"max": 0.3604423859805772,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 8.43939858675003,
"min": -31.996801674365997,
"max": 12.423798389732838,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1103283313128191,
"min": 0.10874850682519151,
"max": 8.698082627728581,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.082148258574307,
"min": 3.5887007252313197,
"max": 139.1693220436573,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1751285979",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1751287943"
},
"total": 1964.0221399559996,
"count": 1,
"self": 0.47537400299916044,
"children": {
"run_training.setup": {
"total": 0.019126350000078673,
"count": 1,
"self": 0.019126350000078673
},
"TrainerController.start_learning": {
"total": 1963.5276396030004,
"count": 1,
"self": 1.1122876189633644,
"children": {
"TrainerController._reset_env": {
"total": 2.2631129450001026,
"count": 1,
"self": 2.2631129450001026
},
"TrainerController.advance": {
"total": 1960.070935314037,
"count": 63222,
"self": 1.2638017998910982,
"children": {
"env_step": {
"total": 1325.4295653401864,
"count": 63222,
"self": 1190.1567859761526,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.57986645296933,
"count": 63222,
"self": 4.169371226069416,
"children": {
"TorchPolicy.evaluate": {
"total": 130.4104952268999,
"count": 62551,
"self": 130.4104952268999
}
}
},
"workers": {
"total": 0.6929129110644681,
"count": 63222,
"self": 0.0,
"children": {
"worker_root": {
"total": 1959.5511119101275,
"count": 63222,
"is_parallel": true,
"self": 870.4864729770602,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017380379999849538,
"count": 1,
"is_parallel": true,
"self": 0.0005477299996528018,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001190308000332152,
"count": 8,
"is_parallel": true,
"self": 0.001190308000332152
}
}
},
"UnityEnvironment.step": {
"total": 0.04583969699979207,
"count": 1,
"is_parallel": true,
"self": 0.0005045850002716179,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004284110000298824,
"count": 1,
"is_parallel": true,
"self": 0.0004284110000298824
},
"communicator.exchange": {
"total": 0.04338716799975373,
"count": 1,
"is_parallel": true,
"self": 0.04338716799975373
},
"steps_from_proto": {
"total": 0.001519532999736839,
"count": 1,
"is_parallel": true,
"self": 0.0003227639999749954,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011967689997618436,
"count": 8,
"is_parallel": true,
"self": 0.0011967689997618436
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1089.0646389330673,
"count": 63221,
"is_parallel": true,
"self": 30.103232349101745,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.379260087759576,
"count": 63221,
"is_parallel": true,
"self": 21.379260087759576
},
"communicator.exchange": {
"total": 949.0628082010676,
"count": 63221,
"is_parallel": true,
"self": 949.0628082010676
},
"steps_from_proto": {
"total": 88.51933829513837,
"count": 63221,
"is_parallel": true,
"self": 17.000668136752665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.5186701583857,
"count": 505768,
"is_parallel": true,
"self": 71.5186701583857
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 633.3775681739594,
"count": 63222,
"self": 2.1893480729108887,
"children": {
"process_trajectory": {
"total": 116.54729446803958,
"count": 63222,
"self": 116.31176654503906,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23552792300051806,
"count": 2,
"self": 0.23552792300051806
}
}
},
"_update_policy": {
"total": 514.6409256330089,
"count": 447,
"self": 287.1395376509572,
"children": {
"TorchPPOOptimizer.update": {
"total": 227.5013879820517,
"count": 22815,
"self": 227.5013879820517
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.69999746303074e-07,
"count": 1,
"self": 9.69999746303074e-07
},
"TrainerController._save_models": {
"total": 0.08130275500025164,
"count": 1,
"self": 0.001134723001086968,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08016803199916467,
"count": 1,
"self": 0.08016803199916467
}
}
}
}
}
}
}
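
For reference, a minimal sketch of how the metrics above could be read back in Python, assuming the JSON block is saved as the usual ML-Agents run_logs/timers.json (the exact path depends on the run directory and is an assumption here, not taken from the file itself):

import json

# Minimal sketch: load the gauge/timer dump written by mlagents-learn.
# The path is a placeholder; point it at wherever this file actually lives.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge stores the most recent value plus the min/max observed and how
# many times it was reported ("count") over the run.
reward = timers["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
print(f"final mean reward: {reward['value']:.3f} "
      f"(max {reward['max']:.3f} over {reward['count']} reports)")

# Timing information is a nested tree under "children"; top-level totals
# are wall-clock seconds for the whole training session.
print(f"total wall-clock time: {timers['total']:.1f} s")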