{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6499285101890564,
"min": 0.6499285101890564,
"max": 1.458738923072815,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19404.265625,
"min": 19404.265625,
"max": 44252.3046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989913.0,
"min": 29952.0,
"max": 989913.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989913.0,
"min": 29952.0,
"max": 989913.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.36428558826446533,
"min": -0.25685468316078186,
"max": 0.38812872767448425,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 94.7142562866211,
"min": -60.87455749511719,
"max": 102.85411071777344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.3212348520755768,
"min": 0.006444373168051243,
"max": 0.5900049805641174,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 83.52106475830078,
"min": 1.6497595310211182,
"max": 139.8311767578125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0698883832762173,
"min": 0.0655892882570557,
"max": 0.07408280933668493,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9784373658670422,
"min": 0.5008351513538871,
"max": 1.070999363262672,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.02414444522582926,
"min": 6.870102689748416e-05,
"max": 0.02414444522582926,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3380222331616096,
"min": 0.0009618143765647783,
"max": 0.3380222331616096,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.57921175934286e-06,
"min": 7.57921175934286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010610896463080004,
"min": 0.00010610896463080004,
"max": 0.0035069804310066,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252637142857145,
"min": 0.10252637142857145,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4353692000000002,
"min": 1.3886848,
"max": 2.5689934,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002623845057142858,
"min": 0.0002623845057142858,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036733830800000015,
"min": 0.0036733830800000015,
"max": 0.11692244066,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016326773911714554,
"min": 0.014099876396358013,
"max": 0.4648396074771881,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.22857482731342316,
"min": 0.19739827513694763,
"max": 3.2538771629333496,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 452.60655737704917,
"min": 452.60655737704917,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27609.0,
"min": 15984.0,
"max": 32696.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3834065367452433,
"min": -1.0000000521540642,
"max": 1.3834065367452433,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 84.38779874145985,
"min": -31.99480167031288,
"max": 89.7881980985403,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3834065367452433,
"min": -1.0000000521540642,
"max": 1.3834065367452433,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 84.38779874145985,
"min": -31.99480167031288,
"max": 89.7881980985403,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07580724178210328,
"min": 0.07580724178210328,
"max": 9.374333899468184,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.6242417487083,
"min": 4.181749406852759,
"max": 149.98934239149094,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738667675",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738670772"
},
"total": 3097.681021646,
"count": 1,
"self": 1.1234495509997942,
"children": {
"run_training.setup": {
"total": 0.031750335999959134,
"count": 1,
"self": 0.031750335999959134
},
"TrainerController.start_learning": {
"total": 3096.525821759,
"count": 1,
"self": 2.501492321940532,
"children": {
"TrainerController._reset_env": {
"total": 2.746037672999819,
"count": 1,
"self": 2.746037672999819
},
"TrainerController.advance": {
"total": 3091.12402692906,
"count": 63398,
"self": 2.5999114520382136,
"children": {
"env_step": {
"total": 2011.2371428960137,
"count": 63398,
"self": 1844.8661358879335,
"children": {
"SubprocessEnvManager._take_step": {
"total": 164.98940622805685,
"count": 63398,
"self": 7.306160700069086,
"children": {
"TorchPolicy.evaluate": {
"total": 157.68324552798776,
"count": 62562,
"self": 157.68324552798776
}
}
},
"workers": {
"total": 1.381600780023291,
"count": 63398,
"self": 0.0,
"children": {
"worker_root": {
"total": 3088.690723013074,
"count": 63398,
"is_parallel": true,
"self": 1422.0819592690873,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0032984950000809476,
"count": 1,
"is_parallel": true,
"self": 0.0011395290000564273,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021589660000245203,
"count": 8,
"is_parallel": true,
"self": 0.0021589660000245203
}
}
},
"UnityEnvironment.step": {
"total": 0.06721743499997501,
"count": 1,
"is_parallel": true,
"self": 0.0008649499998227839,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0007009089999883145,
"count": 1,
"is_parallel": true,
"self": 0.0007009089999883145
},
"communicator.exchange": {
"total": 0.06329408000010517,
"count": 1,
"is_parallel": true,
"self": 0.06329408000010517
},
"steps_from_proto": {
"total": 0.0023574960000587453,
"count": 1,
"is_parallel": true,
"self": 0.0004886879999048688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018688080001538765,
"count": 8,
"is_parallel": true,
"self": 0.0018688080001538765
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1666.6087637439869,
"count": 63397,
"is_parallel": true,
"self": 44.96348121878418,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.321608409073633,
"count": 63397,
"is_parallel": true,
"self": 31.321608409073633
},
"communicator.exchange": {
"total": 1464.1476928310874,
"count": 63397,
"is_parallel": true,
"self": 1464.1476928310874
},
"steps_from_proto": {
"total": 126.17598128504164,
"count": 63397,
"is_parallel": true,
"self": 27.36397507294373,
"children": {
"_process_rank_one_or_two_observation": {
"total": 98.81200621209791,
"count": 507176,
"is_parallel": true,
"self": 98.81200621209791
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1077.2869725810085,
"count": 63398,
"self": 4.545008039973709,
"children": {
"process_trajectory": {
"total": 163.4348043840355,
"count": 63398,
"self": 162.97399729303515,
"children": {
"RLTrainer._checkpoint": {
"total": 0.46080709100033346,
"count": 2,
"self": 0.46080709100033346
}
}
},
"_update_policy": {
"total": 909.3071601569993,
"count": 446,
"self": 367.22355963805126,
"children": {
"TorchPPOOptimizer.update": {
"total": 542.0836005189481,
"count": 22818,
"self": 542.0836005189481
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1060001270379871e-06,
"count": 1,
"self": 1.1060001270379871e-06
},
"TrainerController._save_models": {
"total": 0.15426372899946728,
"count": 1,
"self": 0.0028620759994737455,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15140165299999353,
"count": 1,
"self": 0.15140165299999353
}
}
}
}
}
}
}