{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5505513548851013,
"min": 0.5505513548851013,
"max": 1.4167014360427856,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16384.408203125,
"min": 16384.408203125,
"max": 42977.0546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1219722181558609,
"min": -0.10626282542943954,
"max": 0.20608457922935486,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 30.73699951171875,
"min": -25.60934066772461,
"max": 52.345481872558594,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.07707514613866806,
"min": -0.06596412509679794,
"max": 0.5060542821884155,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 19.422937393188477,
"min": -16.7548885345459,
"max": 119.93486022949219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07018037533584993,
"min": 0.06451662967231936,
"max": 0.07363603323465213,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.982525254701899,
"min": 0.4848458311492417,
"max": 1.104540498519782,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.008825935397214104,
"min": 5.982214327082022e-05,
"max": 0.012629955665404145,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.12356309556099744,
"min": 0.0007776878625206629,
"max": 0.16009759975956211,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.646476022635719e-06,
"min": 7.646476022635719e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010705066431690006,
"min": 0.00010705066431690006,
"max": 0.0032242867252377993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254879285714287,
"min": 0.10254879285714287,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356831,
"min": 1.3691136000000002,
"max": 2.3081813000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026462440642857155,
"min": 0.00026462440642857155,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037047416900000017,
"min": 0.0037047416900000017,
"max": 0.10748874378000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015060283243656158,
"min": 0.015060283243656158,
"max": 0.5361228585243225,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21084396541118622,
"min": 0.21084396541118622,
"max": 3.7528598308563232,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 679.0227272727273,
"min": 613.2340425531914,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29877.0,
"min": 15984.0,
"max": 32653.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.5211860111286474,
"min": -1.0000000521540642,
"max": 0.8759574097521762,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 22.410998478531837,
"min": -32.000001668930054,
"max": 41.16999825835228,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.5211860111286474,
"min": -1.0000000521540642,
"max": 0.8759574097521762,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 22.410998478531837,
"min": -32.000001668930054,
"max": 41.16999825835228,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1064517994811975,
"min": 0.09769848065990756,
"max": 11.091340923681855,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.577427377691492,
"min": 4.549132539192215,
"max": 177.46145477890968,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687873896",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687875893"
},
"total": 1997.097146092,
"count": 1,
"self": 1.2835085090000575,
"children": {
"run_training.setup": {
"total": 0.04179961799991361,
"count": 1,
"self": 0.04179961799991361
},
"TrainerController.start_learning": {
"total": 1995.771837965,
"count": 1,
"self": 1.3094397830095659,
"children": {
"TrainerController._reset_env": {
"total": 4.965006517999882,
"count": 1,
"self": 4.965006517999882
},
"TrainerController.advance": {
"total": 1989.3584154219902,
"count": 63222,
"self": 1.3741559219852206,
"children": {
"env_step": {
"total": 1355.3463093400565,
"count": 63222,
"self": 1246.398699968051,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.15227151904355,
"count": 63222,
"self": 4.671855593092005,
"children": {
"TorchPolicy.evaluate": {
"total": 103.48041592595155,
"count": 62579,
"self": 103.48041592595155
}
}
},
"workers": {
"total": 0.7953378529618931,
"count": 63222,
"self": 0.0,
"children": {
"worker_root": {
"total": 1990.9530861110247,
"count": 63222,
"is_parallel": true,
"self": 855.5327435499566,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018385179996585066,
"count": 1,
"is_parallel": true,
"self": 0.0005915100000493112,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012470079996091954,
"count": 8,
"is_parallel": true,
"self": 0.0012470079996091954
}
}
},
"UnityEnvironment.step": {
"total": 0.10106155199991917,
"count": 1,
"is_parallel": true,
"self": 0.0006685339994874084,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005184390001886641,
"count": 1,
"is_parallel": true,
"self": 0.0005184390001886641
},
"communicator.exchange": {
"total": 0.09784881099994891,
"count": 1,
"is_parallel": true,
"self": 0.09784881099994891
},
"steps_from_proto": {
"total": 0.0020257680002941925,
"count": 1,
"is_parallel": true,
"self": 0.00039084199988792534,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016349260004062671,
"count": 8,
"is_parallel": true,
"self": 0.0016349260004062671
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1135.420342561068,
"count": 63221,
"is_parallel": true,
"self": 32.729480024042914,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.11452285296855,
"count": 63221,
"is_parallel": true,
"self": 23.11452285296855
},
"communicator.exchange": {
"total": 980.3362975710629,
"count": 63221,
"is_parallel": true,
"self": 980.3362975710629
},
"steps_from_proto": {
"total": 99.2400421129937,
"count": 63221,
"is_parallel": true,
"self": 19.226253153165544,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.01378895982816,
"count": 505768,
"is_parallel": true,
"self": 80.01378895982816
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 632.6379501599486,
"count": 63222,
"self": 2.438642733942288,
"children": {
"process_trajectory": {
"total": 105.6273308300074,
"count": 63222,
"self": 105.37466987700782,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2526609529995767,
"count": 2,
"self": 0.2526609529995767
}
}
},
"_update_policy": {
"total": 524.5719765959989,
"count": 433,
"self": 336.15857428299796,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.41340231300092,
"count": 22818,
"self": 188.41340231300092
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4550005289493129e-06,
"count": 1,
"self": 1.4550005289493129e-06
},
"TrainerController._save_models": {
"total": 0.13897478699982457,
"count": 1,
"self": 0.0018483389994798927,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13712644800034468,
"count": 1,
"self": 0.13712644800034468
}
}
}
}
}
}
}