{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3908621668815613,
"min": 0.38359513878822327,
"max": 1.4590368270874023,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11807.1640625,
"min": 11415.791015625,
"max": 44261.33984375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989881.0,
"min": 29897.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989881.0,
"min": 29897.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6767734289169312,
"min": -0.10698991268873215,
"max": 0.7063584923744202,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 194.23397827148438,
"min": -25.356609344482422,
"max": 206.96304321289062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02619103156030178,
"min": -0.011437329463660717,
"max": 0.299892395734787,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.516826152801514,
"min": -3.2253270149230957,
"max": 72.2740707397461,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06528362203001356,
"min": 0.06506264701602048,
"max": 0.07333710537585872,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9139707084201898,
"min": 0.4930213629768227,
"max": 1.0716038072132505,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016887424933277845,
"min": 0.0004764606642033617,
"max": 0.016887424933277845,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23642394906588984,
"min": 0.005241067306236979,
"max": 0.23642394906588984,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4584403710285675e-06,
"min": 7.4584403710285675e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010441816519439995,
"min": 0.00010441816519439995,
"max": 0.0035036387321204995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024861142857143,
"min": 0.1024861142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348056000000002,
"min": 1.3886848,
"max": 2.5678794999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000258362817142857,
"min": 0.000258362817142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036170794399999983,
"min": 0.0036170794399999983,
"max": 0.11681116205000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01021169126033783,
"min": 0.01021169126033783,
"max": 0.557073175907135,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14296367764472961,
"min": 0.14296367764472961,
"max": 3.8995120525360107,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 292.4271844660194,
"min": 261.88495575221236,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30120.0,
"min": 16824.0,
"max": 32131.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.707403828891424,
"min": -0.9999750521965325,
"max": 1.7204070628612442,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 177.5699982047081,
"min": -31.99920167028904,
"max": 194.4059981033206,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.707403828891424,
"min": -0.9999750521965325,
"max": 1.7204070628612442,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 177.5699982047081,
"min": -31.99920167028904,
"max": 194.4059981033206,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.030941790644227203,
"min": 0.030941790644227203,
"max": 11.355583505595432,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2179462269996293,
"min": 3.117691306397319,
"max": 193.04491959512234,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678214761",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=new Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678217253"
},
"total": 2492.4301391530003,
"count": 1,
"self": 0.5990411000002496,
"children": {
"run_training.setup": {
"total": 0.10817222899999024,
"count": 1,
"self": 0.10817222899999024
},
"TrainerController.start_learning": {
"total": 2491.722925824,
"count": 1,
"self": 1.518234807009776,
"children": {
"TrainerController._reset_env": {
"total": 9.287128419999988,
"count": 1,
"self": 9.287128419999988
},
"TrainerController.advance": {
"total": 2480.8281484669897,
"count": 64120,
"self": 1.6936405079850374,
"children": {
"env_step": {
"total": 1721.6618343609878,
"count": 64120,
"self": 1598.5353372779796,
"children": {
"SubprocessEnvManager._take_step": {
"total": 122.1850049990095,
"count": 64120,
"self": 5.092636900019045,
"children": {
"TorchPolicy.evaluate": {
"total": 117.09236809899045,
"count": 62578,
"self": 39.63625047299911,
"children": {
"TorchPolicy.sample_actions": {
"total": 77.45611762599134,
"count": 62578,
"self": 77.45611762599134
}
}
}
}
},
"workers": {
"total": 0.9414920839988099,
"count": 64120,
"self": 0.0,
"children": {
"worker_root": {
"total": 2485.651734907009,
"count": 64120,
"is_parallel": true,
"self": 1015.9665215099951,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0069005119999587805,
"count": 1,
"is_parallel": true,
"self": 0.003244921999964845,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0036555899999939356,
"count": 8,
"is_parallel": true,
"self": 0.0036555899999939356
}
}
},
"UnityEnvironment.step": {
"total": 0.04973598999998785,
"count": 1,
"is_parallel": true,
"self": 0.0005387390000350933,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048167799997145266,
"count": 1,
"is_parallel": true,
"self": 0.00048167799997145266
},
"communicator.exchange": {
"total": 0.04692355999998199,
"count": 1,
"is_parallel": true,
"self": 0.04692355999998199
},
"steps_from_proto": {
"total": 0.001792012999999315,
"count": 1,
"is_parallel": true,
"self": 0.00042532500003744644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013666879999618686,
"count": 8,
"is_parallel": true,
"self": 0.0013666879999618686
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1469.685213397014,
"count": 64119,
"is_parallel": true,
"self": 33.606401556960236,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.789806417015598,
"count": 64119,
"is_parallel": true,
"self": 25.789806417015598
},
"communicator.exchange": {
"total": 1308.7842966580106,
"count": 64119,
"is_parallel": true,
"self": 1308.7842966580106
},
"steps_from_proto": {
"total": 101.50470876502749,
"count": 64119,
"is_parallel": true,
"self": 24.914584607213612,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.59012415781388,
"count": 512952,
"is_parallel": true,
"self": 76.59012415781388
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 757.4726735980167,
"count": 64120,
"self": 2.880847835018244,
"children": {
"process_trajectory": {
"total": 175.61445800100387,
"count": 64120,
"self": 175.41531721300385,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19914078800002244,
"count": 2,
"self": 0.19914078800002244
}
}
},
"_update_policy": {
"total": 578.9773677619946,
"count": 456,
"self": 224.34490202600097,
"children": {
"TorchPPOOptimizer.update": {
"total": 354.63246573599366,
"count": 22761,
"self": 354.63246573599366
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0200001270277426e-06,
"count": 1,
"self": 1.0200001270277426e-06
},
"TrainerController._save_models": {
"total": 0.08941311000035057,
"count": 1,
"self": 0.001447168000140664,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0879659420002099,
"count": 1,
"self": 0.0879659420002099
}
}
}
}
}
}
}