ppo-Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3716318607330322,
"min": 0.3716318607330322,
"max": 1.3881765604019165,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11119.2255859375,
"min": 11119.2255859375,
"max": 42111.72265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989948.0,
"min": 29952.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5520221590995789,
"min": -0.0807473361492157,
"max": 0.6285156011581421,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 155.6702423095703,
"min": -19.540855407714844,
"max": 180.38397216796875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006029200274497271,
"min": -0.012511473149061203,
"max": 0.5308569073677063,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.7002344131469727,
"min": -3.353074789047241,
"max": 127.93650817871094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06681013170080925,
"min": 0.06439228096217513,
"max": 0.07316651910410396,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9353418438113295,
"min": 0.5069407713920648,
"max": 1.0781885947365606,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01794653715755668,
"min": 0.0007959364282679759,
"max": 0.01794653715755668,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25125152020579355,
"min": 0.00955123713921571,
"max": 0.2575851337908306,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.678533154807148e-06,
"min": 7.678533154807148e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010749946416730006,
"min": 0.00010749946416730006,
"max": 0.0036335446888185,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255947857142855,
"min": 0.10255947857142855,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358326999999997,
"min": 1.3886848,
"max": 2.6111814999999994,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026569190928571446,
"min": 0.00026569190928571446,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003719686730000002,
"min": 0.003719686730000002,
"max": 0.12113703185,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015160898678004742,
"min": 0.015160898678004742,
"max": 0.6804297566413879,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21225258708000183,
"min": 0.21225258708000183,
"max": 4.763008117675781,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 317.34375,
"min": 310.6979166666667,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30465.0,
"min": 15984.0,
"max": 33097.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5784458133857697,
"min": -1.0000000521540642,
"max": 1.6893020641679566,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 151.5307980850339,
"min": -29.772801652550697,
"max": 162.17299816012383,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5784458133857697,
"min": -1.0000000521540642,
"max": 1.6893020641679566,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 151.5307980850339,
"min": -29.772801652550697,
"max": 162.17299816012383,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.049571789770804266,
"min": 0.049571789770804266,
"max": 13.399051713757217,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.75889181799721,
"min": 4.75889181799721,
"max": 214.38482742011547,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679513816",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679516006"
},
"total": 2190.490558073,
"count": 1,
"self": 0.48490632099947106,
"children": {
"run_training.setup": {
"total": 0.17927686699999867,
"count": 1,
"self": 0.17927686699999867
},
"TrainerController.start_learning": {
"total": 2189.8263748850004,
"count": 1,
"self": 1.5164754499974151,
"children": {
"TrainerController._reset_env": {
"total": 6.109026415000017,
"count": 1,
"self": 6.109026415000017
},
"TrainerController.advance": {
"total": 2182.1061725660024,
"count": 63965,
"self": 1.582868108011553,
"children": {
"env_step": {
"total": 1555.813771851005,
"count": 63965,
"self": 1439.362561247073,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.55060325494514,
"count": 63965,
"self": 4.801321585867299,
"children": {
"TorchPolicy.evaluate": {
"total": 110.74928166907785,
"count": 62548,
"self": 110.74928166907785
}
}
},
"workers": {
"total": 0.9006073489867958,
"count": 63965,
"self": 0.0,
"children": {
"worker_root": {
"total": 2184.6190477709947,
"count": 63965,
"is_parallel": true,
"self": 867.2697527770965,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018023000000084721,
"count": 1,
"is_parallel": true,
"self": 0.0005373500002860965,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012649499997223757,
"count": 8,
"is_parallel": true,
"self": 0.0012649499997223757
}
}
},
"UnityEnvironment.step": {
"total": 0.04486401300005127,
"count": 1,
"is_parallel": true,
"self": 0.0005117719999816472,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048426800003653625,
"count": 1,
"is_parallel": true,
"self": 0.00048426800003653625
},
"communicator.exchange": {
"total": 0.04228749699996115,
"count": 1,
"is_parallel": true,
"self": 0.04228749699996115
},
"steps_from_proto": {
"total": 0.0015804760000719398,
"count": 1,
"is_parallel": true,
"self": 0.00034896300053333107,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012315129995386087,
"count": 8,
"is_parallel": true,
"self": 0.0012315129995386087
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1317.3492949938982,
"count": 63964,
"is_parallel": true,
"self": 31.81441099199469,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.87449696990916,
"count": 63964,
"is_parallel": true,
"self": 22.87449696990916
},
"communicator.exchange": {
"total": 1168.9190391709853,
"count": 63964,
"is_parallel": true,
"self": 1168.9190391709853
},
"steps_from_proto": {
"total": 93.74134786100899,
"count": 63964,
"is_parallel": true,
"self": 20.54246162596428,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.19888623504471,
"count": 511712,
"is_parallel": true,
"self": 73.19888623504471
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 624.7095326069859,
"count": 63965,
"self": 2.959575394979538,
"children": {
"process_trajectory": {
"total": 119.7156706589999,
"count": 63965,
"self": 119.51752013500027,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1981505239996295,
"count": 2,
"self": 0.1981505239996295
}
}
},
"_update_policy": {
"total": 502.0342865530065,
"count": 456,
"self": 316.3847812570316,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.6495052959749,
"count": 22821,
"self": 185.6495052959749
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0630001270328648e-06,
"count": 1,
"self": 1.0630001270328648e-06
},
"TrainerController._save_models": {
"total": 0.09469939100017655,
"count": 1,
"self": 0.001404193000325904,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09329519799985064,
"count": 1,
"self": 0.09329519799985064
}
}
}
}
}
}
}