{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2011803835630417,
"min": 0.193767711520195,
"max": 1.5320097208023071,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 6054.724609375,
"min": 5760.32666015625,
"max": 46475.046875,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999931.0,
"min": 29952.0,
"max": 2999931.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999931.0,
"min": 29952.0,
"max": 2999931.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7848445177078247,
"min": -0.1989630162715912,
"max": 0.8722542524337769,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 233.09881591796875,
"min": -47.15423583984375,
"max": 264.29302978515625,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0014284075004979968,
"min": -0.015381427481770515,
"max": 0.24416615068912506,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.4242370128631592,
"min": -4.59904670715332,
"max": 59.08820724487305,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06740375975043408,
"min": 0.06378558602277647,
"max": 0.0748494335161153,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0110563962565113,
"min": 0.5239460346128071,
"max": 1.1151226392248645,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015133762459218917,
"min": 0.0005643838093559204,
"max": 0.017800361092117745,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22700643688828376,
"min": 0.005643838093559204,
"max": 0.24920505528964843,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4831661723111103e-06,
"min": 1.4831661723111103e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.2247492584666655e-05,
"min": 2.2247492584666655e-05,
"max": 0.0037596765467745328,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049435555555555,
"min": 0.10049435555555555,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5074153333333333,
"min": 1.3962282666666668,
"max": 2.752535466666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.938611999999997e-05,
"min": 5.938611999999997e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008907917999999996,
"min": 0.0008907917999999996,
"max": 0.12533722412,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005414103157818317,
"min": 0.005414103157818317,
"max": 0.40081527829170227,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08121154457330704,
"min": 0.07452377676963806,
"max": 2.8057069778442383,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 242.10236220472441,
"min": 206.19285714285715,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30747.0,
"min": 15984.0,
"max": 33275.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.741038083793625,
"min": -1.0000000521540642,
"max": 1.7938071282846586,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 219.37079855799675,
"min": -30.999801620841026,
"max": 252.05299857258797,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.741038083793625,
"min": -1.0000000521540642,
"max": 1.7938071282846586,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 219.37079855799675,
"min": -30.999801620841026,
"max": 252.05299857258797,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.01365075544869782,
"min": 0.012193944957669773,
"max": 8.253531465306878,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7199951865359253,
"min": 1.578216268455435,
"max": 132.05650344491005,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718385532",
"python_version": "3.10.14 (main, Jun 14 2024, 18:23:26) [GCC 11.4.0]",
"command_line_arguments": "/home/ivan/.env/hf310/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718388986"
},
"total": 3454.202459977998,
"count": 1,
"self": 0.2730079870088957,
"children": {
"run_training.setup": {
"total": 0.016478620993439108,
"count": 1,
"self": 0.016478620993439108
},
"TrainerController.start_learning": {
"total": 3453.9129733699956,
"count": 1,
"self": 3.1883098795078695,
"children": {
"TrainerController._reset_env": {
"total": 1.079360159987118,
"count": 1,
"self": 1.079360159987118
},
"TrainerController.advance": {
"total": 3449.5827327455045,
"count": 194529,
"self": 3.1571178306476213,
"children": {
"env_step": {
"total": 2299.304055316723,
"count": 194529,
"self": 2036.4229290371295,
"children": {
"SubprocessEnvManager._take_step": {
"total": 260.63575810962357,
"count": 194529,
"self": 8.209688429138623,
"children": {
"TorchPolicy.evaluate": {
"total": 252.42606968048494,
"count": 187559,
"self": 252.42606968048494
}
}
},
"workers": {
"total": 2.245368169969879,
"count": 194529,
"self": 0.0,
"children": {
"worker_root": {
"total": 3449.919804409088,
"count": 194529,
"is_parallel": true,
"self": 1638.3160552232293,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011156299733556807,
"count": 1,
"is_parallel": true,
"self": 0.0003170139971189201,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007986159762367606,
"count": 8,
"is_parallel": true,
"self": 0.0007986159762367606
}
}
},
"UnityEnvironment.step": {
"total": 0.02225911198183894,
"count": 1,
"is_parallel": true,
"self": 0.0002020609681494534,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001858019968494773,
"count": 1,
"is_parallel": true,
"self": 0.0001858019968494773
},
"communicator.exchange": {
"total": 0.02120309800375253,
"count": 1,
"is_parallel": true,
"self": 0.02120309800375253
},
"steps_from_proto": {
"total": 0.0006681510130874813,
"count": 1,
"is_parallel": true,
"self": 0.00016805896302685142,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005000920500606298,
"count": 8,
"is_parallel": true,
"self": 0.0005000920500606298
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1811.6037491858588,
"count": 194528,
"is_parallel": true,
"self": 67.418146585871,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.605353854771238,
"count": 194528,
"is_parallel": true,
"self": 31.605353854771238
},
"communicator.exchange": {
"total": 1541.537474426499,
"count": 194528,
"is_parallel": true,
"self": 1541.537474426499
},
"steps_from_proto": {
"total": 171.04277431871742,
"count": 194528,
"is_parallel": true,
"self": 32.998097137140576,
"children": {
"_process_rank_one_or_two_observation": {
"total": 138.04467718157684,
"count": 1556224,
"is_parallel": true,
"self": 138.04467718157684
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1147.121559598134,
"count": 194529,
"self": 5.6806608503102325,
"children": {
"process_trajectory": {
"total": 226.40099165728316,
"count": 194529,
"self": 225.89157717325725,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5094144840259105,
"count": 6,
"self": 0.5094144840259105
}
}
},
"_update_policy": {
"total": 915.0399070905405,
"count": 1395,
"self": 550.9960433689994,
"children": {
"TorchPPOOptimizer.update": {
"total": 364.04386372154113,
"count": 68442,
"self": 364.04386372154113
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.560003384947777e-07,
"count": 1,
"self": 6.560003384947777e-07
},
"TrainerController._save_models": {
"total": 0.06256992899579927,
"count": 1,
"self": 0.00104069197550416,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06152923702029511,
"count": 1,
"self": 0.06152923702029511
}
}
}
}
}
}
}