{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6266112327575684,
"min": 0.6195098161697388,
"max": 1.4313980340957642,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18858.4921875,
"min": 18666.369140625,
"max": 43422.890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989904.0,
"min": 29952.0,
"max": 989904.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989904.0,
"min": 29952.0,
"max": 989904.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.10021860152482986,
"min": -0.11149105429649353,
"max": 0.16706934571266174,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 24.85421371459961,
"min": -26.42337989807129,
"max": 42.10147476196289,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005289188586175442,
"min": -0.05889393761754036,
"max": 0.3789993226528168,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.3117188215255737,
"min": -14.841272354125977,
"max": 89.82283782958984,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06923651513225144,
"min": 0.06566447798953606,
"max": 0.0724332001237927,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9693112118515201,
"min": 0.4957873256297163,
"max": 1.0519123977539817,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.006032011421623229,
"min": 6.410518252799537e-05,
"max": 0.010225357106202293,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.08444815990272521,
"min": 0.0008333673728639398,
"max": 0.1434543826450619,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2879332850071435e-06,
"min": 7.2879332850071435e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010203106599010001,
"min": 0.00010203106599010001,
"max": 0.0035084738305087996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242927857142858,
"min": 0.10242927857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340099000000002,
"min": 1.3886848,
"max": 2.5694912,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002526849292857144,
"min": 0.0002526849292857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003537589010000001,
"min": 0.003537589010000001,
"max": 0.11697217088,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012031533755362034,
"min": 0.012031533755362034,
"max": 0.4214365482330322,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16844147443771362,
"min": 0.16844147443771362,
"max": 2.9500558376312256,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 712.8636363636364,
"min": 622.6304347826087,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31366.0,
"min": 15984.0,
"max": 33861.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.28679996203969826,
"min": -1.0000000521540642,
"max": 0.7685086582989796,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 12.619198329746723,
"min": -29.984001711010933,
"max": 35.35139828175306,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.28679996203969826,
"min": -1.0000000521540642,
"max": 0.7685086582989796,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 12.619198329746723,
"min": -29.984001711010933,
"max": 35.35139828175306,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08704382547842794,
"min": 0.07937806729308289,
"max": 8.651869429275393,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8299283210508293,
"min": 3.6433891475899145,
"max": 138.4299108684063,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685637615",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685640873"
},
"total": 3258.687321362,
"count": 1,
"self": 0.8521592490005787,
"children": {
"run_training.setup": {
"total": 0.0678623050000624,
"count": 1,
"self": 0.0678623050000624
},
"TrainerController.start_learning": {
"total": 3257.7672998079997,
"count": 1,
"self": 2.6144310609565764,
"children": {
"TrainerController._reset_env": {
"total": 1.0591109530000722,
"count": 1,
"self": 1.0591109530000722
},
"TrainerController.advance": {
"total": 3253.979553959043,
"count": 63292,
"self": 2.4429497231753885,
"children": {
"env_step": {
"total": 2078.0698802549414,
"count": 63292,
"self": 1935.141132777964,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.38698347103264,
"count": 63292,
"self": 7.0275143119583845,
"children": {
"TorchPolicy.evaluate": {
"total": 134.35946915907425,
"count": 62559,
"self": 134.35946915907425
}
}
},
"workers": {
"total": 1.5417640059447422,
"count": 63292,
"self": 0.0,
"children": {
"worker_root": {
"total": 3251.0592118920313,
"count": 63292,
"is_parallel": true,
"self": 1492.0053901019685,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003113027000154034,
"count": 1,
"is_parallel": true,
"self": 0.0009668839998084877,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002146143000345546,
"count": 8,
"is_parallel": true,
"self": 0.002146143000345546
}
}
},
"UnityEnvironment.step": {
"total": 0.06381458100008786,
"count": 1,
"is_parallel": true,
"self": 0.0006579739999779122,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042191500006083515,
"count": 1,
"is_parallel": true,
"self": 0.00042191500006083515
},
"communicator.exchange": {
"total": 0.06040365700005168,
"count": 1,
"is_parallel": true,
"self": 0.06040365700005168
},
"steps_from_proto": {
"total": 0.002331034999997428,
"count": 1,
"is_parallel": true,
"self": 0.00047907599991958705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018519590000778408,
"count": 8,
"is_parallel": true,
"self": 0.0018519590000778408
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1759.0538217900628,
"count": 63291,
"is_parallel": true,
"self": 46.297120909992145,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.852240827914102,
"count": 63291,
"is_parallel": true,
"self": 25.852240827914102
},
"communicator.exchange": {
"total": 1548.3821290810993,
"count": 63291,
"is_parallel": true,
"self": 1548.3821290810993
},
"steps_from_proto": {
"total": 138.52233097105727,
"count": 63291,
"is_parallel": true,
"self": 29.526155669067748,
"children": {
"_process_rank_one_or_two_observation": {
"total": 108.99617530198952,
"count": 506328,
"is_parallel": true,
"self": 108.99617530198952
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1173.4667239809264,
"count": 63292,
"self": 4.619700237959023,
"children": {
"process_trajectory": {
"total": 147.38542179796468,
"count": 63292,
"self": 147.0363191839656,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3491026139990936,
"count": 2,
"self": 0.3491026139990936
}
}
},
"_update_policy": {
"total": 1021.4616019450027,
"count": 448,
"self": 425.31837266501,
"children": {
"TorchPPOOptimizer.update": {
"total": 596.1432292799927,
"count": 22815,
"self": 596.1432292799927
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2919999790028669e-06,
"count": 1,
"self": 1.2919999790028669e-06
},
"TrainerController._save_models": {
"total": 0.11420254299991939,
"count": 1,
"self": 0.0018042030005744891,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1123983399993449,
"count": 1,
"self": 0.1123983399993449
}
}
}
}
}
}
}