{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.14502044022083282,
"min": 0.12752866744995117,
"max": 1.4461236000061035,
"count": 166
},
"Pyramids.Policy.Entropy.sum": {
"value": 4390.05859375,
"min": 3852.385986328125,
"max": 43869.60546875,
"count": 166
},
"Pyramids.Step.mean": {
"value": 4979996.0,
"min": 29952.0,
"max": 4979996.0,
"count": 166
},
"Pyramids.Step.sum": {
"value": 4979996.0,
"min": 29952.0,
"max": 4979996.0,
"count": 166
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.839003324508667,
"min": -0.21277064085006714,
"max": 0.9030649662017822,
"count": 166
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 251.70098876953125,
"min": -50.42664337158203,
"max": 283.5624084472656,
"count": 166
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005599901080131531,
"min": -0.3180496096611023,
"max": 1.1370110511779785,
"count": 166
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.6799702644348145,
"min": -85.55534362792969,
"max": 274.0196533203125,
"count": 166
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.031900718597517835,
"min": 0.024253821147361857,
"max": 0.0378008363732821,
"count": 166
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.4466100603652497,
"min": 0.2646058546129747,
"max": 0.5254435648287957,
"count": 166
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014076724435047561,
"min": 0.001039826701958385,
"max": 0.06370900165001374,
"count": 166
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19707414209066584,
"min": 0.011562729627864124,
"max": 0.8919260231001924,
"count": 166
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0990493003500023e-06,
"min": 2.0990493003500023e-06,
"max": 0.0002990301260375771,
"count": 166
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.9386690204900035e-05,
"min": 2.9386690204900035e-05,
"max": 0.0039736534754489,
"count": 166
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10069965,
"min": 0.10069965,
"max": 0.19967670857142858,
"count": 166
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4097951,
"min": 1.39773696,
"max": 2.8245511,
"count": 166
},
"Pyramids.Policy.Beta.mean": {
"value": 7.989503500000008e-05,
"min": 7.989503500000008e-05,
"max": 0.009967703186285714,
"count": 166
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0011185304900000013,
"min": 0.0011185304900000013,
"max": 0.13247265488999999,
"count": 166
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008710531517863274,
"min": 0.008710531517863274,
"max": 1.3006354570388794,
"count": 166
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12194743752479553,
"min": 0.12194743752479553,
"max": 9.104448318481445,
"count": 166
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 217.4485294117647,
"min": 191.51700680272108,
"max": 999.0,
"count": 166
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29573.0,
"min": 15984.0,
"max": 33077.0,
"count": 166
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7664262636082015,
"min": -1.0000000521540642,
"max": 1.8087852218207097,
"count": 166
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 242.00039811432362,
"min": -31.998401656746864,
"max": 272.9329990595579,
"count": 166
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7664262636082015,
"min": -1.0000000521540642,
"max": 1.8087852218207097,
"count": 166
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 242.00039811432362,
"min": -31.998401656746864,
"max": 272.9329990595579,
"count": 166
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.019236186884948433,
"min": 0.01854462163040201,
"max": 14.69947513192892,
"count": 166
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.635357603237935,
"min": 2.635357603237935,
"max": 409.5954595208168,
"count": 166
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 166
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 166
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1659375787",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./trained-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1659386195"
},
"total": 10407.217677057999,
"count": 1,
"self": 0.5280591439986893,
"children": {
"run_training.setup": {
"total": 0.042697342999986176,
"count": 1,
"self": 0.042697342999986176
},
"TrainerController.start_learning": {
"total": 10406.646920571,
"count": 1,
"self": 7.817220536928289,
"children": {
"TrainerController._reset_env": {
"total": 9.272099446000084,
"count": 1,
"self": 9.272099446000084
},
"TrainerController.advance": {
"total": 10389.461723815073,
"count": 326524,
"self": 7.88366087736722,
"children": {
"env_step": {
"total": 7664.625016691063,
"count": 326524,
"self": 7140.773809630927,
"children": {
"SubprocessEnvManager._take_step": {
"total": 519.8157740460196,
"count": 326524,
"self": 23.09924651539302,
"children": {
"TorchPolicy.evaluate": {
"total": 496.71652753062654,
"count": 312567,
"self": 174.31968312556387,
"children": {
"TorchPolicy.sample_actions": {
"total": 322.39684440506267,
"count": 312567,
"self": 322.39684440506267
}
}
}
}
},
"workers": {
"total": 4.03543301411662,
"count": 326524,
"self": 0.0,
"children": {
"worker_root": {
"total": 10386.436381120398,
"count": 326524,
"is_parallel": true,
"self": 3768.504369527115,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007553469000072255,
"count": 1,
"is_parallel": true,
"self": 0.003791787000295699,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003761681999776556,
"count": 8,
"is_parallel": true,
"self": 0.003761681999776556
}
}
},
"UnityEnvironment.step": {
"total": 0.04410118600003443,
"count": 1,
"is_parallel": true,
"self": 0.0005384729998922921,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004733550000537434,
"count": 1,
"is_parallel": true,
"self": 0.0004733550000537434
},
"communicator.exchange": {
"total": 0.041371582000010676,
"count": 1,
"is_parallel": true,
"self": 0.041371582000010676
},
"steps_from_proto": {
"total": 0.0017177760000777198,
"count": 1,
"is_parallel": true,
"self": 0.00044001500009471783,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001277760999983002,
"count": 8,
"is_parallel": true,
"self": 0.001277760999983002
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 6617.932011593283,
"count": 326523,
"is_parallel": true,
"self": 140.71539358129394,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 115.27701619413165,
"count": 326523,
"is_parallel": true,
"self": 115.27701619413165
},
"communicator.exchange": {
"total": 5896.185486113087,
"count": 326523,
"is_parallel": true,
"self": 5896.185486113087
},
"steps_from_proto": {
"total": 465.75411570477047,
"count": 326523,
"is_parallel": true,
"self": 117.12614435011949,
"children": {
"_process_rank_one_or_two_observation": {
"total": 348.627971354651,
"count": 2612184,
"is_parallel": true,
"self": 348.627971354651
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2716.9530462466423,
"count": 326524,
"self": 14.606089183349013,
"children": {
"process_trajectory": {
"total": 811.7579510102942,
"count": 326524,
"self": 810.749741571294,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0082094390002112,
"count": 10,
"self": 1.0082094390002112
}
}
},
"_update_policy": {
"total": 1890.5890060529991,
"count": 2334,
"self": 1037.4484621020451,
"children": {
"TorchPPOOptimizer.update": {
"total": 853.1405439509539,
"count": 28356,
"self": 853.1405439509539
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.207999957841821e-06,
"count": 1,
"self": 1.207999957841821e-06
},
"TrainerController._save_models": {
"total": 0.09587556499900529,
"count": 1,
"self": 0.0018247179978061467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09405084700119914,
"count": 1,
"self": 0.09405084700119914
}
}
}
}
}
}