{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5617032051086426,
"min": 0.5616452693939209,
"max": 1.397953987121582,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16797.171875,
"min": 16741.521484375,
"max": 42408.33203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.40104326605796814,
"min": -0.10656289011240005,
"max": 0.44305431842803955,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 105.47438049316406,
"min": -25.681655883789062,
"max": 120.06771850585938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0432167686522007,
"min": -0.006907826289534569,
"max": 0.3975681960582733,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 11.366009712219238,
"min": -1.7131409645080566,
"max": 94.22366333007812,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06881605848826204,
"min": 0.06500479106279188,
"max": 0.07605875976289446,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9634248188356686,
"min": 0.5234826736649949,
"max": 1.088256313239553,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01438649218887024,
"min": 0.00031962836628717027,
"max": 0.016089552289333307,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20141089064418335,
"min": 0.004155168761733214,
"max": 0.22525373205066632,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.734804564621431e-06,
"min": 7.734804564621431e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010828726390470003,
"min": 0.00010828726390470003,
"max": 0.0035071100309633997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257823571428573,
"min": 0.10257823571428573,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360953000000003,
"min": 1.3886848,
"max": 2.5690365999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026756574785714296,
"min": 0.00026756574785714296,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037459204700000014,
"min": 0.0037459204700000014,
"max": 0.11692675634,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013745754957199097,
"min": 0.013745754957199097,
"max": 0.5451947450637817,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19244056940078735,
"min": 0.19244056940078735,
"max": 3.8163630962371826,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 429.60869565217394,
"min": 416.49295774647885,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29643.0,
"min": 15984.0,
"max": 32292.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4254144656917322,
"min": -1.0000000521540642,
"max": 1.4707943408841817,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 98.35359813272953,
"min": -31.998401656746864,
"max": 106.51179795712233,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4254144656917322,
"min": -1.0000000521540642,
"max": 1.4707943408841817,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 98.35359813272953,
"min": -31.998401656746864,
"max": 106.51179795712233,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.061058589730975284,
"min": 0.06059950195757362,
"max": 11.147597813978791,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.213042691437295,
"min": 4.213042691437295,
"max": 178.36156502366066,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696083536",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1696085742"
},
"total": 2205.5096978150004,
"count": 1,
"self": 1.3923778540010971,
"children": {
"run_training.setup": {
"total": 0.042068976999871666,
"count": 1,
"self": 0.042068976999871666
},
"TrainerController.start_learning": {
"total": 2204.0752509839995,
"count": 1,
"self": 1.524763874142991,
"children": {
"TrainerController._reset_env": {
"total": 4.493136217999563,
"count": 1,
"self": 4.493136217999563
},
"TrainerController.advance": {
"total": 2197.898592585857,
"count": 63556,
"self": 1.5501165038403997,
"children": {
"env_step": {
"total": 1536.7668869770687,
"count": 63556,
"self": 1419.505300781735,
"children": {
"SubprocessEnvManager._take_step": {
"total": 116.36122389922548,
"count": 63556,
"self": 5.007872869305174,
"children": {
"TorchPolicy.evaluate": {
"total": 111.3533510299203,
"count": 62571,
"self": 111.3533510299203
}
}
},
"workers": {
"total": 0.9003622961081419,
"count": 63556,
"self": 0.0,
"children": {
"worker_root": {
"total": 2198.5452741888575,
"count": 63556,
"is_parallel": true,
"self": 901.4502270617413,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018764899996313034,
"count": 1,
"is_parallel": true,
"self": 0.0006471299993791035,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012293600002521998,
"count": 8,
"is_parallel": true,
"self": 0.0012293600002521998
}
}
},
"UnityEnvironment.step": {
"total": 0.07567123700027878,
"count": 1,
"is_parallel": true,
"self": 0.0005740220003644936,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005066370003987686,
"count": 1,
"is_parallel": true,
"self": 0.0005066370003987686
},
"communicator.exchange": {
"total": 0.07198048000009294,
"count": 1,
"is_parallel": true,
"self": 0.07198048000009294
},
"steps_from_proto": {
"total": 0.002610097999422578,
"count": 1,
"is_parallel": true,
"self": 0.0004008719988632947,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022092260005592834,
"count": 8,
"is_parallel": true,
"self": 0.0022092260005592834
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1297.0950471271162,
"count": 63555,
"is_parallel": true,
"self": 34.76039005588609,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.83369851507541,
"count": 63555,
"is_parallel": true,
"self": 24.83369851507541
},
"communicator.exchange": {
"total": 1124.9869280600897,
"count": 63555,
"is_parallel": true,
"self": 1124.9869280600897
},
"steps_from_proto": {
"total": 112.51403049606506,
"count": 63555,
"is_parallel": true,
"self": 22.40281665492239,
"children": {
"_process_rank_one_or_two_observation": {
"total": 90.11121384114267,
"count": 508440,
"is_parallel": true,
"self": 90.11121384114267
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 659.5815891049479,
"count": 63556,
"self": 2.6765653039310564,
"children": {
"process_trajectory": {
"total": 116.97870641801183,
"count": 63556,
"self": 116.63384791201224,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3448585059995821,
"count": 2,
"self": 0.3448585059995821
}
}
},
"_update_policy": {
"total": 539.926317383005,
"count": 451,
"self": 350.8303022259797,
"children": {
"TorchPPOOptimizer.update": {
"total": 189.09601515702525,
"count": 22821,
"self": 189.09601515702525
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3799999578623101e-06,
"count": 1,
"self": 1.3799999578623101e-06
},
"TrainerController._save_models": {
"total": 0.15875692600002367,
"count": 1,
"self": 0.0018596009995235363,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15689732500050013,
"count": 1,
"self": 0.15689732500050013
}
}
}
}
}
}
}