{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3090593218803406,
"min": 0.29969552159309387,
"max": 1.4599910974502563,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9252.0,
"min": 9072.3828125,
"max": 44290.2890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989969.0,
"min": 29952.0,
"max": 989969.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5202628374099731,
"min": -0.11345171183347702,
"max": 0.5222572088241577,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 141.51148986816406,
"min": -27.341861724853516,
"max": 141.51148986816406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02299434319138527,
"min": -0.010421034879982471,
"max": 0.4460807144641876,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.254461288452148,
"min": -2.792837381362915,
"max": 105.72113037109375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06868918770868775,
"min": 0.06623856824315975,
"max": 0.0735516142342293,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9616486279216285,
"min": 0.5148612996396051,
"max": 1.044684574531857,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014616191838978833,
"min": 0.00028738573535383455,
"max": 0.016489298325114334,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20462668574570367,
"min": 0.00316124308889218,
"max": 0.23085017655160067,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.478968935614284e-06,
"min": 7.478968935614284e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010470556509859997,
"min": 0.00010470556509859997,
"max": 0.0033822284725905996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249295714285714,
"min": 0.10249295714285714,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4349014,
"min": 1.3886848,
"max": 2.5276197,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002590464185714285,
"min": 0.0002590464185714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036266498599999996,
"min": 0.0036266498599999996,
"max": 0.11276819905999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014492576010525227,
"min": 0.014492576010525227,
"max": 0.46366044878959656,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20289605855941772,
"min": 0.20289605855941772,
"max": 3.2456231117248535,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 381.9125,
"min": 347.70588235294116,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30553.0,
"min": 15984.0,
"max": 34016.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.543062483239919,
"min": -1.0000000521540642,
"max": 1.6287552779211718,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 123.44499865919352,
"min": -30.99940161406994,
"max": 138.4441986232996,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.543062483239919,
"min": -1.0000000521540642,
"max": 1.6287552779211718,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 123.44499865919352,
"min": -30.99940161406994,
"max": 138.4441986232996,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05835482214024523,
"min": 0.05438552034628468,
"max": 9.965992421843112,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.668385771219619,
"min": 4.515560684492812,
"max": 159.45587874948978,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1754780184",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1754782356"
},
"total": 2171.9928126139994,
"count": 1,
"self": 0.48299024799962353,
"children": {
"run_training.setup": {
"total": 0.020082340000044496,
"count": 1,
"self": 0.020082340000044496
},
"TrainerController.start_learning": {
"total": 2171.4897400259997,
"count": 1,
"self": 1.3563397509824426,
"children": {
"TrainerController._reset_env": {
"total": 2.3960660120001194,
"count": 1,
"self": 2.3960660120001194
},
"TrainerController.advance": {
"total": 2167.6549590880168,
"count": 63799,
"self": 1.4016982070479571,
"children": {
"env_step": {
"total": 1501.6006381519474,
"count": 63799,
"self": 1350.907628273027,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.88941506400897,
"count": 63799,
"self": 4.606544288107671,
"children": {
"TorchPolicy.evaluate": {
"total": 145.2828707759013,
"count": 62559,
"self": 145.2828707759013
}
}
},
"workers": {
"total": 0.803594814911321,
"count": 63799,
"self": 0.0,
"children": {
"worker_root": {
"total": 2166.63190238591,
"count": 63799,
"is_parallel": true,
"self": 930.1250539659991,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018573540000943467,
"count": 1,
"is_parallel": true,
"self": 0.0006166590001157601,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012406949999785866,
"count": 8,
"is_parallel": true,
"self": 0.0012406949999785866
}
}
},
"UnityEnvironment.step": {
"total": 0.07549244499978158,
"count": 1,
"is_parallel": true,
"self": 0.0005474610002238478,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044709799976772047,
"count": 1,
"is_parallel": true,
"self": 0.00044709799976772047
},
"communicator.exchange": {
"total": 0.07272639799975877,
"count": 1,
"is_parallel": true,
"self": 0.07272639799975877
},
"steps_from_proto": {
"total": 0.0017714880000312405,
"count": 1,
"is_parallel": true,
"self": 0.0004214690006847377,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013500189993465028,
"count": 8,
"is_parallel": true,
"self": 0.0013500189993465028
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1236.506848419911,
"count": 63798,
"is_parallel": true,
"self": 31.819523921988093,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.46254703400291,
"count": 63798,
"is_parallel": true,
"self": 23.46254703400291
},
"communicator.exchange": {
"total": 1083.6366623389513,
"count": 63798,
"is_parallel": true,
"self": 1083.6366623389513
},
"steps_from_proto": {
"total": 97.58811512496868,
"count": 63798,
"is_parallel": true,
"self": 19.489272191050077,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.0988429339186,
"count": 510384,
"is_parallel": true,
"self": 78.0988429339186
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 664.6526227290215,
"count": 63799,
"self": 2.588716452013614,
"children": {
"process_trajectory": {
"total": 128.0111472230028,
"count": 63799,
"self": 127.8174796690032,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19366755399960311,
"count": 2,
"self": 0.19366755399960311
}
}
},
"_update_policy": {
"total": 534.052759054005,
"count": 448,
"self": 299.5116212890257,
"children": {
"TorchPPOOptimizer.update": {
"total": 234.54113776497934,
"count": 22869,
"self": 234.54113776497934
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0270005077472888e-06,
"count": 1,
"self": 1.0270005077472888e-06
},
"TrainerController._save_models": {
"total": 0.08237414799987164,
"count": 1,
"self": 0.001406493000104092,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08096765499976755,
"count": 1,
"self": 0.08096765499976755
}
}
}
}
}
}
}