{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5916886925697327,
"min": 0.5885145664215088,
"max": 1.505496859550476,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17845.330078125,
"min": 17354.1171875,
"max": 45670.75390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989945.0,
"min": 29952.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989945.0,
"min": 29952.0,
"max": 989945.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.30596616864204407,
"min": -0.1004587784409523,
"max": 0.30596616864204407,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 80.46910095214844,
"min": -24.110107421875,
"max": 80.46910095214844,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.2756027281284332,
"min": -0.40640828013420105,
"max": 0.2756027281284332,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 72.4835205078125,
"min": -104.85333251953125,
"max": 72.4835205078125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07299499735258994,
"min": 0.0650588906809025,
"max": 0.07391508124888248,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0949249602888491,
"min": 0.49850405713418844,
"max": 1.0949249602888491,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.04544123583412395,
"min": 0.00013786267628415727,
"max": 0.04544123583412395,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.6816185375118592,
"min": 0.0017922147916940444,
"max": 0.6816185375118592,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.507937497386668e-06,
"min": 7.507937497386668e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011261906246080001,
"min": 0.00011261906246080001,
"max": 0.0032552912149029995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250261333333334,
"min": 0.10250261333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375392,
"min": 1.3886848,
"max": 2.3850969999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002600110720000001,
"min": 0.0002600110720000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039001660800000015,
"min": 0.0039001660800000015,
"max": 0.1085311903,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009679129347205162,
"min": 0.009679129347205162,
"max": 0.3335517942905426,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14518694579601288,
"min": 0.1375589519739151,
"max": 2.334862470626831,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 513.8412698412699,
"min": 513.8412698412699,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32372.0,
"min": 15984.0,
"max": 33213.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2003491784608553,
"min": -1.0000000521540642,
"max": 1.2003491784608553,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 75.62199824303389,
"min": -30.496001675724983,
"max": 75.62199824303389,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2003491784608553,
"min": -1.0000000521540642,
"max": 1.2003491784608553,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 75.62199824303389,
"min": -30.496001675724983,
"max": 75.62199824303389,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05182602661033531,
"min": 0.05182602661033531,
"max": 6.687497523613274,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2650396764511243,
"min": 2.9489239331742283,
"max": 106.99996037781239,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742036000",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742038165"
},
"total": 2165.0283848970003,
"count": 1,
"self": 0.47580551200007903,
"children": {
"run_training.setup": {
"total": 0.02015908699991087,
"count": 1,
"self": 0.02015908699991087
},
"TrainerController.start_learning": {
"total": 2164.532420298,
"count": 1,
"self": 1.337727658994936,
"children": {
"TrainerController._reset_env": {
"total": 2.4738161359998685,
"count": 1,
"self": 2.4738161359998685
},
"TrainerController.advance": {
"total": 2160.6320544590053,
"count": 63320,
"self": 1.409046139905513,
"children": {
"env_step": {
"total": 1485.1224861160858,
"count": 63320,
"self": 1328.0283551931361,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.2966909189638,
"count": 63320,
"self": 4.741240599965295,
"children": {
"TorchPolicy.evaluate": {
"total": 151.5554503189985,
"count": 62562,
"self": 151.5554503189985
}
}
},
"workers": {
"total": 0.797440003985912,
"count": 63320,
"self": 0.0,
"children": {
"worker_root": {
"total": 2159.524144812988,
"count": 63320,
"is_parallel": true,
"self": 943.8024561050256,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002355125999883967,
"count": 1,
"is_parallel": true,
"self": 0.0007959319998462888,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001559194000037678,
"count": 8,
"is_parallel": true,
"self": 0.001559194000037678
}
}
},
"UnityEnvironment.step": {
"total": 0.09869333500000721,
"count": 1,
"is_parallel": true,
"self": 0.000559686000315196,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042618799989213585,
"count": 1,
"is_parallel": true,
"self": 0.00042618799989213585
},
"communicator.exchange": {
"total": 0.09594364299982772,
"count": 1,
"is_parallel": true,
"self": 0.09594364299982772
},
"steps_from_proto": {
"total": 0.0017638179999721615,
"count": 1,
"is_parallel": true,
"self": 0.000404335000212086,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013594829997600755,
"count": 8,
"is_parallel": true,
"self": 0.0013594829997600755
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1215.7216887079626,
"count": 63319,
"is_parallel": true,
"self": 31.730198557964968,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.250416356030428,
"count": 63319,
"is_parallel": true,
"self": 23.250416356030428
},
"communicator.exchange": {
"total": 1062.9444731569745,
"count": 63319,
"is_parallel": true,
"self": 1062.9444731569745
},
"steps_from_proto": {
"total": 97.79660063699271,
"count": 63319,
"is_parallel": true,
"self": 19.896709771073574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.89989086591913,
"count": 506552,
"is_parallel": true,
"self": 77.89989086591913
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 674.1005222030137,
"count": 63320,
"self": 2.4676304970641922,
"children": {
"process_trajectory": {
"total": 126.26876715295066,
"count": 63320,
"self": 126.06463618395128,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20413096899937955,
"count": 2,
"self": 0.20413096899937955
}
}
},
"_update_policy": {
"total": 545.3641245529989,
"count": 440,
"self": 299.7092829199678,
"children": {
"TorchPPOOptimizer.update": {
"total": 245.65484163303108,
"count": 22815,
"self": 245.65484163303108
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.269997462979518e-07,
"count": 1,
"self": 9.269997462979518e-07
},
"TrainerController._save_models": {
"total": 0.08882111699995221,
"count": 1,
"self": 0.00174159200014401,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0870795249998082,
"count": 1,
"self": 0.0870795249998082
}
}
}
}
}
}
}