{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4056590795516968, "min": 1.4056590795516968, "max": 1.4291362762451172, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 69726.3125, "min": 68858.6484375, "max": 76533.8359375, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 112.92776523702031, "min": 99.64788732394366, "max": 413.6776859504132, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 50027.0, "min": 48888.0, "max": 50220.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999828.0, "min": 49936.0, "max": 1999828.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999828.0, "min": 49936.0, "max": 1999828.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.2498528957366943, "min": 0.011342151090502739, "max": 2.385820150375366, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 996.684814453125, "min": 1.3610581159591675, "max": 1148.5516357421875, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.45141690444731, "min": 1.796138015637795, "max": 3.7247653547423836, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1528.9776886701584, "min": 215.53656187653542, "max": 1741.523704946041, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.45141690444731, "min": 1.796138015637795, "max": 3.7247653547423836, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1528.9776886701584, "min": 215.53656187653542, "max": 1741.523704946041, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.016375493091315245, "min": 0.01310714571865598, "max": 0.02057017867191462, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.03275098618263049, "min": 0.02621429143731196, "max": 0.06015977509954003, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.048273347939054176, "min": 0.022370987261335053, "max": 0.0661503962551554, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.09654669587810835, "min": 0.044741974522670105, "max": 0.18838233252366382, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.950798683099995e-06, "min": 3.950798683099995e-06, "max": 0.00029527192657602505, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 7.90159736619999e-06, "min": 7.90159736619999e-06, "max": 0.0008436874687708499, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10131690000000002, "min": 0.10131690000000002, "max": 0.19842397499999997, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.20263380000000003, "min": 0.20263380000000003, "max": 0.58122915, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 7.571330999999995e-05, "min": 7.571330999999995e-05, "max": 0.0049213563525000005, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.0001514266199999999, "min": 0.0001514266199999999, "max": 0.014063334585, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1691482645", "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.31.0.dev0", "mlagents_envs_version": "0.31.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.11.0+cu102", "numpy_version": "1.21.2", "end_time_seconds": 
"1691485318" }, "total": 2673.175017628, "count": 1, "self": 0.4879953990002832, "children": { "run_training.setup": { "total": 0.04780004399981408, "count": 1, "self": 0.04780004399981408 }, "TrainerController.start_learning": { "total": 2672.639222185, "count": 1, "self": 4.744135461035057, "children": { "TrainerController._reset_env": { "total": 7.294520609000074, "count": 1, "self": 7.294520609000074 }, "TrainerController.advance": { "total": 2660.456266060965, "count": 231116, "self": 4.943309315166971, "children": { "env_step": { "total": 2051.171461742865, "count": 231116, "self": 1734.9841923480626, "children": { "SubprocessEnvManager._take_step": { "total": 313.12635762588525, "count": 231116, "self": 17.959384257906095, "children": { "TorchPolicy.evaluate": { "total": 295.16697336797915, "count": 222930, "self": 295.16697336797915 } } }, "workers": { "total": 3.0609117689173218, "count": 231116, "self": 0.0, "children": { "worker_root": { "total": 2664.4934775089114, "count": 231116, "is_parallel": true, "self": 1245.2547654699579, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.00103322399991157, "count": 1, "is_parallel": true, "self": 0.00030511800014210166, "children": { "_process_rank_one_or_two_observation": { "total": 0.0007281059997694683, "count": 2, "is_parallel": true, "self": 0.0007281059997694683 } } }, "UnityEnvironment.step": { "total": 0.05466423000007126, "count": 1, "is_parallel": true, "self": 0.00034799400032170524, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00021773699995719653, "count": 1, "is_parallel": true, "self": 0.00021773699995719653 }, "communicator.exchange": { "total": 0.053313232999926186, "count": 1, "is_parallel": true, "self": 0.053313232999926186 }, "steps_from_proto": { "total": 0.0007852659998661693, "count": 1, "is_parallel": true, "self": 0.00023997599987524154, "children": { "_process_rank_one_or_two_observation": { "total": 0.0005452899999909278, "count": 2, "is_parallel": true, "self": 0.0005452899999909278 } } } } } } }, "UnityEnvironment.step": { "total": 1419.2387120389535, "count": 231115, "is_parallel": true, "self": 43.94528819987431, "children": { "UnityEnvironment._generate_step_input": { "total": 89.81313504604009, "count": 231115, "is_parallel": true, "self": 89.81313504604009 }, "communicator.exchange": { "total": 1178.0865736080152, "count": 231115, "is_parallel": true, "self": 1178.0865736080152 }, "steps_from_proto": { "total": 107.3937151850239, "count": 231115, "is_parallel": true, "self": 38.34333968017472, "children": { "_process_rank_one_or_two_observation": { "total": 69.05037550484917, "count": 462230, "is_parallel": true, "self": 69.05037550484917 } } } } } } } } } } }, "trainer_advance": { "total": 604.3414950029328, "count": 231116, "self": 7.165314813027408, "children": { "process_trajectory": { "total": 141.66958528490545, "count": 231116, "self": 140.05183117590605, "children": { "RLTrainer._checkpoint": { "total": 1.6177541089994065, "count": 10, "self": 1.6177541089994065 } } }, "_update_policy": { "total": 455.50659490499993, "count": 96, "self": 393.00301379599273, "children": { "TorchPPOOptimizer.update": { "total": 62.5035811090072, "count": 2880, "self": 62.5035811090072 } } } } } } }, "trainer_threads": { "total": 9.649997991800774e-07, "count": 1, "self": 9.649997991800774e-07 }, "TrainerController._save_models": { "total": 0.14429908900001465, "count": 1, "self": 
0.0024198800001613563, "children": { "RLTrainer._checkpoint": { "total": 0.1418792089998533, "count": 1, "self": 0.1418792089998533 } } } } } } }