{ "name": "root", "gauges": { "Huggy.Policy.Entropy.mean": { "value": 1.4009993076324463, "min": 1.4009993076324463, "max": 1.4273499250411987, "count": 40 }, "Huggy.Policy.Entropy.sum": { "value": 71180.5703125, "min": 67301.296875, "max": 79338.125, "count": 40 }, "Huggy.Environment.EpisodeLength.mean": { "value": 119.4433734939759, "min": 107.65217391304348, "max": 409.479674796748, "count": 40 }, "Huggy.Environment.EpisodeLength.sum": { "value": 49569.0, "min": 48946.0, "max": 50366.0, "count": 40 }, "Huggy.Step.mean": { "value": 1999900.0, "min": 49851.0, "max": 1999900.0, "count": 40 }, "Huggy.Step.sum": { "value": 1999900.0, "min": 49851.0, "max": 1999900.0, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.mean": { "value": 2.2281832695007324, "min": 0.16367073357105255, "max": 2.346702814102173, "count": 40 }, "Huggy.Policy.ExtrinsicValueEstimate.sum": { "value": 924.696044921875, "min": 19.96782875061035, "max": 1081.8299560546875, "count": 40 }, "Huggy.Environment.CumulativeReward.mean": { "value": 3.408450821077967, "min": 1.9532680611629956, "max": 3.809340357746201, "count": 40 }, "Huggy.Environment.CumulativeReward.sum": { "value": 1414.5070907473564, "min": 238.29870346188545, "max": 1657.0630556195974, "count": 40 }, "Huggy.Policy.ExtrinsicReward.mean": { "value": 3.408450821077967, "min": 1.9532680611629956, "max": 3.809340357746201, "count": 40 }, "Huggy.Policy.ExtrinsicReward.sum": { "value": 1414.5070907473564, "min": 238.29870346188545, "max": 1657.0630556195974, "count": 40 }, "Huggy.Losses.PolicyLoss.mean": { "value": 0.019234502796381194, "min": 0.0137969768945671, "max": 0.01978469823661726, "count": 40 }, "Huggy.Losses.PolicyLoss.sum": { "value": 0.057703508389143586, "min": 0.0275939537891342, "max": 0.05858066388464067, "count": 40 }, "Huggy.Losses.ValueLoss.mean": { "value": 0.048364897030923094, "min": 0.021151382693399987, "max": 0.059196685627102855, "count": 40 }, "Huggy.Losses.ValueLoss.sum": { "value": 0.1450946910927693, "min": 0.04230276538679997, "max": 0.1723491551975409, "count": 40 }, "Huggy.Policy.LearningRate.mean": { "value": 3.1042989652666557e-06, "min": 3.1042989652666557e-06, "max": 0.00029531587656137507, "count": 40 }, "Huggy.Policy.LearningRate.sum": { "value": 9.312896895799967e-06, "min": 9.312896895799967e-06, "max": 0.0008438698687100501, "count": 40 }, "Huggy.Policy.Epsilon.mean": { "value": 0.10103473333333333, "min": 0.10103473333333333, "max": 0.19843862500000004, "count": 40 }, "Huggy.Policy.Epsilon.sum": { "value": 0.3031042, "min": 0.20720855, "max": 0.58128995, "count": 40 }, "Huggy.Policy.Beta.mean": { "value": 6.163319333333317e-05, "min": 6.163319333333317e-05, "max": 0.004922087387500002, "count": 40 }, "Huggy.Policy.Beta.sum": { "value": 0.00018489957999999953, "min": 0.00018489957999999953, "max": 0.014066368505000003, "count": 40 }, "Huggy.IsTraining.mean": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 }, "Huggy.IsTraining.sum": { "value": 1.0, "min": 1.0, "max": 1.0, "count": 40 } }, "metadata": { "timer_format_version": "0.1.0", "start_time_seconds": "1671680038", "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]", "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics", "mlagents_version": "0.29.0.dev0", "mlagents_envs_version": "0.29.0.dev0", "communication_protocol_version": "1.5.0", "pytorch_version": "1.8.1+cu102", "numpy_version": "1.21.6", "end_time_seconds": "1671682228" }, 
"total": 2190.59138312, "count": 1, "self": 0.39531578000014633, "children": { "run_training.setup": { "total": 0.10576051600003211, "count": 1, "self": 0.10576051600003211 }, "TrainerController.start_learning": { "total": 2190.090306824, "count": 1, "self": 3.61713112305506, "children": { "TrainerController._reset_env": { "total": 7.462019288000079, "count": 1, "self": 7.462019288000079 }, "TrainerController.advance": { "total": 2178.899687929945, "count": 230745, "self": 3.910393000870954, "children": { "env_step": { "total": 1720.6229707909692, "count": 230745, "self": 1448.5736024999474, "children": { "SubprocessEnvManager._take_step": { "total": 269.53935872302407, "count": 230745, "self": 13.966221385981726, "children": { "TorchPolicy.evaluate": { "total": 255.57313733704234, "count": 223046, "self": 63.966905134052695, "children": { "TorchPolicy.sample_actions": { "total": 191.60623220298964, "count": 223046, "self": 191.60623220298964 } } } } }, "workers": { "total": 2.510009567997713, "count": 230745, "self": 0.0, "children": { "worker_root": { "total": 2182.549331349965, "count": 230745, "is_parallel": true, "self": 987.6685983189525, "children": { "run_training.setup": { "total": 0.0, "count": 0, "is_parallel": true, "self": 0.0, "children": { "steps_from_proto": { "total": 0.002134173999934319, "count": 1, "is_parallel": true, "self": 0.00031930899990584294, "children": { "_process_rank_one_or_two_observation": { "total": 0.001814865000028476, "count": 2, "is_parallel": true, "self": 0.001814865000028476 } } }, "UnityEnvironment.step": { "total": 0.027282623000019157, "count": 1, "is_parallel": true, "self": 0.00028552100013712334, "children": { "UnityEnvironment._generate_step_input": { "total": 0.00016879199995400995, "count": 1, "is_parallel": true, "self": 0.00016879199995400995 }, "communicator.exchange": { "total": 0.026074307999920165, "count": 1, "is_parallel": true, "self": 0.026074307999920165 }, "steps_from_proto": { "total": 0.0007540020000078584, "count": 1, "is_parallel": true, "self": 0.00026486199999453675, "children": { "_process_rank_one_or_two_observation": { "total": 0.0004891400000133217, "count": 2, "is_parallel": true, "self": 0.0004891400000133217 } } } } } } }, "UnityEnvironment.step": { "total": 1194.8807330310126, "count": 230744, "is_parallel": true, "self": 34.429180225774644, "children": { "UnityEnvironment._generate_step_input": { "total": 75.46779444913466, "count": 230744, "is_parallel": true, "self": 75.46779444913466 }, "communicator.exchange": { "total": 992.5456359020294, "count": 230744, "is_parallel": true, "self": 992.5456359020294 }, "steps_from_proto": { "total": 92.43812245407378, "count": 230744, "is_parallel": true, "self": 37.92960895514693, "children": { "_process_rank_one_or_two_observation": { "total": 54.508513498926845, "count": 461488, "is_parallel": true, "self": 54.508513498926845 } } } } } } } } } } }, "trainer_advance": { "total": 454.36632413810446, "count": 230745, "self": 6.097745299105895, "children": { "process_trajectory": { "total": 137.5209885949962, "count": 230745, "self": 136.36911515999623, "children": { "RLTrainer._checkpoint": { "total": 1.1518734349999704, "count": 10, "self": 1.1518734349999704 } } }, "_update_policy": { "total": 310.74759024400237, "count": 97, "self": 258.1182828510059, "children": { "TorchPPOOptimizer.update": { "total": 52.62930739299645, "count": 2910, "self": 52.62930739299645 } } } } } } }, "trainer_threads": { "total": 1.3419999049801845e-06, "count": 1, "self": 
1.3419999049801845e-06 }, "TrainerController._save_models": { "total": 0.11146714100004829, "count": 1, "self": 0.0019254049998380651, "children": { "RLTrainer._checkpoint": { "total": 0.10954173600021022, "count": 1, "self": 0.10954173600021022 } } } } } } }