{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.406130075454712,
"min": 1.406130075454712,
"max": 1.4269648790359497,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69284.25,
"min": 69284.25,
"max": 76753.125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 101.02685950413223,
"min": 82.60367892976589,
"max": 374.8805970149254,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48897.0,
"min": 48897.0,
"max": 50251.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999950.0,
"min": 49635.0,
"max": 1999950.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999950.0,
"min": 49635.0,
"max": 1999950.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4608442783355713,
"min": -0.0028934478759765625,
"max": 2.4608442783355713,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1191.048583984375,
"min": -0.3848285675048828,
"max": 1460.8388671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7689705610521567,
"min": 1.6223638095801933,
"max": 3.8857100035131684,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1824.181751549244,
"min": 215.77438667416573,
"max": 2296.4546120762825,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7689705610521567,
"min": 1.6223638095801933,
"max": 3.8857100035131684,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1824.181751549244,
"min": 215.77438667416573,
"max": 2296.4546120762825,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014411946734789025,
"min": 0.014101347862621575,
"max": 0.020223307570874264,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.043235840204367076,
"min": 0.02820269572524315,
"max": 0.06065688488985567,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05251448113057349,
"min": 0.023626790195703507,
"max": 0.07696366152829594,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15754344339172047,
"min": 0.047253580391407014,
"max": 0.2308909845848878,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.361648879483335e-06,
"min": 3.361648879483335e-06,
"max": 0.000295344976551675,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0084946638450005e-05,
"min": 1.0084946638450005e-05,
"max": 0.00084400366866545,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1011205166666667,
"min": 0.1011205166666667,
"max": 0.19844832500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3033615500000001,
"min": 0.20743820000000002,
"max": 0.5813345500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.591378166666669e-05,
"min": 6.591378166666669e-05,
"max": 0.004922571417500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019774134500000006,
"min": 0.00019774134500000006,
"max": 0.014068594044999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713349984",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713352325"
},
"total": 2341.254085644,
"count": 1,
"self": 0.43758135600046444,
"children": {
"run_training.setup": {
"total": 0.05481439999994109,
"count": 1,
"self": 0.05481439999994109
},
"TrainerController.start_learning": {
"total": 2340.7616898879996,
"count": 1,
"self": 4.223201808038084,
"children": {
"TrainerController._reset_env": {
"total": 3.3432973449999963,
"count": 1,
"self": 3.3432973449999963
},
"TrainerController.advance": {
"total": 2333.0687010499614,
"count": 232282,
"self": 4.548621603044012,
"children": {
"env_step": {
"total": 1850.347248331927,
"count": 232282,
"self": 1533.3305016820073,
"children": {
"SubprocessEnvManager._take_step": {
"total": 314.40085005192395,
"count": 232282,
"self": 16.170185971868705,
"children": {
"TorchPolicy.evaluate": {
"total": 298.23066408005525,
"count": 222970,
"self": 298.23066408005525
}
}
},
"workers": {
"total": 2.6158965979956292,
"count": 232282,
"self": 0.0,
"children": {
"worker_root": {
"total": 2333.7303290450286,
"count": 232282,
"is_parallel": true,
"self": 1092.7179632920115,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000891230999968684,
"count": 1,
"is_parallel": true,
"self": 0.00020647600001666433,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006847549999520197,
"count": 2,
"is_parallel": true,
"self": 0.0006847549999520197
}
}
},
"UnityEnvironment.step": {
"total": 0.03016911899999286,
"count": 1,
"is_parallel": true,
"self": 0.00038497099990308925,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021668600004431937,
"count": 1,
"is_parallel": true,
"self": 0.00021668600004431937
},
"communicator.exchange": {
"total": 0.028807174999997187,
"count": 1,
"is_parallel": true,
"self": 0.028807174999997187
},
"steps_from_proto": {
"total": 0.0007602870000482653,
"count": 1,
"is_parallel": true,
"self": 0.00021992999995745777,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005403570000908076,
"count": 2,
"is_parallel": true,
"self": 0.0005403570000908076
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1241.012365753017,
"count": 232281,
"is_parallel": true,
"self": 38.652337352003315,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.0775670140896,
"count": 232281,
"is_parallel": true,
"self": 81.0775670140896
},
"communicator.exchange": {
"total": 1032.313648205006,
"count": 232281,
"is_parallel": true,
"self": 1032.313648205006
},
"steps_from_proto": {
"total": 88.96881318191822,
"count": 232281,
"is_parallel": true,
"self": 31.366760169986605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.602053011931616,
"count": 464562,
"is_parallel": true,
"self": 57.602053011931616
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 478.17283111499034,
"count": 232282,
"self": 6.378482287878569,
"children": {
"process_trajectory": {
"total": 145.60671033611118,
"count": 232282,
"self": 144.33246715711118,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2742431789999955,
"count": 10,
"self": 1.2742431789999955
}
}
},
"_update_policy": {
"total": 326.1876384910006,
"count": 97,
"self": 262.8059809589938,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.381657532006784,
"count": 2910,
"self": 63.381657532006784
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.780002644925844e-07,
"count": 1,
"self": 8.780002644925844e-07
},
"TrainerController._save_models": {
"total": 0.126488806999987,
"count": 1,
"self": 0.0018591619996186637,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12462964500036833,
"count": 1,
"self": 0.12462964500036833
}
}
}
}
}
}
}