ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4105855226516724,
"min": 1.4105855226516724,
"max": 1.4298142194747925,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70409.375,
"min": 67499.8828125,
"max": 76898.875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 101.30327868852459,
"min": 84.2905982905983,
"max": 426.3813559322034,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49436.0,
"min": 49123.0,
"max": 50313.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999992.0,
"min": 49906.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999992.0,
"min": 49906.0,
"max": 1999992.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.391770839691162,
"min": 0.06349783390760422,
"max": 2.431220293045044,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1167.1842041015625,
"min": 7.429246425628662,
"max": 1417.4013671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.64361562176806,
"min": 1.7707241443742034,
"max": 3.9020118959596224,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1778.0844234228134,
"min": 207.1747248917818,
"max": 2261.3206192851067,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.64361562176806,
"min": 1.7707241443742034,
"max": 3.9020118959596224,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1778.0844234228134,
"min": 207.1747248917818,
"max": 2261.3206192851067,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017319297882366098,
"min": 0.013674316165876613,
"max": 0.02045741304755211,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0519578936470983,
"min": 0.027348632331753227,
"max": 0.057039057363848164,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04432946931984689,
"min": 0.02160480764384071,
"max": 0.057212086601389774,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.13298840795954067,
"min": 0.04320961528768142,
"max": 0.17163625980416933,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6233987922333404e-06,
"min": 3.6233987922333404e-06,
"max": 0.00029532532655822505,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0870196376700021e-05,
"min": 1.0870196376700021e-05,
"max": 0.0008441607186131001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10120776666666668,
"min": 0.10120776666666668,
"max": 0.19844177500000001,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30362330000000004,
"min": 0.20757155,
"max": 0.5813869000000003,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.026755666666679e-05,
"min": 7.026755666666679e-05,
"max": 0.0049222445725,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002108026700000004,
"min": 0.0002108026700000004,
"max": 0.014071206309999997,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672865396",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672867703"
},
"total": 2307.099706162,
"count": 1,
"self": 0.3942841899997802,
"children": {
"run_training.setup": {
"total": 0.11617810600000666,
"count": 1,
"self": 0.11617810600000666
},
"TrainerController.start_learning": {
"total": 2306.5892438660003,
"count": 1,
"self": 4.042941902896473,
"children": {
"TrainerController._reset_env": {
"total": 7.227187415999879,
"count": 1,
"self": 7.227187415999879
},
"TrainerController.advance": {
"total": 2295.205310549104,
"count": 231772,
"self": 4.310477962066216,
"children": {
"env_step": {
"total": 1822.1369427470634,
"count": 231772,
"self": 1532.127206083183,
"children": {
"SubprocessEnvManager._take_step": {
"total": 287.3099387529685,
"count": 231772,
"self": 14.867405284001052,
"children": {
"TorchPolicy.evaluate": {
"total": 272.44253346896744,
"count": 223003,
"self": 68.28238476295746,
"children": {
"TorchPolicy.sample_actions": {
"total": 204.16014870600998,
"count": 223003,
"self": 204.16014870600998
}
}
}
}
},
"workers": {
"total": 2.6997979109119115,
"count": 231772,
"self": 0.0,
"children": {
"worker_root": {
"total": 2298.5345665530144,
"count": 231772,
"is_parallel": true,
"self": 1039.6470993042021,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021231540001735993,
"count": 1,
"is_parallel": true,
"self": 0.0003785780002090178,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017445759999645816,
"count": 2,
"is_parallel": true,
"self": 0.0017445759999645816
}
}
},
"UnityEnvironment.step": {
"total": 0.029932589000054577,
"count": 1,
"is_parallel": true,
"self": 0.00032410400012850005,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017611299995223817,
"count": 1,
"is_parallel": true,
"self": 0.00017611299995223817
},
"communicator.exchange": {
"total": 0.028565724999907616,
"count": 1,
"is_parallel": true,
"self": 0.028565724999907616
},
"steps_from_proto": {
"total": 0.000866647000066223,
"count": 1,
"is_parallel": true,
"self": 0.000317336000080104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000549310999986119,
"count": 2,
"is_parallel": true,
"self": 0.000549310999986119
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1258.8874672488123,
"count": 231771,
"is_parallel": true,
"self": 34.879309418910225,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.92074235605264,
"count": 231771,
"is_parallel": true,
"self": 80.92074235605264
},
"communicator.exchange": {
"total": 1042.2515490579117,
"count": 231771,
"is_parallel": true,
"self": 1042.2515490579117
},
"steps_from_proto": {
"total": 100.83586641593774,
"count": 231771,
"is_parallel": true,
"self": 41.85519944790144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.9806669680363,
"count": 463542,
"is_parallel": true,
"self": 58.9806669680363
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 468.75788983997427,
"count": 231772,
"self": 6.626068380008746,
"children": {
"process_trajectory": {
"total": 150.3440324839662,
"count": 231772,
"self": 149.16091556796596,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1831169160002446,
"count": 10,
"self": 1.1831169160002446
}
}
},
"_update_policy": {
"total": 311.7877889759993,
"count": 97,
"self": 258.2426138760163,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.545175099983,
"count": 2910,
"self": 53.545175099983
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.570000318286475e-07,
"count": 1,
"self": 8.570000318286475e-07
},
"TrainerController._save_models": {
"total": 0.11380314100006217,
"count": 1,
"self": 0.002073097999982565,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1117300430000796,
"count": 1,
"self": 0.1117300430000796
}
}
}
}
}
}
}