ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4043697118759155,
"min": 1.4043697118759155,
"max": 1.4275445938110352,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69444.6796875,
"min": 69155.84375,
"max": 77451.4453125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.221875,
"min": 73.26002971768202,
"max": 388.47286821705427,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48782.0,
"min": 48782.0,
"max": 50113.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999989.0,
"min": 49515.0,
"max": 1999989.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999989.0,
"min": 49515.0,
"max": 1999989.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4603159427642822,
"min": 0.0906737744808197,
"max": 2.517595052719116,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1574.6021728515625,
"min": 11.606243133544922,
"max": 1660.900146484375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7777288171462713,
"min": 1.877062070183456,
"max": 4.04488940010358,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2417.7464429736137,
"min": 240.26394498348236,
"max": 2624.803804576397,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7777288171462713,
"min": 1.877062070183456,
"max": 4.04488940010358,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2417.7464429736137,
"min": 240.26394498348236,
"max": 2624.803804576397,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0172712624952611,
"min": 0.013500980833002055,
"max": 0.018682890287406433,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051813787485783296,
"min": 0.02700196166600411,
"max": 0.05528347512329977,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.053492333284682704,
"min": 0.022602520789951085,
"max": 0.059987076992789906,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16047699985404812,
"min": 0.04520504157990217,
"max": 0.17472840460638206,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.800898733066672e-06,
"min": 3.800898733066672e-06,
"max": 0.00029534565155144997,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1402696199200015e-05,
"min": 1.1402696199200015e-05,
"max": 0.0008440633686455498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10126693333333332,
"min": 0.10126693333333332,
"max": 0.19844855000000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3038008,
"min": 0.20766025,
"max": 0.5813544500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.32199733333334e-05,
"min": 7.32199733333334e-05,
"max": 0.004922582645,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021965992000000018,
"min": 0.00021965992000000018,
"max": 0.014069587054999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675125829",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675128066"
},
"total": 2236.9738874689997,
"count": 1,
"self": 0.3817336969996177,
"children": {
"run_training.setup": {
"total": 0.1196741169999882,
"count": 1,
"self": 0.1196741169999882
},
"TrainerController.start_learning": {
"total": 2236.472479655,
"count": 1,
"self": 3.8268633709649293,
"children": {
"TrainerController._reset_env": {
"total": 10.749796591000006,
"count": 1,
"self": 10.749796591000006
},
"TrainerController.advance": {
"total": 2221.785305631035,
"count": 233144,
"self": 4.233784290940093,
"children": {
"env_step": {
"total": 1747.0585733550183,
"count": 233144,
"self": 1470.1871603100333,
"children": {
"SubprocessEnvManager._take_step": {
"total": 274.32136553896106,
"count": 233144,
"self": 14.390594223953883,
"children": {
"TorchPolicy.evaluate": {
"total": 259.9307713150072,
"count": 222926,
"self": 66.16976207897426,
"children": {
"TorchPolicy.sample_actions": {
"total": 193.76100923603292,
"count": 222926,
"self": 193.76100923603292
}
}
}
}
},
"workers": {
"total": 2.5500475060239864,
"count": 233144,
"self": 0.0,
"children": {
"worker_root": {
"total": 2228.613939547136,
"count": 233144,
"is_parallel": true,
"self": 1016.3875001481235,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002174168000010468,
"count": 1,
"is_parallel": true,
"self": 0.00035496099997089914,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018192070000395688,
"count": 2,
"is_parallel": true,
"self": 0.0018192070000395688
}
}
},
"UnityEnvironment.step": {
"total": 0.028918850999957613,
"count": 1,
"is_parallel": true,
"self": 0.0003215319999867461,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001917369999659968,
"count": 1,
"is_parallel": true,
"self": 0.0001917369999659968
},
"communicator.exchange": {
"total": 0.027477471999986847,
"count": 1,
"is_parallel": true,
"self": 0.027477471999986847
},
"steps_from_proto": {
"total": 0.0009281100000180231,
"count": 1,
"is_parallel": true,
"self": 0.00042483200002152444,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005032779999964987,
"count": 2,
"is_parallel": true,
"self": 0.0005032779999964987
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1212.2264393990124,
"count": 233143,
"is_parallel": true,
"self": 35.497743815129525,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.61966908886421,
"count": 233143,
"is_parallel": true,
"self": 75.61966908886421
},
"communicator.exchange": {
"total": 1008.512776934915,
"count": 233143,
"is_parallel": true,
"self": 1008.512776934915
},
"steps_from_proto": {
"total": 92.59624956010356,
"count": 233143,
"is_parallel": true,
"self": 38.5520223049528,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.04422725515076,
"count": 466286,
"is_parallel": true,
"self": 54.04422725515076
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 470.49294798507685,
"count": 233144,
"self": 6.141550114186998,
"children": {
"process_trajectory": {
"total": 150.93813374589138,
"count": 233144,
"self": 149.80174612689234,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1363876189990378,
"count": 10,
"self": 1.1363876189990378
}
}
},
"_update_policy": {
"total": 313.4132641249985,
"count": 97,
"self": 260.0034068680093,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.4098572569892,
"count": 2910,
"self": 53.4098572569892
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.599998520570807e-07,
"count": 1,
"self": 9.599998520570807e-07
},
"TrainerController._save_models": {
"total": 0.11051310199991349,
"count": 1,
"self": 0.002888772999995126,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10762432899991836,
"count": 1,
"self": 0.10762432899991836
}
}
}
}
}
}
}