ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4120055437088013,
"min": 1.4120055437088013,
"max": 1.4306491613388062,
"count": 27
},
"Huggy.Policy.Entropy.sum": {
"value": 69387.3671875,
"min": 69335.0546875,
"max": 77400.875,
"count": 27
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 84.51623931623932,
"min": 83.7911714770798,
"max": 414.55371900826447,
"count": 27
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49442.0,
"min": 49115.0,
"max": 50161.0,
"count": 27
},
"Huggy.Step.mean": {
"value": 1349980.0,
"min": 49595.0,
"max": 1349980.0,
"count": 27
},
"Huggy.Step.sum": {
"value": 1349980.0,
"min": 49595.0,
"max": 1349980.0,
"count": 27
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.443387031555176,
"min": 0.11236096173524857,
"max": 2.513777256011963,
"count": 27
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1429.38134765625,
"min": 13.483315467834473,
"max": 1465.4031982421875,
"count": 27
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.876356184278798,
"min": 1.991259807596604,
"max": 3.994456048829086,
"count": 27
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2267.6683678030968,
"min": 238.95117691159248,
"max": 2286.7436777353287,
"count": 27
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.876356184278798,
"min": 1.991259807596604,
"max": 3.994456048829086,
"count": 27
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2267.6683678030968,
"min": 238.95117691159248,
"max": 2286.7436777353287,
"count": 27
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017623089001184172,
"min": 0.014720112762976594,
"max": 0.01996028531058174,
"count": 27
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.035246178002368345,
"min": 0.029440225525953188,
"max": 0.05988085593174522,
"count": 27
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05559866341451804,
"min": 0.022532269793252152,
"max": 0.06097153342432446,
"count": 27
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.11119732682903608,
"min": 0.045064539586504304,
"max": 0.18291460027297338,
"count": 27
},
"Huggy.Policy.LearningRate.mean": {
"value": 0.00010092786635739997,
"min": 0.00010092786635739997,
"max": 0.0002953524015492,
"count": 27
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.00020185573271479994,
"min": 0.00020185573271479994,
"max": 0.00084398446867185,
"count": 27
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1336426,
"min": 0.1336426,
"max": 0.19845079999999996,
"count": 27
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2672852,
"min": 0.2672852,
"max": 0.58132815,
"count": 27
},
"Huggy.Policy.Beta.mean": {
"value": 0.0016887657400000001,
"min": 0.0016887657400000001,
"max": 0.00492269492,
"count": 27
},
"Huggy.Policy.Beta.sum": {
"value": 0.0033775314800000003,
"min": 0.0033775314800000003,
"max": 0.014068274685,
"count": 27
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 27
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 27
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707065674",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707067378"
},
"total": 1703.253072619,
"count": 1,
"self": 0.2536526500000491,
"children": {
"run_training.setup": {
"total": 0.05089985600000091,
"count": 1,
"self": 0.05089985600000091
},
"TrainerController.start_learning": {
"total": 1702.948520113,
"count": 1,
"self": 3.2540399530118975,
"children": {
"TrainerController._reset_env": {
"total": 2.7074502859999825,
"count": 1,
"self": 2.7074502859999825
},
"TrainerController.advance": {
"total": 1696.8180641049883,
"count": 159309,
"self": 3.5057973349573786,
"children": {
"env_step": {
"total": 1375.5621974099656,
"count": 159309,
"self": 1139.1953778139587,
"children": {
"SubprocessEnvManager._take_step": {
"total": 234.25870933096587,
"count": 159309,
"self": 12.51078772196081,
"children": {
"TorchPolicy.evaluate": {
"total": 221.74792160900506,
"count": 153270,
"self": 221.74792160900506
}
}
},
"workers": {
"total": 2.1081102650409775,
"count": 159308,
"self": 0.0,
"children": {
"worker_root": {
"total": 1697.8109619660133,
"count": 159308,
"is_parallel": true,
"self": 770.9254011740464,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00098078399997803,
"count": 1,
"is_parallel": true,
"self": 0.0002828929999623142,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006978910000157157,
"count": 2,
"is_parallel": true,
"self": 0.0006978910000157157
}
}
},
"UnityEnvironment.step": {
"total": 0.032776919000014004,
"count": 1,
"is_parallel": true,
"self": 0.0003723500000774038,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021959499997592502,
"count": 1,
"is_parallel": true,
"self": 0.00021959499997592502
},
"communicator.exchange": {
"total": 0.031229973999984395,
"count": 1,
"is_parallel": true,
"self": 0.031229973999984395
},
"steps_from_proto": {
"total": 0.0009549999999762804,
"count": 1,
"is_parallel": true,
"self": 0.00024463799996965463,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007103620000066257,
"count": 2,
"is_parallel": true,
"self": 0.0007103620000066257
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 926.8855607919669,
"count": 159307,
"is_parallel": true,
"self": 28.697275015981063,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 60.549560500011125,
"count": 159307,
"is_parallel": true,
"self": 60.549560500011125
},
"communicator.exchange": {
"total": 772.8807589609507,
"count": 159307,
"is_parallel": true,
"self": 772.8807589609507
},
"steps_from_proto": {
"total": 64.7579663150239,
"count": 159307,
"is_parallel": true,
"self": 24.06324220091949,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.69472411410442,
"count": 318614,
"is_parallel": true,
"self": 40.69472411410442
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 317.75006936006514,
"count": 159308,
"self": 4.952455309995685,
"children": {
"process_trajectory": {
"total": 107.70797729106982,
"count": 159308,
"self": 106.95540628306969,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7525710080001318,
"count": 6,
"self": 0.7525710080001318
}
}
},
"_update_policy": {
"total": 205.08963675899963,
"count": 66,
"self": 162.70109769700002,
"children": {
"TorchPPOOptimizer.update": {
"total": 42.38853906199961,
"count": 1980,
"self": 42.38853906199961
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.7030001799867023e-06,
"count": 1,
"self": 1.7030001799867023e-06
},
"TrainerController._save_models": {
"total": 0.1689640659997167,
"count": 1,
"self": 0.0043373739999879035,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1646266919997288,
"count": 1,
"self": 0.1646266919997288
}
}
}
}
}
}
}
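
The JSON above is the timer log that mlagents-learn leaves in run_logs at the end of a run: "gauges" holds per-metric summaries (value/min/max/count) for entropy, episode length, rewards, losses and the learning-rate schedule, "metadata" records the command line and library versions, and the nested "children" tree is a wall-clock profile of the trainer. A minimal sketch of how it can be inspected, assuming the file has been saved locally at run_logs/timers.json (the path comes from the header; everything else is standard-library Python):

import json

# Load the timers.json shown above (path assumed from the file header).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Gauges are per-metric summaries, e.g. the mean cumulative reward.
reward = timers["gauges"]["Huggy.Environment.CumulativeReward.mean"]
print(f"cumulative reward (mean): {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f}, "
      f"over {reward['count']} summary periods)")

# The timing tree is nested under "children"; walk it to see where
# wall-clock time was spent (totals are in seconds).
def walk(name, node, depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s "
          f"over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk("root", timers)

# Session metadata: versions, start/end times, and the exact command line used.
print(timers["metadata"]["command_line_arguments"])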