ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4055912494659424,
"min": 1.4055912494659424,
"max": 1.4293824434280396,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69298.4609375,
"min": 68389.6015625,
"max": 78632.515625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.2840909090909,
"min": 72.33431085043988,
"max": 400.752,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49455.0,
"min": 49303.0,
"max": 50278.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999991.0,
"min": 49999.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999991.0,
"min": 49999.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.481139659881592,
"min": 0.1996988207101822,
"max": 2.5650112628936768,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1528.382080078125,
"min": 24.762653350830078,
"max": 1669.506591796875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7662480544153745,
"min": 1.737678192315563,
"max": 4.013000390366441,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2320.0088015198708,
"min": 215.47209584712982,
"max": 2622.4938257336617,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7662480544153745,
"min": 1.737678192315563,
"max": 4.013000390366441,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2320.0088015198708,
"min": 215.47209584712982,
"max": 2622.4938257336617,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01699187721145184,
"min": 0.013666523584591535,
"max": 0.01996272164324182,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05097563163435552,
"min": 0.02733304716918307,
"max": 0.05485654180738493,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.056923088389966216,
"min": 0.019846302177757024,
"max": 0.06097604148089886,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17076926516989865,
"min": 0.03969260435551405,
"max": 0.18050543690721194,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.8503487165833324e-06,
"min": 3.8503487165833324e-06,
"max": 0.00029529900156699995,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1551046149749998e-05,
"min": 1.1551046149749998e-05,
"max": 0.0008440287186570999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10128341666666667,
"min": 0.10128341666666667,
"max": 0.19843300000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30385025,
"min": 0.2076965,
"max": 0.5813429000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.404249166666667e-05,
"min": 7.404249166666667e-05,
"max": 0.004921806699999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.000222127475,
"min": 0.000222127475,
"max": 0.01406901071,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715267449",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715271540"
},
"total": 4091.189102212,
"count": 1,
"self": 0.5415943089997199,
"children": {
"run_training.setup": {
"total": 0.07518303699998796,
"count": 1,
"self": 0.07518303699998796
},
"TrainerController.start_learning": {
"total": 4090.572324866,
"count": 1,
"self": 7.188943778079647,
"children": {
"TrainerController._reset_env": {
"total": 3.4571188820000316,
"count": 1,
"self": 3.4571188820000316
},
"TrainerController.advance": {
"total": 4079.791853962921,
"count": 233340,
"self": 7.456218643150805,
"children": {
"env_step": {
"total": 2642.9867234789276,
"count": 233340,
"self": 2212.119800031269,
"children": {
"SubprocessEnvManager._take_step": {
"total": 425.97020370288124,
"count": 233340,
"self": 25.903391634985155,
"children": {
"TorchPolicy.evaluate": {
"total": 400.0668120678961,
"count": 222965,
"self": 400.0668120678961
}
}
},
"workers": {
"total": 4.8967197447777835,
"count": 233340,
"self": 0.0,
"children": {
"worker_root": {
"total": 4079.0629299091033,
"count": 233340,
"is_parallel": true,
"self": 2324.3888454011026,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0013939730000629424,
"count": 1,
"is_parallel": true,
"self": 0.00034563999997772044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001048333000085222,
"count": 2,
"is_parallel": true,
"self": 0.001048333000085222
}
}
},
"UnityEnvironment.step": {
"total": 0.03472487999999885,
"count": 1,
"is_parallel": true,
"self": 0.0004700380000031146,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002270759999873917,
"count": 1,
"is_parallel": true,
"self": 0.0002270759999873917
},
"communicator.exchange": {
"total": 0.03313220099994396,
"count": 1,
"is_parallel": true,
"self": 0.03313220099994396
},
"steps_from_proto": {
"total": 0.00089556500006438,
"count": 1,
"is_parallel": true,
"self": 0.00024470000005294423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006508650000114358,
"count": 2,
"is_parallel": true,
"self": 0.0006508650000114358
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1754.6740845080008,
"count": 233339,
"is_parallel": true,
"self": 55.88194237108473,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 102.45641629489967,
"count": 233339,
"is_parallel": true,
"self": 102.45641629489967
},
"communicator.exchange": {
"total": 1470.0702100211101,
"count": 233339,
"is_parallel": true,
"self": 1470.0702100211101
},
"steps_from_proto": {
"total": 126.26551582090622,
"count": 233339,
"is_parallel": true,
"self": 41.4320239370802,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.83349188382601,
"count": 466678,
"is_parallel": true,
"self": 84.83349188382601
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1429.3489118408424,
"count": 233340,
"self": 11.334735557812792,
"children": {
"process_trajectory": {
"total": 231.80257769602918,
"count": 233340,
"self": 230.40864755602934,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3939301399998385,
"count": 10,
"self": 1.3939301399998385
}
}
},
"_update_policy": {
"total": 1186.2115985870005,
"count": 97,
"self": 314.33764145000055,
"children": {
"TorchPPOOptimizer.update": {
"total": 871.8739571369999,
"count": 2910,
"self": 871.8739571369999
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0809999366756529e-06,
"count": 1,
"self": 1.0809999366756529e-06
},
"TrainerController._save_models": {
"total": 0.13440716199966118,
"count": 1,
"self": 0.005817705999106693,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1285894560005545,
"count": 1,
"self": 0.1285894560005545
}
}
}
}
}
}
}
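
For reference, the timer data above can be inspected directly with Python's standard json module. The sketch below is a minimal example, assuming the file has been saved locally as timers.json (the path is an assumption, not part of the log): it prints each gauge's last/min/max values and then walks the nested timer tree, whose root records the roughly 4091 s of total wall-clock time, to show where that time was spent.

import json

# Minimal sketch: load the ML-Agents timers.json shown above and summarize it.
# Assumes the file has been saved locally as "timers.json" (path is an assumption).
with open("timers.json") as f:
    timers = json.load(f)

# Gauges are per-metric summaries (value/min/max/count) recorded over the run.
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']:.4f} min={gauge['min']:.4f} max={gauge['max']:.4f}")

# Timer nodes are nested: each records total seconds, a call count, self time,
# and optional children. Walking the tree shows the wall-clock breakdown.
def walk(node, name="root", depth=0):
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.1f}s over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)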