ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3994039297103882,
"min": 1.3993937969207764,
"max": 1.4245476722717285,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69960.3984375,
"min": 69231.59375,
"max": 75283.90625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 73.68656716417911,
"min": 72.40029325513197,
"max": 397.54761904761904,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49370.0,
"min": 49281.0,
"max": 50091.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999938.0,
"min": 49467.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999938.0,
"min": 49467.0,
"max": 1999938.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4632201194763184,
"min": 0.07959351688623428,
"max": 2.521146535873413,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1650.357421875,
"min": 9.949189186096191,
"max": 1678.3341064453125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.758223769735934,
"min": 1.8009398732185364,
"max": 4.0343744610388255,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2518.009925723076,
"min": 225.11748415231705,
"max": 2602.9359052181244,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.758223769735934,
"min": 1.8009398732185364,
"max": 4.0343744610388255,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2518.009925723076,
"min": 225.11748415231705,
"max": 2602.9359052181244,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015645556367235258,
"min": 0.013512798470037524,
"max": 0.01984713100052128,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.046936669101705776,
"min": 0.027025596940075048,
"max": 0.055323402830981645,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05684505216777325,
"min": 0.023095472001781067,
"max": 0.0662593117604653,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17053515650331974,
"min": 0.046190944003562134,
"max": 0.19388122943540415,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.504448831883335e-06,
"min": 3.504448831883335e-06,
"max": 0.00029536537654487494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0513346495650005e-05,
"min": 1.0513346495650005e-05,
"max": 0.0008441280186239998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.1011681166666667,
"min": 0.1011681166666667,
"max": 0.19845512500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3035043500000001,
"min": 0.20746934999999997,
"max": 0.5813759999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.82890216666667e-05,
"min": 6.82890216666667e-05,
"max": 0.004922910737500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020486706500000008,
"min": 0.00020486706500000008,
"max": 0.014070662400000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673451969",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673454149"
},
"total": 2180.3383999400003,
"count": 1,
"self": 0.3919928000004802,
"children": {
"run_training.setup": {
"total": 0.11575262100001282,
"count": 1,
"self": 0.11575262100001282
},
"TrainerController.start_learning": {
"total": 2179.830654519,
"count": 1,
"self": 3.80404423895925,
"children": {
"TrainerController._reset_env": {
"total": 7.911584020000021,
"count": 1,
"self": 7.911584020000021
},
"TrainerController.advance": {
"total": 2167.9946955330406,
"count": 233429,
"self": 4.16504011891675,
"children": {
"env_step": {
"total": 1699.3463472410788,
"count": 233429,
"self": 1426.9996681451184,
"children": {
"SubprocessEnvManager._take_step": {
"total": 269.78803430596173,
"count": 233429,
"self": 14.074400346960715,
"children": {
"TorchPolicy.evaluate": {
"total": 255.71363395900102,
"count": 222970,
"self": 63.7924925078471,
"children": {
"TorchPolicy.sample_actions": {
"total": 191.92114145115391,
"count": 222970,
"self": 191.92114145115391
}
}
}
}
},
"workers": {
"total": 2.5586447899985387,
"count": 233429,
"self": 0.0,
"children": {
"worker_root": {
"total": 2172.0600009709797,
"count": 233429,
"is_parallel": true,
"self": 998.401029876019,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019123870000044008,
"count": 1,
"is_parallel": true,
"self": 0.00029742200001692254,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016149649999874782,
"count": 2,
"is_parallel": true,
"self": 0.0016149649999874782
}
}
},
"UnityEnvironment.step": {
"total": 0.026183049999985997,
"count": 1,
"is_parallel": true,
"self": 0.00027240799994388,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000185330000022077,
"count": 1,
"is_parallel": true,
"self": 0.000185330000022077
},
"communicator.exchange": {
"total": 0.024839861000032215,
"count": 1,
"is_parallel": true,
"self": 0.024839861000032215
},
"steps_from_proto": {
"total": 0.0008854509999878246,
"count": 1,
"is_parallel": true,
"self": 0.0003917689999752838,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004936820000125408,
"count": 2,
"is_parallel": true,
"self": 0.0004936820000125408
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1173.6589710949606,
"count": 233428,
"is_parallel": true,
"self": 34.11996873998805,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.0666777310862,
"count": 233428,
"is_parallel": true,
"self": 75.0666777310862
},
"communicator.exchange": {
"total": 973.0735423809647,
"count": 233428,
"is_parallel": true,
"self": 973.0735423809647
},
"steps_from_proto": {
"total": 91.39878224292181,
"count": 233428,
"is_parallel": true,
"self": 37.28218599985888,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.11659624306293,
"count": 466856,
"is_parallel": true,
"self": 54.11659624306293
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 464.48330817304526,
"count": 233429,
"self": 6.06346902899827,
"children": {
"process_trajectory": {
"total": 150.25780772104855,
"count": 233429,
"self": 149.06690155704854,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1909061640000118,
"count": 10,
"self": 1.1909061640000118
}
}
},
"_update_policy": {
"total": 308.16203142299844,
"count": 97,
"self": 254.72117612000056,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.440855302997875,
"count": 2910,
"self": 53.440855302997875
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.88000010693213e-07,
"count": 1,
"self": 9.88000010693213e-07
},
"TrainerController._save_models": {
"total": 0.12032973899977151,
"count": 1,
"self": 0.002121317999808525,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11820842099996298,
"count": 1,
"self": 0.11820842099996298
}
}
}
}
}
}
}
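
The file above is the raw timer/gauge dump that ML-Agents writes at the end of a run. Below is a minimal sketch of how one might load it and summarise where the wall-clock time went, using only Python's standard library; the file path and the helper name `walk` are illustrative assumptions, not part of the original log.

import json

# Load the raw ML-Agents timer dump (local path is an assumption; adjust as needed).
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Top-level "gauges" hold per-metric value/min/max/count summaries.
reward = timers["gauges"]["Huggy.Environment.CumulativeReward.mean"]
print(f"mean cumulative reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f})")

def walk(node, name="root", depth=0, total=None):
    """Recursively print each timer block's share of the root wall-clock time."""
    block_total = node.get("total", 0.0)
    if total is None:
        total = block_total or 1.0
    print(f"{'  ' * depth}{name}: {block_total:.1f}s ({100 * block_total / total:.1f}%)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, total)

walk(timers)

Note that blocks flagged "is_parallel": true (e.g. worker_root) are timed in the environment worker processes, so their totals overlap the main-thread timers rather than adding to them.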