ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.400079369544983,
"min": 1.400079369544983,
"max": 1.425079345703125,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70855.21875,
"min": 67336.046875,
"max": 77302.3671875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 112.48409090909091,
"min": 87.56637168141593,
"max": 382.2748091603053,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49493.0,
"min": 48741.0,
"max": 50290.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999959.0,
"min": 49953.0,
"max": 1999959.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999959.0,
"min": 49953.0,
"max": 1999959.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.31423282623291,
"min": 0.03759172931313515,
"max": 2.4437615871429443,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1015.9481811523438,
"min": 4.886924743652344,
"max": 1368.9163818359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.3358705596391593,
"min": 1.8032091546517153,
"max": 3.8842845660461407,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1464.447175681591,
"min": 234.41719010472298,
"max": 2130.6100431084633,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.3358705596391593,
"min": 1.8032091546517153,
"max": 3.8842845660461407,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1464.447175681591,
"min": 234.41719010472298,
"max": 2130.6100431084633,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01677813312012909,
"min": 0.013408388028771394,
"max": 0.01976350649040089,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05033439936038728,
"min": 0.026816776057542787,
"max": 0.059290519471202674,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04151269824554523,
"min": 0.024570287515719734,
"max": 0.059256939714153606,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.12453809473663568,
"min": 0.04914057503143947,
"max": 0.17652526845534644,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1422489526166665e-06,
"min": 3.1422489526166665e-06,
"max": 0.00029528842657052494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.42674685785e-06,
"min": 9.42674685785e-06,
"max": 0.0008439675186774999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10104738333333335,
"min": 0.10104738333333335,
"max": 0.19842947499999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30314215000000005,
"min": 0.20727505000000007,
"max": 0.5813225,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.226442833333335e-05,
"min": 6.226442833333335e-05,
"max": 0.0049216308024999985,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018679328500000004,
"min": 0.00018679328500000004,
"max": 0.014067992749999998,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690488784",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690491239"
},
"total": 2455.417852863,
"count": 1,
"self": 0.8333251269996254,
"children": {
"run_training.setup": {
"total": 0.05801180299999942,
"count": 1,
"self": 0.05801180299999942
},
"TrainerController.start_learning": {
"total": 2454.526515933,
"count": 1,
"self": 4.2938972849410675,
"children": {
"TrainerController._reset_env": {
"total": 5.389385431999983,
"count": 1,
"self": 5.389385431999983
},
"TrainerController.advance": {
"total": 2444.652304420059,
"count": 231898,
"self": 4.431108043223048,
"children": {
"env_step": {
"total": 1884.574202334972,
"count": 231898,
"self": 1588.7732852000404,
"children": {
"SubprocessEnvManager._take_step": {
"total": 292.9420035239924,
"count": 231898,
"self": 16.798881594970283,
"children": {
"TorchPolicy.evaluate": {
"total": 276.14312192902213,
"count": 223172,
"self": 276.14312192902213
}
}
},
"workers": {
"total": 2.8589136109389983,
"count": 231898,
"self": 0.0,
"children": {
"worker_root": {
"total": 2446.735071609989,
"count": 231898,
"is_parallel": true,
"self": 1148.5169897949247,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000972388000036517,
"count": 1,
"is_parallel": true,
"self": 0.0002574130000425612,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007149749999939559,
"count": 2,
"is_parallel": true,
"self": 0.0007149749999939559
}
}
},
"UnityEnvironment.step": {
"total": 0.031760394000002634,
"count": 1,
"is_parallel": true,
"self": 0.00036927100006778346,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002922759999819391,
"count": 1,
"is_parallel": true,
"self": 0.0002922759999819391
},
"communicator.exchange": {
"total": 0.030329028999972252,
"count": 1,
"is_parallel": true,
"self": 0.030329028999972252
},
"steps_from_proto": {
"total": 0.0007698179999806598,
"count": 1,
"is_parallel": true,
"self": 0.00021509800001240365,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005547199999682562,
"count": 2,
"is_parallel": true,
"self": 0.0005547199999682562
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1298.2180818150644,
"count": 231897,
"is_parallel": true,
"self": 40.38398818200744,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.59408208801034,
"count": 231897,
"is_parallel": true,
"self": 83.59408208801034
},
"communicator.exchange": {
"total": 1076.9199807789787,
"count": 231897,
"is_parallel": true,
"self": 1076.9199807789787
},
"steps_from_proto": {
"total": 97.32003076606793,
"count": 231897,
"is_parallel": true,
"self": 35.47787288804312,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.84215787802481,
"count": 463794,
"is_parallel": true,
"self": 61.84215787802481
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 555.6469940418641,
"count": 231898,
"self": 6.57679492694831,
"children": {
"process_trajectory": {
"total": 135.83107949491392,
"count": 231898,
"self": 134.3665880419141,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4644914529998232,
"count": 10,
"self": 1.4644914529998232
}
}
},
"_update_policy": {
"total": 413.2391196200018,
"count": 97,
"self": 353.08369469500883,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.155424924992985,
"count": 2910,
"self": 60.155424924992985
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.7890001799969468e-06,
"count": 1,
"self": 1.7890001799969468e-06
},
"TrainerController._save_models": {
"total": 0.19092700700002752,
"count": 1,
"self": 0.002841067000190378,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18808593999983714,
"count": 1,
"self": 0.18808593999983714
}
}
}
}
}
}
}