{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4010369777679443,
"min": 1.4010369777679443,
"max": 1.4279608726501465,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 66893.9140625,
"min": 66893.9140625,
"max": 77328.78125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 119.10791366906474,
"min": 94.64435946462716,
"max": 380.6212121212121,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49668.0,
"min": 49071.0,
"max": 50242.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999337.0,
"min": 49738.0,
"max": 1999337.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999337.0,
"min": 49738.0,
"max": 1999337.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.302340269088745,
"min": 0.008680038154125214,
"max": 2.378091812133789,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 960.075927734375,
"min": 1.1370849609375,
"max": 1207.6920166015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5404951342289968,
"min": 1.751283003969957,
"max": 3.913444149444809,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1476.3864709734917,
"min": 229.41807352006435,
"max": 1966.310101032257,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5404951342289968,
"min": 1.751283003969957,
"max": 3.913444149444809,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1476.3864709734917,
"min": 229.41807352006435,
"max": 1966.310101032257,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018690276772960916,
"min": 0.012564418473796954,
"max": 0.020111255764519834,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05607083031888275,
"min": 0.025128836947593908,
"max": 0.060333767293559507,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.044084138754341334,
"min": 0.02101296993593375,
"max": 0.07207807538410028,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.132252416263024,
"min": 0.0420259398718675,
"max": 0.21623422615230087,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.178348940583339e-06,
"min": 3.178348940583339e-06,
"max": 0.000295269001577,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.535046821750017e-06,
"min": 9.535046821750017e-06,
"max": 0.0008438287687237499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105941666666667,
"min": 0.10105941666666667,
"max": 0.19842300000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30317825,
"min": 0.20724775,
"max": 0.58127625,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.286489166666677e-05,
"min": 6.286489166666677e-05,
"max": 0.0049213077000000004,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001885946750000003,
"min": 0.0001885946750000003,
"max": 0.014065684875,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670851179",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670853392"
},
"total": 2212.24891041,
"count": 1,
"self": 0.38967187699972783,
"children": {
"run_training.setup": {
"total": 0.11127775199997814,
"count": 1,
"self": 0.11127775199997814
},
"TrainerController.start_learning": {
"total": 2211.747960781,
"count": 1,
"self": 3.9807950619460826,
"children": {
"TrainerController._reset_env": {
"total": 10.020979356999987,
"count": 1,
"self": 10.020979356999987
},
"TrainerController.advance": {
"total": 2197.6317818760544,
"count": 231283,
"self": 3.9617086070620644,
"children": {
"env_step": {
"total": 1719.914049251014,
"count": 231283,
"self": 1442.6354788540025,
"children": {
"SubprocessEnvManager._take_step": {
"total": 274.7112381819836,
"count": 231283,
"self": 14.3986495931141,
"children": {
"TorchPolicy.evaluate": {
"total": 260.3125885888695,
"count": 222816,
"self": 64.8831556888357,
"children": {
"TorchPolicy.sample_actions": {
"total": 195.42943290003382,
"count": 222816,
"self": 195.42943290003382
}
}
}
}
},
"workers": {
"total": 2.567332215027932,
"count": 231283,
"self": 0.0,
"children": {
"worker_root": {
"total": 2203.937780265882,
"count": 231283,
"is_parallel": true,
"self": 1017.1824911429521,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021917920000760205,
"count": 1,
"is_parallel": true,
"self": 0.0003014940000412025,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001890298000034818,
"count": 2,
"is_parallel": true,
"self": 0.001890298000034818
}
}
},
"UnityEnvironment.step": {
"total": 0.026603526999906535,
"count": 1,
"is_parallel": true,
"self": 0.0002856799998198767,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00017409800000223186,
"count": 1,
"is_parallel": true,
"self": 0.00017409800000223186
},
"communicator.exchange": {
"total": 0.02539183800001865,
"count": 1,
"is_parallel": true,
"self": 0.02539183800001865
},
"steps_from_proto": {
"total": 0.0007519110000657747,
"count": 1,
"is_parallel": true,
"self": 0.00028933800001595955,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046257300004981516,
"count": 2,
"is_parallel": true,
"self": 0.00046257300004981516
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1186.7552891229298,
"count": 231282,
"is_parallel": true,
"self": 34.637032786040436,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.88306821498645,
"count": 231282,
"is_parallel": true,
"self": 75.88306821498645
},
"communicator.exchange": {
"total": 984.9272104729215,
"count": 231282,
"is_parallel": true,
"self": 984.9272104729215
},
"steps_from_proto": {
"total": 91.30797764898125,
"count": 231282,
"is_parallel": true,
"self": 37.46673452811274,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.84124312086851,
"count": 462564,
"is_parallel": true,
"self": 53.84124312086851
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 473.75602401797835,
"count": 231283,
"self": 6.148897882952497,
"children": {
"process_trajectory": {
"total": 139.63319430202807,
"count": 231283,
"self": 139.15225000302814,
"children": {
"RLTrainer._checkpoint": {
"total": 0.48094429899992974,
"count": 4,
"self": 0.48094429899992974
}
}
},
"_update_policy": {
"total": 327.9739318329978,
"count": 97,
"self": 273.91548920499974,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.05844262799803,
"count": 2910,
"self": 54.05844262799803
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.889996934158262e-07,
"count": 1,
"self": 8.889996934158262e-07
},
"TrainerController._save_models": {
"total": 0.11440359699963665,
"count": 1,
"self": 0.0019003569996129954,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11250324000002365,
"count": 1,
"self": 0.11250324000002365
}
}
}
}
}
}
}