ppo-Huggy/run_logs/timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4054149389266968,
"min": 1.4054149389266968,
"max": 1.4284106492996216,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70530.75,
"min": 69194.4140625,
"max": 76763.234375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.1523178807947,
"min": 81.62148760330578,
"max": 410.6639344262295,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49620.0,
"min": 48939.0,
"max": 50101.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999871.0,
"min": 49832.0,
"max": 1999871.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999871.0,
"min": 49832.0,
"max": 1999871.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.438931703567505,
"min": 0.0032370223198086023,
"max": 2.4800257682800293,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1475.5537109375,
"min": 0.39167970418930054,
"max": 1475.5537109375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.709846794112655,
"min": 1.715446182518951,
"max": 3.9455936284127358,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2244.457310438156,
"min": 207.5689880847931,
"max": 2260.8322855830193,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.709846794112655,
"min": 1.715446182518951,
"max": 3.9455936284127358,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2244.457310438156,
"min": 207.5689880847931,
"max": 2260.8322855830193,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016388515260364833,
"min": 0.014283688926176789,
"max": 0.020347534387838095,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0491655457810945,
"min": 0.028567377852353578,
"max": 0.054821102791659845,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.060021475454171504,
"min": 0.022460032254457474,
"max": 0.060021475454171504,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18006442636251452,
"min": 0.04492006450891495,
"max": 0.18006442636251452,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1500989499999974e-06,
"min": 3.1500989499999974e-06,
"max": 0.0002953654515448499,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.450296849999993e-06,
"min": 9.450296849999993e-06,
"max": 0.0008441361186213,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105000000000001,
"min": 0.10105000000000001,
"max": 0.19845515000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30315000000000003,
"min": 0.20722970000000002,
"max": 0.5813787000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.239499999999996e-05,
"min": 6.239499999999996e-05,
"max": 0.004922911985,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018718499999999987,
"min": 0.00018718499999999987,
"max": 0.014070797130000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690229464",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690231919"
},
"total": 2455.280086517,
"count": 1,
"self": 0.7300835220003137,
"children": {
"run_training.setup": {
"total": 0.05247113299998318,
"count": 1,
"self": 0.05247113299998318
},
"TrainerController.start_learning": {
"total": 2454.497531862,
"count": 1,
"self": 4.403187370012802,
"children": {
"TrainerController._reset_env": {
"total": 5.76819392699997,
"count": 1,
"self": 5.76819392699997
},
"TrainerController.advance": {
"total": 2444.1463567369874,
"count": 232542,
"self": 4.460955657943487,
"children": {
"env_step": {
"total": 1885.0576356229285,
"count": 232542,
"self": 1592.0210191549452,
"children": {
"SubprocessEnvManager._take_step": {
"total": 290.1389838290252,
"count": 232542,
"self": 16.245412047993227,
"children": {
"TorchPolicy.evaluate": {
"total": 273.893571781032,
"count": 222970,
"self": 273.893571781032
}
}
},
"workers": {
"total": 2.8976326389581573,
"count": 232542,
"self": 0.0,
"children": {
"worker_root": {
"total": 2446.5449558329974,
"count": 232542,
"is_parallel": true,
"self": 1145.6597187409861,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012273140000047533,
"count": 1,
"is_parallel": true,
"self": 0.0003689739999686026,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008583400000361507,
"count": 2,
"is_parallel": true,
"self": 0.0008583400000361507
}
}
},
"UnityEnvironment.step": {
"total": 0.028466074999982993,
"count": 1,
"is_parallel": true,
"self": 0.0003360029999157632,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002158210000402505,
"count": 1,
"is_parallel": true,
"self": 0.0002158210000402505
},
"communicator.exchange": {
"total": 0.027197907000015675,
"count": 1,
"is_parallel": true,
"self": 0.027197907000015675
},
"steps_from_proto": {
"total": 0.0007163440000113042,
"count": 1,
"is_parallel": true,
"self": 0.00022002500008966308,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004963189999216411,
"count": 2,
"is_parallel": true,
"self": 0.0004963189999216411
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1300.8852370920113,
"count": 232541,
"is_parallel": true,
"self": 39.99522469596195,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.4625429600045,
"count": 232541,
"is_parallel": true,
"self": 80.4625429600045
},
"communicator.exchange": {
"total": 1084.7957662049964,
"count": 232541,
"is_parallel": true,
"self": 1084.7957662049964
},
"steps_from_proto": {
"total": 95.63170323104839,
"count": 232541,
"is_parallel": true,
"self": 34.71776995011453,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.91393328093386,
"count": 465082,
"is_parallel": true,
"self": 60.91393328093386
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 554.6277654561154,
"count": 232542,
"self": 6.685059009215138,
"children": {
"process_trajectory": {
"total": 138.3218467968996,
"count": 232542,
"self": 136.95409627289968,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3677505239999164,
"count": 10,
"self": 1.3677505239999164
}
}
},
"_update_policy": {
"total": 409.6208596500006,
"count": 97,
"self": 349.9666797089991,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.65417994100153,
"count": 2910,
"self": 59.65417994100153
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.8639998415892478e-06,
"count": 1,
"self": 1.8639998415892478e-06
},
"TrainerController._save_models": {
"total": 0.17979196399983266,
"count": 1,
"self": 0.0027320760000293376,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17705988799980332,
"count": 1,
"self": 0.17705988799980332
}
}
}
}
}
}
}
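
The log above has two parts: a "gauges" map (one entry per metric, each tracking value/min/max/count over the run) and a nested timer tree whose nodes carry total/count/self seconds plus "children". The following is a minimal sketch, not part of the run log itself, showing one way to load such a file and summarize both parts; the file path "run_logs/timers.json" and the helper name walk_timers are assumptions for illustration.

import json

def walk_timers(node, name="root", rows=None):
    """Flatten the nested total/count/self/children timer tree into rows."""
    if rows is None:
        rows = []
    rows.append((name, node.get("total", 0.0), node.get("count", 0), node.get("self", 0.0)))
    for child_name, child in node.get("children", {}).items():
        walk_timers(child, child_name, rows)
    return rows

with open("run_logs/timers.json") as f:  # assumed path; adjust to your run directory
    timers = json.load(f)

# Gauges: one entry per metric, each with value/min/max/count.
for metric, stats in timers["gauges"].items():
    print(f"{metric}: {stats['value']:.4f} "
          f"(min {stats['min']:.4f}, max {stats['max']:.4f}, n={stats['count']})")

# Timer tree: where wall-clock time went, largest totals first.
for name, total, count, self_time in sorted(walk_timers(timers), key=lambda r: -r[1])[:10]:
    print(f"{name}: total={total:.2f}s count={count} self={self_time:.2f}s")

For this particular run, such a summary would surface communicator.exchange (~1084 s) and TorchPolicy.evaluate (~274 s) as the dominant costs under the roughly 2455 s total.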