{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4047127962112427,
"min": 1.4047127962112427,
"max": 1.4283555746078491,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71839.8203125,
"min": 67945.90625,
"max": 77352.9765625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 118.0048076923077,
"min": 89.46,
"max": 417.97520661157023,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49090.0,
"min": 48779.0,
"max": 50575.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999336.0,
"min": 49946.0,
"max": 1999336.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999336.0,
"min": 49946.0,
"max": 1999336.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.366665840148926,
"min": 0.12775073945522308,
"max": 2.479139566421509,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 984.532958984375,
"min": 15.33008861541748,
"max": 1363.5267333984375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.495123111284696,
"min": 1.8439792235692343,
"max": 3.938506314542034,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1453.9712142944336,
"min": 221.2775068283081,
"max": 2100.8497069478035,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.495123111284696,
"min": 1.8439792235692343,
"max": 3.938506314542034,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1453.9712142944336,
"min": 221.2775068283081,
"max": 2100.8497069478035,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015589005539853436,
"min": 0.014693524490576237,
"max": 0.019237564144229207,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03117801107970687,
"min": 0.029387048981152474,
"max": 0.05771269243268762,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0412093786833187,
"min": 0.022622665359328192,
"max": 0.05837095559885104,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.0824187573666374,
"min": 0.045245330718656385,
"max": 0.1637020250161489,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.645448451550004e-06,
"min": 4.645448451550004e-06,
"max": 0.0002952762015745999,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.290896903100008e-06,
"min": 9.290896903100008e-06,
"max": 0.0008437447687517498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10154845000000003,
"min": 0.10154845000000003,
"max": 0.19842540000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20309690000000005,
"min": 0.20309690000000005,
"max": 0.58124825,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.726765500000007e-05,
"min": 8.726765500000007e-05,
"max": 0.004921427460000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017453531000000014,
"min": 0.00017453531000000014,
"max": 0.014064287675000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672023179",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672025582"
},
"total": 2402.5602121449997,
"count": 1,
"self": 0.8910205739994126,
"children": {
"run_training.setup": {
"total": 0.12195726700002751,
"count": 1,
"self": 0.12195726700002751
},
"TrainerController.start_learning": {
"total": 2401.547234304,
"count": 1,
"self": 4.3386962258737185,
"children": {
"TrainerController._reset_env": {
"total": 7.795945598999992,
"count": 1,
"self": 7.795945598999992
},
"TrainerController.advance": {
"total": 2389.1712998681264,
"count": 231881,
"self": 4.685468119244433,
"children": {
"env_step": {
"total": 1895.451465299908,
"count": 231881,
"self": 1594.8159172188598,
"children": {
"SubprocessEnvManager._take_step": {
"total": 297.7781908850145,
"count": 231881,
"self": 15.373134274001757,
"children": {
"TorchPolicy.evaluate": {
"total": 282.40505661101275,
"count": 223160,
"self": 69.64444262013728,
"children": {
"TorchPolicy.sample_actions": {
"total": 212.76061399087547,
"count": 223160,
"self": 212.76061399087547
}
}
}
}
},
"workers": {
"total": 2.857357196033604,
"count": 231881,
"self": 0.0,
"children": {
"worker_root": {
"total": 2392.504615255028,
"count": 231881,
"is_parallel": true,
"self": 1080.2541278510053,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002419148999990739,
"count": 1,
"is_parallel": true,
"self": 0.0004654079999681926,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019537410000225464,
"count": 2,
"is_parallel": true,
"self": 0.0019537410000225464
}
}
},
"UnityEnvironment.step": {
"total": 0.030702663999989,
"count": 1,
"is_parallel": true,
"self": 0.0002977119999059141,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002177200000232915,
"count": 1,
"is_parallel": true,
"self": 0.0002177200000232915
},
"communicator.exchange": {
"total": 0.0292514570000435,
"count": 1,
"is_parallel": true,
"self": 0.0292514570000435
},
"steps_from_proto": {
"total": 0.0009357750000162923,
"count": 1,
"is_parallel": true,
"self": 0.0003602159999900323,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00057555900002626,
"count": 2,
"is_parallel": true,
"self": 0.00057555900002626
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1312.2504874040228,
"count": 231880,
"is_parallel": true,
"self": 36.758323150953174,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.65445999099694,
"count": 231880,
"is_parallel": true,
"self": 79.65445999099694
},
"communicator.exchange": {
"total": 1096.1884774780228,
"count": 231880,
"is_parallel": true,
"self": 1096.1884774780228
},
"steps_from_proto": {
"total": 99.64922678404997,
"count": 231880,
"is_parallel": true,
"self": 40.426710220022244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.22251656402773,
"count": 463760,
"is_parallel": true,
"self": 59.22251656402773
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 489.0343664489741,
"count": 231881,
"self": 6.798170144940229,
"children": {
"process_trajectory": {
"total": 155.83898706503368,
"count": 231881,
"self": 154.43479558703308,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4041914780005982,
"count": 10,
"self": 1.4041914780005982
}
}
},
"_update_policy": {
"total": 326.3972092390002,
"count": 96,
"self": 271.6712304870129,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.72597875198727,
"count": 2880,
"self": 54.72597875198727
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.695999799267156e-06,
"count": 1,
"self": 1.695999799267156e-06
},
"TrainerController._save_models": {
"total": 0.24129091500026334,
"count": 1,
"self": 0.004348834000211355,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23694208100005199,
"count": 1,
"self": 0.23694208100005199
}
}
}
}
}
}
}