ppo-Huggy / run_logs / timers.json
{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4042824506759644,
"min": 1.404280424118042,
"max": 1.427475094795227,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69229.71875,
"min": 67679.0390625,
"max": 76322.9296875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.46613545816733,
"min": 84.81132075471699,
"max": 367.7867647058824,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49430.0,
"min": 48822.0,
"max": 50177.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999991.0,
"min": 49933.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999991.0,
"min": 49933.0,
"max": 1999991.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.386636734008789,
"min": 0.04439346119761467,
"max": 2.488935708999634,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1198.0916748046875,
"min": 5.993117332458496,
"max": 1376.8602294921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7438378493149442,
"min": 1.7572343583460208,
"max": 3.9067121361319725,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1879.406600356102,
"min": 237.2266383767128,
"max": 2073.7337233424187,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7438378493149442,
"min": 1.7572343583460208,
"max": 3.9067121361319725,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1879.406600356102,
"min": 237.2266383767128,
"max": 2073.7337233424187,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017310392824098624,
"min": 0.013680245113694886,
"max": 0.019160458279657178,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03462078564819725,
"min": 0.029078229745209683,
"max": 0.05311459859367461,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05415175346036752,
"min": 0.02329581342637539,
"max": 0.062319244754811126,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10830350692073504,
"min": 0.04659162685275078,
"max": 0.17442926776905854,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.587023471024999e-06,
"min": 4.587023471024999e-06,
"max": 0.000295330426556525,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.174046942049998e-06,
"min": 9.174046942049998e-06,
"max": 0.00084382996872335,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.101528975,
"min": 0.101528975,
"max": 0.19844347499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20305795,
"min": 0.20305795,
"max": 0.58127665,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.629585249999997e-05,
"min": 8.629585249999997e-05,
"max": 0.0049223294025,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017259170499999994,
"min": 0.00017259170499999994,
"max": 0.014065704835,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670499208",
"python_version": "3.8.15 (default, Oct 12 2022, 19:14:39) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670501379"
},
"total": 2171.425925439,
"count": 1,
"self": 0.43752959699986604,
"children": {
"run_training.setup": {
"total": 0.10612733599998592,
"count": 1,
"self": 0.10612733599998592
},
"TrainerController.start_learning": {
"total": 2170.882268506,
"count": 1,
"self": 3.786739732940532,
"children": {
"TrainerController._reset_env": {
"total": 10.202819991999945,
"count": 1,
"self": 10.202819991999945
},
"TrainerController.advance": {
"total": 2156.7824192480593,
"count": 231811,
"self": 3.8671736778705963,
"children": {
"env_step": {
"total": 1696.0691717641078,
"count": 231811,
"self": 1423.0637248121855,
"children": {
"SubprocessEnvManager._take_step": {
"total": 270.42609450596296,
"count": 231811,
"self": 14.181226623961265,
"children": {
"TorchPolicy.evaluate": {
"total": 256.2448678820017,
"count": 222970,
"self": 64.01354100414187,
"children": {
"TorchPolicy.sample_actions": {
"total": 192.23132687785983,
"count": 222970,
"self": 192.23132687785983
}
}
}
}
},
"workers": {
"total": 2.57935244595933,
"count": 231811,
"self": 0.0,
"children": {
"worker_root": {
"total": 2163.2892791870368,
"count": 231811,
"is_parallel": true,
"self": 996.1028065629723,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025062259999231173,
"count": 1,
"is_parallel": true,
"self": 0.00032503699992503243,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002181188999998085,
"count": 2,
"is_parallel": true,
"self": 0.002181188999998085
}
}
},
"UnityEnvironment.step": {
"total": 0.02656476799995744,
"count": 1,
"is_parallel": true,
"self": 0.0002744209998581937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019106800004919933,
"count": 1,
"is_parallel": true,
"self": 0.00019106800004919933
},
"communicator.exchange": {
"total": 0.025405771000009736,
"count": 1,
"is_parallel": true,
"self": 0.025405771000009736
},
"steps_from_proto": {
"total": 0.0006935080000403104,
"count": 1,
"is_parallel": true,
"self": 0.00022941300005641097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046409499998389947,
"count": 2,
"is_parallel": true,
"self": 0.00046409499998389947
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1167.1864726240644,
"count": 231810,
"is_parallel": true,
"self": 34.12437916291424,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 72.50989591602547,
"count": 231810,
"is_parallel": true,
"self": 72.50989591602547
},
"communicator.exchange": {
"total": 969.4781967350931,
"count": 231810,
"is_parallel": true,
"self": 969.4781967350931
},
"steps_from_proto": {
"total": 91.07400081003152,
"count": 231810,
"is_parallel": true,
"self": 37.27670605115895,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.79729475887257,
"count": 463620,
"is_parallel": true,
"self": 53.79729475887257
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 456.8460738060812,
"count": 231811,
"self": 6.205763407211407,
"children": {
"process_trajectory": {
"total": 141.31939528287012,
"count": 231811,
"self": 140.8671851038696,
"children": {
"RLTrainer._checkpoint": {
"total": 0.45221017900053084,
"count": 4,
"self": 0.45221017900053084
}
}
},
"_update_policy": {
"total": 309.3209151159997,
"count": 96,
"self": 256.4436033819917,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.877311734008,
"count": 2880,
"self": 52.877311734008
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.840000529948156e-07,
"count": 1,
"self": 9.840000529948156e-07
},
"TrainerController._save_models": {
"total": 0.11028854899996077,
"count": 1,
"self": 0.002010999000049196,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10827754999991157,
"count": 1,
"self": 0.10827754999991157
}
}
}
}
}
}
}