{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7537766695022583,
"min": 0.7522686719894409,
"max": 2.8702094554901123,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7230.22607421875,
"min": 7230.22607421875,
"max": 29425.38671875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.203457832336426,
"min": 0.5029815435409546,
"max": 13.203457832336426,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2574.67431640625,
"min": 97.57842254638672,
"max": 2674.311279296875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0627030210447379,
"min": 0.06079320902487476,
"max": 0.07632508308526274,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2508120841789516,
"min": 0.24317283609949905,
"max": 0.3689189619900015,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2004328974906136,
"min": 0.14402431735253035,
"max": 0.3026282609823872,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8017315899624544,
"min": 0.5760972694101214,
"max": 1.328447002114034,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.113636363636363,
"min": 3.5,
"max": 26.145454545454545,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1149.0,
"min": 154.0,
"max": 1438.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.113636363636363,
"min": 3.5,
"max": 26.145454545454545,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1149.0,
"min": 154.0,
"max": 1438.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1720665922",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1720666358"
},
"total": 436.1564724389999,
"count": 1,
"self": 0.749434213000086,
"children": {
"run_training.setup": {
"total": 0.0594970629999807,
"count": 1,
"self": 0.0594970629999807
},
"TrainerController.start_learning": {
"total": 435.34754116299985,
"count": 1,
"self": 0.5444111249926209,
"children": {
"TrainerController._reset_env": {
"total": 2.966460878000021,
"count": 1,
"self": 2.966460878000021
},
"TrainerController.advance": {
"total": 431.7038277080071,
"count": 18212,
"self": 0.24990485600483225,
"children": {
"env_step": {
"total": 431.4539228520023,
"count": 18212,
"self": 277.6908973889981,
"children": {
"SubprocessEnvManager._take_step": {
"total": 153.49746748500343,
"count": 18212,
"self": 1.4197553710057491,
"children": {
"TorchPolicy.evaluate": {
"total": 152.07771211399768,
"count": 18212,
"self": 152.07771211399768
}
}
},
"workers": {
"total": 0.2655579780007429,
"count": 18212,
"self": 0.0,
"children": {
"worker_root": {
"total": 434.1427230960089,
"count": 18212,
"is_parallel": true,
"self": 219.6754797370079,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008058209999944665,
"count": 1,
"is_parallel": true,
"self": 0.006542978999846127,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001515231000098538,
"count": 10,
"is_parallel": true,
"self": 0.001515231000098538
}
}
},
"UnityEnvironment.step": {
"total": 0.03496240599997691,
"count": 1,
"is_parallel": true,
"self": 0.0006895019998864882,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003858190000300965,
"count": 1,
"is_parallel": true,
"self": 0.0003858190000300965
},
"communicator.exchange": {
"total": 0.032036325000035504,
"count": 1,
"is_parallel": true,
"self": 0.032036325000035504
},
"steps_from_proto": {
"total": 0.0018507600000248203,
"count": 1,
"is_parallel": true,
"self": 0.0003616709998368606,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014890890001879598,
"count": 10,
"is_parallel": true,
"self": 0.0014890890001879598
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 214.46724335900103,
"count": 18211,
"is_parallel": true,
"self": 9.883536121016618,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.117900230000828,
"count": 18211,
"is_parallel": true,
"self": 5.117900230000828
},
"communicator.exchange": {
"total": 167.3734838009815,
"count": 18211,
"is_parallel": true,
"self": 167.3734838009815
},
"steps_from_proto": {
"total": 32.092323207002096,
"count": 18211,
"is_parallel": true,
"self": 5.954861513973924,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.137461693028172,
"count": 182110,
"is_parallel": true,
"self": 26.137461693028172
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014581200002794503,
"count": 1,
"self": 0.00014581200002794503,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 426.4779849859575,
"count": 652771,
"is_parallel": true,
"self": 13.37359963702852,
"children": {
"process_trajectory": {
"total": 237.04462613792919,
"count": 652771,
"is_parallel": true,
"self": 235.93147604492913,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1131500930000584,
"count": 4,
"is_parallel": true,
"self": 1.1131500930000584
}
}
},
"_update_policy": {
"total": 176.0597592109998,
"count": 90,
"is_parallel": true,
"self": 54.70465525000736,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.35510396099244,
"count": 4587,
"is_parallel": true,
"self": 121.35510396099244
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1326956400000654,
"count": 1,
"self": 0.0012819999999464926,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1314136400001189,
"count": 1,
"self": 0.1314136400001189
}
}
}
}
}
}
}