{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7726489901542664,
"min": 0.7726489901542664,
"max": 2.8605544567108154,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7377.25244140625,
"min": 7377.25244140625,
"max": 29294.9375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.967904090881348,
"min": 0.28040969371795654,
"max": 12.967904090881348,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2528.7412109375,
"min": 54.399478912353516,
"max": 2624.72021484375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07009870851263758,
"min": 0.061665209231803464,
"max": 0.07308759916021361,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2803948340505503,
"min": 0.24666083692721386,
"max": 0.36543799580106806,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17251205155808552,
"min": 0.12651740327733113,
"max": 0.27992340723673503,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6900482062323421,
"min": 0.5060696131093245,
"max": 1.3996170361836753,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.5,
"min": 3.4318181818181817,
"max": 25.618181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1122.0,
"min": 151.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.5,
"min": 3.4318181818181817,
"max": 25.618181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1122.0,
"min": 151.0,
"max": 1409.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1722706072",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1722706540"
},
"total": 467.88920735099964,
"count": 1,
"self": 0.43512233199999173,
"children": {
"run_training.setup": {
"total": 0.052998580999883416,
"count": 1,
"self": 0.052998580999883416
},
"TrainerController.start_learning": {
"total": 467.40108643799977,
"count": 1,
"self": 0.6065676420171258,
"children": {
"TrainerController._reset_env": {
"total": 2.78681579699969,
"count": 1,
"self": 2.78681579699969
},
"TrainerController.advance": {
"total": 463.9142595059825,
"count": 18201,
"self": 0.2826862159627126,
"children": {
"env_step": {
"total": 463.6315732900198,
"count": 18201,
"self": 302.23251782893794,
"children": {
"SubprocessEnvManager._take_step": {
"total": 161.105843019046,
"count": 18201,
"self": 1.5414790660065592,
"children": {
"TorchPolicy.evaluate": {
"total": 159.56436395303945,
"count": 18201,
"self": 159.56436395303945
}
}
},
"workers": {
"total": 0.29321244203583774,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 466.034930859983,
"count": 18201,
"is_parallel": true,
"self": 235.04968218599197,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005459182999857148,
"count": 1,
"is_parallel": true,
"self": 0.0038527000001522538,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016064829997048946,
"count": 10,
"is_parallel": true,
"self": 0.0016064829997048946
}
}
},
"UnityEnvironment.step": {
"total": 0.07642821000035838,
"count": 1,
"is_parallel": true,
"self": 0.0006731830003445793,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046493200034092297,
"count": 1,
"is_parallel": true,
"self": 0.00046493200034092297
},
"communicator.exchange": {
"total": 0.07336246699969706,
"count": 1,
"is_parallel": true,
"self": 0.07336246699969706
},
"steps_from_proto": {
"total": 0.0019276279999758117,
"count": 1,
"is_parallel": true,
"self": 0.0003656720000435598,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015619559999322519,
"count": 10,
"is_parallel": true,
"self": 0.0015619559999322519
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 230.985248673991,
"count": 18200,
"is_parallel": true,
"self": 10.348204545098724,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.487509755021165,
"count": 18200,
"is_parallel": true,
"self": 5.487509755021165
},
"communicator.exchange": {
"total": 180.40262584992433,
"count": 18200,
"is_parallel": true,
"self": 180.40262584992433
},
"steps_from_proto": {
"total": 34.746908523946786,
"count": 18200,
"is_parallel": true,
"self": 6.626260535732399,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.120647988214387,
"count": 182000,
"is_parallel": true,
"self": 28.120647988214387
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013639799999509705,
"count": 1,
"self": 0.00013639799999509705,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 458.26471983739566,
"count": 682827,
"is_parallel": true,
"self": 14.329395926619782,
"children": {
"process_trajectory": {
"total": 254.6593319897779,
"count": 682827,
"is_parallel": true,
"self": 253.59699602177716,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0623359680007525,
"count": 4,
"is_parallel": true,
"self": 1.0623359680007525
}
}
},
"_update_policy": {
"total": 189.27599192099797,
"count": 90,
"is_parallel": true,
"self": 59.47363935997464,
"children": {
"TorchPPOOptimizer.update": {
"total": 129.80235256102333,
"count": 4584,
"is_parallel": true,
"self": 129.80235256102333
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09330709500045486,
"count": 1,
"self": 0.001007911000669992,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09229918399978487,
"count": 1,
"self": 0.09229918399978487
}
}
}
}
}
}
}