{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.849920392036438,
"min": 0.8069454431533813,
"max": 0.8786962032318115,
"count": 15
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 16828.423828125,
"min": 16193.92578125,
"max": 18016.787109375,
"count": 15
},
"SnowballTarget.Step.mean": {
"value": 699936.0,
"min": 419992.0,
"max": 699936.0,
"count": 15
},
"SnowballTarget.Step.sum": {
"value": 699936.0,
"min": 419992.0,
"max": 699936.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.376312255859375,
"min": 13.69563102722168,
"max": 14.376312255859375,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 5736.1484375,
"min": 5368.6875,
"max": 5807.10546875,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04974580502446905,
"min": 0.04285651367657313,
"max": 0.05344135037443333,
"count": 15
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.44771224522022146,
"min": 0.38570862308915815,
"max": 0.48528646325242636,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.16737105604261163,
"min": 0.1584306804256307,
"max": 0.2147602600355943,
"count": 15
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.5063395043835046,
"min": 1.425876123830676,
"max": 1.9328423403203487,
"count": 15
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 4.056098647999997e-06,
"min": 4.056098647999997e-06,
"max": 0.00012379891587657144,
"count": 15
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.650488783199997e-05,
"min": 3.650488783199997e-05,
"max": 0.001114190242889143,
"count": 15
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.101352,
"min": 0.101352,
"max": 0.14126628571428573,
"count": 15
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.912168,
"min": 0.912168,
"max": 1.2713965714285715,
"count": 15
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.746479999999995e-05,
"min": 7.746479999999995e-05,
"max": 0.002069187657142857,
"count": 15
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0006971831999999995,
"min": 0.0006971831999999995,
"max": 0.018622688914285715,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 15
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 19701.0,
"min": 19701.0,
"max": 21890.0,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.2020202020202,
"min": 26.694736842105264,
"max": 28.2020202020202,
"count": 15
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 2792.0,
"min": 2536.0,
"max": 2982.0,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.2020202020202,
"min": 26.694736842105264,
"max": 28.2020202020202,
"count": 15
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 2792.0,
"min": 2536.0,
"max": 2982.0,
"count": 15
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 15
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1765051355",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1765052166"
},
"total": 811.095951625,
"count": 1,
"self": 0.9339186759998483,
"children": {
"run_training.setup": {
"total": 0.02745952400027818,
"count": 1,
"self": 0.02745952400027818
},
"TrainerController.start_learning": {
"total": 810.1345734249999,
"count": 1,
"self": 0.7897105160268438,
"children": {
"TrainerController._reset_env": {
"total": 2.8735850279999795,
"count": 1,
"self": 2.8735850279999795
},
"TrainerController.advance": {
"total": 806.2926777669727,
"count": 27264,
"self": 0.7776902450095804,
"children": {
"env_step": {
"total": 614.5986771189073,
"count": 27264,
"self": 470.6666514869321,
"children": {
"SubprocessEnvManager._take_step": {
"total": 143.42557077898073,
"count": 27264,
"self": 2.4565840349691825,
"children": {
"TorchPolicy.evaluate": {
"total": 140.96898674401154,
"count": 27264,
"self": 140.96898674401154
}
}
},
"workers": {
"total": 0.5064548529944659,
"count": 27264,
"self": 0.0,
"children": {
"worker_root": {
"total": 806.6488648819268,
"count": 27264,
"is_parallel": true,
"self": 395.7431057329668,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002360758000122587,
"count": 1,
"is_parallel": true,
"self": 0.0006580539993592538,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017027040007633332,
"count": 10,
"is_parallel": true,
"self": 0.0017027040007633332
}
}
},
"UnityEnvironment.step": {
"total": 0.086901601000136,
"count": 1,
"is_parallel": true,
"self": 0.0006693960003758548,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000463283000044612,
"count": 1,
"is_parallel": true,
"self": 0.000463283000044612
},
"communicator.exchange": {
"total": 0.0795213619999231,
"count": 1,
"is_parallel": true,
"self": 0.0795213619999231
},
"steps_from_proto": {
"total": 0.006247559999792429,
"count": 1,
"is_parallel": true,
"self": 0.0034369569993941695,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0028106030003982596,
"count": 10,
"is_parallel": true,
"self": 0.0028106030003982596
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 410.90575914896,
"count": 27263,
"is_parallel": true,
"self": 18.421328414990967,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.948173833980036,
"count": 27263,
"is_parallel": true,
"self": 9.948173833980036
},
"communicator.exchange": {
"total": 316.5323121510296,
"count": 27263,
"is_parallel": true,
"self": 316.5323121510296
},
"steps_from_proto": {
"total": 66.00394474895938,
"count": 27263,
"is_parallel": true,
"self": 11.638041289069406,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.36590345988998,
"count": 272630,
"is_parallel": true,
"self": 54.36590345988998
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 190.91631040305583,
"count": 27264,
"self": 1.0692236440149827,
"children": {
"process_trajectory": {
"total": 70.5709728070392,
"count": 27264,
"self": 69.70160377103912,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8693690360000801,
"count": 6,
"self": 0.8693690360000801
}
}
},
"_update_policy": {
"total": 119.27611395200165,
"count": 136,
"self": 65.56878470301626,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.70732924898539,
"count": 3264,
"self": 53.70732924898539
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2880000213044696e-06,
"count": 1,
"self": 1.2880000213044696e-06
},
"TrainerController._save_models": {
"total": 0.17859882600032506,
"count": 1,
"self": 0.0016665440007272991,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17693228199959776,
"count": 1,
"self": 0.17693228199959776
}
}
}
}
}
}
}