{
  "name": "root",
  "gauges": {
    "SnowballTarget.Policy.Entropy.mean": {
      "value": 1.3256784677505493,
      "min": 1.3256784677505493,
      "max": 2.4978251457214355,
      "count": 8
    },
    "SnowballTarget.Policy.Entropy.sum": {
      "value": 13722.09765625,
      "min": 7473.4931640625,
      "max": 23391.26171875,
      "count": 8
    },
    "SnowballTarget.Step.mean": {
      "value": 99960.0,
      "min": 29944.0,
      "max": 99960.0,
      "count": 8
    },
    "SnowballTarget.Step.sum": {
      "value": 99960.0,
      "min": 29944.0,
      "max": 99960.0,
      "count": 8
    },
    "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
      "value": 9.735746383666992,
      "min": 2.981156349182129,
      "max": 9.735746383666992,
      "count": 8
    },
    "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
      "value": 1986.09228515625,
      "min": 160.98243713378906,
      "max": 1986.09228515625,
      "count": 8
    },
    "SnowballTarget.Environment.EpisodeLength.mean": {
      "value": 199.0,
      "min": 199.0,
      "max": 199.0,
      "count": 8
    },
    "SnowballTarget.Environment.EpisodeLength.sum": {
      "value": 10945.0,
      "min": 2189.0,
      "max": 10945.0,
      "count": 8
    },
    "SnowballTarget.Losses.PolicyLoss.mean": {
      "value": 0.06703904597191414,
      "min": 0.055809841973920506,
      "max": 0.076901034450195,
      "count": 8
    },
    "SnowballTarget.Losses.PolicyLoss.sum": {
      "value": 0.3351952298595707,
      "min": 0.055809841973920506,
      "max": 0.38450517225097497,
      "count": 8
    },
    "SnowballTarget.Losses.ValueLoss.mean": {
      "value": 0.229793911763266,
      "min": 0.229793911763266,
      "max": 0.2871344493446397,
      "count": 8
    },
    "SnowballTarget.Losses.ValueLoss.sum": {
      "value": 1.14896955881633,
      "min": 0.24725254625082016,
      "max": 1.3874146216640286,
      "count": 8
    },
    "SnowballTarget.Policy.LearningRate.mean": {
      "value": 0.000157176047608,
      "min": 0.000157176047608,
      "max": 0.00025617601460799997,
      "count": 8
    },
    "SnowballTarget.Policy.LearningRate.sum": {
      "value": 0.00078588023804,
      "min": 0.00025617601460799997,
      "max": 0.00116538011154,
      "count": 8
    },
    "SnowballTarget.Policy.Epsilon.mean": {
      "value": 0.152392,
      "min": 0.152392,
      "max": 0.185392,
      "count": 8
    },
    "SnowballTarget.Policy.Epsilon.sum": {
      "value": 0.76196,
      "min": 0.185392,
      "max": 0.88846,
      "count": 8
    },
    "SnowballTarget.Policy.Beta.mean": {
      "value": 0.0026243608000000003,
      "min": 0.0026243608000000003,
      "max": 0.0042710608,
      "count": 8
    },
    "SnowballTarget.Policy.Beta.sum": {
      "value": 0.013121804,
      "min": 0.0042710608,
      "max": 0.019434154,
      "count": 8
    },
    "SnowballTarget.Environment.CumulativeReward.mean": {
      "value": 21.818181818181817,
      "min": 8.818181818181818,
      "max": 21.818181818181817,
      "count": 8
    },
    "SnowballTarget.Environment.CumulativeReward.sum": {
      "value": 1200.0,
      "min": 97.0,
      "max": 1200.0,
      "count": 8
    },
    "SnowballTarget.Policy.ExtrinsicReward.mean": {
      "value": 21.818181818181817,
      "min": 8.818181818181818,
      "max": 21.818181818181817,
      "count": 8
    },
    "SnowballTarget.Policy.ExtrinsicReward.sum": {
      "value": 1200.0,
      "min": 97.0,
      "max": 1200.0,
      "count": 8
    },
    "SnowballTarget.IsTraining.mean": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 8
    },
    "SnowballTarget.IsTraining.sum": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 8
    }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1707073141",
    "python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
    "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
    "mlagents_version": "1.1.0.dev0",
    "mlagents_envs_version": "1.1.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "2.2.0+cu121",
    "numpy_version": "1.23.5",
    "end_time_seconds": "1707073310"
  },
  "total": 169.00839194200012,
  "count": 1,
  "self": 0.005654195000033724,
  "children": {
    "run_training.setup": {
      "total": 0.046330031000024974,
      "count": 1,
      "self": 0.046330031000024974
    },
    "TrainerController.start_learning": {
      "total": 168.95640771600006,
      "count": 1,
      "self": 1.16319599098847,
      "children": {
        "TrainerController._reset_env": {
          "total": 2.032633347000001,
          "count": 1,
          "self": 2.032633347000001
        },
        "TrainerController.advance": {
          "total": 165.67177896601152,
          "count": 6798,
          "self": 0.10150164502215375,
          "children": {
            "env_step": {
              "total": 165.57027732098936,
              "count": 6798,
              "self": 106.36081562098116,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 59.104578080007286,
                  "count": 6798,
                  "self": 0.5459434789966053,
                  "children": {
                    "TorchPolicy.evaluate": {
                      "total": 58.55863460101068,
                      "count": 6798,
                      "self": 58.55863460101068
                    }
                  }
                },
                "workers": {
                  "total": 0.10488362000091911,
                  "count": 6797,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 167.47266976998844,
                      "count": 6797,
                      "is_parallel": true,
                      "self": 83.92561759099226,
                      "children": {
                        "run_training.setup": {
                          "total": 0.0,
                          "count": 0,
                          "is_parallel": true,
                          "self": 0.0,
                          "children": {
                            "steps_from_proto": {
                              "total": 0.0024009389999264386,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.0006657589999576885,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 0.00173517999996875,
                                  "count": 10,
                                  "is_parallel": true,
                                  "self": 0.00173517999996875
                                }
                              }
                            },
                            "UnityEnvironment.step": {
                              "total": 0.03696543500018379,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.0006904960000611027,
                              "children": {
                                "UnityEnvironment._generate_step_input": {
                                  "total": 0.0004026940000585455,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.0004026940000585455
                                },
                                "communicator.exchange": {
                                  "total": 0.03376693199993497,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.03376693199993497
                                },
                                "steps_from_proto": {
                                  "total": 0.002105313000129172,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.00039066600015758013,
                                  "children": {
                                    "_process_rank_one_or_two_observation": {
                                      "total": 0.0017146469999715919,
                                      "count": 10,
                                      "is_parallel": true,
                                      "self": 0.0017146469999715919
                                    }
                                  }
                                }
                              }
                            }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 83.54705217899618,
                          "count": 6796,
                          "is_parallel": true,
                          "self": 3.9798186829823408,
                          "children": {
                            "UnityEnvironment._generate_step_input": {
                              "total": 2.0643686680059545,
                              "count": 6796,
                              "is_parallel": true,
                              "self": 2.0643686680059545
                            },
                            "communicator.exchange": {
                              "total": 64.55227144500577,
                              "count": 6796,
                              "is_parallel": true,
                              "self": 64.55227144500577
                            },
                            "steps_from_proto": {
                              "total": 12.950593383002115,
                              "count": 6796,
                              "is_parallel": true,
                              "self": 2.373907895006141,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 10.576685487995974,
                                  "count": 67960,
                                  "is_parallel": true,
                                  "self": 10.576685487995974
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": {
          "total": 0.0007921210001313739,
          "count": 1,
          "self": 0.0007921210001313739,
          "children": {
            "thread_root": {
              "total": 0.0,
              "count": 0,
              "is_parallel": true,
              "self": 0.0,
              "children": {
                "trainer_advance": {
                  "total": 164.39003223298323,
                  "count": 262604,
                  "is_parallel": true,
                  "self": 5.6990499909875325,
                  "children": {
                    "process_trajectory": {
                      "total": 91.33518825799456,
                      "count": 262604,
                      "is_parallel": true,
                      "self": 90.7733276689944,
                      "children": {
                        "RLTrainer._checkpoint": {
                          "total": 0.5618605890001618,
                          "count": 2,
                          "is_parallel": true,
                          "self": 0.5618605890001618
                        }
                      }
                    },
                    "_update_policy": {
                      "total": 67.35579398400114,
                      "count": 34,
                      "is_parallel": true,
                      "self": 18.783813645005466,
                      "children": {
                        "TorchPPOOptimizer.update": {
                          "total": 48.57198033899567,
                          "count": 1731,
                          "is_parallel": true,
                          "self": 48.57198033899567
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        },
        "TrainerController._save_models": {
          "total": 0.08800729099993987,
          "count": 1,
          "self": 0.001211968000006891,
          "children": {
            "RLTrainer._checkpoint": {
              "total": 0.08679532299993298,
              "count": 1,
              "self": 0.08679532299993298
            }
          }
        }
      }
    }
  }
}