{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7534700036048889,
"min": 0.7534700036048889,
"max": 2.8699440956115723,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7194.1318359375,
"min": 7194.1318359375,
"max": 29422.666015625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.691561698913574,
"min": 0.43297579884529114,
"max": 12.691561698913574,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2474.8544921875,
"min": 83.99730682373047,
"max": 2558.1533203125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06477662945404768,
"min": 0.06311868497460975,
"max": 0.074396863625289,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2591065178161907,
"min": 0.2591065178161907,
"max": 0.37198431812644495,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20713315590047368,
"min": 0.1116226599197926,
"max": 0.27924890693496257,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8285326236018947,
"min": 0.4464906396791704,
"max": 1.396244534674813,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.954545454545453,
"min": 3.659090909090909,
"max": 25.254545454545454,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1098.0,
"min": 161.0,
"max": 1389.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.954545454545453,
"min": 3.659090909090909,
"max": 25.254545454545454,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1098.0,
"min": 161.0,
"max": 1389.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697206621",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697207063"
},
"total": 442.206734751,
"count": 1,
"self": 0.4314583709999624,
"children": {
"run_training.setup": {
"total": 0.08559794400002829,
"count": 1,
"self": 0.08559794400002829
},
"TrainerController.start_learning": {
"total": 441.689678436,
"count": 1,
"self": 0.5123677180029631,
"children": {
"TrainerController._reset_env": {
"total": 8.031943664999972,
"count": 1,
"self": 8.031943664999972
},
"TrainerController.advance": {
"total": 433.0620779499971,
"count": 18199,
"self": 0.23857868299512575,
"children": {
"env_step": {
"total": 432.823499267002,
"count": 18199,
"self": 294.34832965301354,
"children": {
"SubprocessEnvManager._take_step": {
"total": 138.22426689199648,
"count": 18199,
"self": 1.3497687449929572,
"children": {
"TorchPolicy.evaluate": {
"total": 136.87449814700352,
"count": 18199,
"self": 136.87449814700352
}
}
},
"workers": {
"total": 0.25090272199196306,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 440.5599570399983,
"count": 18199,
"is_parallel": true,
"self": 214.81654641000353,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00789940099997466,
"count": 1,
"is_parallel": true,
"self": 0.005782267000085994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002117133999888665,
"count": 10,
"is_parallel": true,
"self": 0.002117133999888665
}
}
},
"UnityEnvironment.step": {
"total": 0.032601045000035356,
"count": 1,
"is_parallel": true,
"self": 0.00040285200003609134,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004104760000132046,
"count": 1,
"is_parallel": true,
"self": 0.0004104760000132046
},
"communicator.exchange": {
"total": 0.030622972999992726,
"count": 1,
"is_parallel": true,
"self": 0.030622972999992726
},
"steps_from_proto": {
"total": 0.001164743999993334,
"count": 1,
"is_parallel": true,
"self": 0.00027354599995987883,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008911980000334552,
"count": 10,
"is_parallel": true,
"self": 0.0008911980000334552
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 225.7434106299948,
"count": 18198,
"is_parallel": true,
"self": 10.110358904983116,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.111241722003399,
"count": 18198,
"is_parallel": true,
"self": 5.111241722003399
},
"communicator.exchange": {
"total": 178.46551988499874,
"count": 18198,
"is_parallel": true,
"self": 178.46551988499874
},
"steps_from_proto": {
"total": 32.05629011800954,
"count": 18198,
"is_parallel": true,
"self": 5.825964421993717,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.230325696015825,
"count": 181980,
"is_parallel": true,
"self": 26.230325696015825
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011291499993149046,
"count": 1,
"self": 0.00011291499993149046,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 429.57185089402054,
"count": 453771,
"is_parallel": true,
"self": 9.497245402023054,
"children": {
"process_trajectory": {
"total": 244.4802662369973,
"count": 453771,
"is_parallel": true,
"self": 243.71116015999723,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7691060770000604,
"count": 4,
"is_parallel": true,
"self": 0.7691060770000604
}
}
},
"_update_policy": {
"total": 175.5943392550002,
"count": 90,
"is_parallel": true,
"self": 53.35668271100246,
"children": {
"TorchPPOOptimizer.update": {
"total": 122.23765654399773,
"count": 4584,
"is_parallel": true,
"self": 122.23765654399773
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08317618800003856,
"count": 1,
"self": 0.000812296000049173,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08236389199998939,
"count": 1,
"self": 0.08236389199998939
}
}
}
}
}
}
}