{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.7644643783569336,
"min": 2.7644643783569336,
"max": 2.8233444690704346,
"count": 31
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 212.86375427246094,
"min": 183.86073303222656,
"max": 2463.22705078125,
"count": 31
},
"SnowballTarget.Step.mean": {
"value": 199960.0,
"min": 184968.0,
"max": 199960.0,
"count": 31
},
"SnowballTarget.Step.sum": {
"value": 199960.0,
"min": 184968.0,
"max": 199960.0,
"count": 31
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.8804264068603516,
"min": 2.6963953971862793,
"max": 2.988704204559326,
"count": 31
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 23.043411254882812,
"min": 10.785581588745117,
"max": 51.96919250488281,
"count": 31
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 31
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 31
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 7
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 2189.0,
"min": 2189.0,
"max": 2189.0,
"count": 7
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06333703348668766,
"min": 0.058399054214104955,
"max": 0.08776450733392838,
"count": 6
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.06333703348668766,
"min": 0.058399054214104955,
"max": 0.08776450733392838,
"count": 6
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.27091147765225054,
"min": 0.20008490892017589,
"max": 0.27091147765225054,
"count": 6
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.27091147765225054,
"min": 0.20008490892017589,
"max": 0.27091147765225054,
"count": 6
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.0889891200000003e-07,
"min": 1.0889891200000003e-07,
"max": 6.588934120000005e-07,
"count": 6
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.0889891200000003e-07,
"min": 1.0889891200000003e-07,
"max": 6.588934120000005e-07,
"count": 6
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.102176,
"min": 0.102176,
"max": 0.11317600000000001,
"count": 6
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.102176,
"min": 0.102176,
"max": 0.11317600000000001,
"count": 6
},
"SnowballTarget.Policy.Beta.mean": {
"value": 1.0979200000000003e-05,
"min": 1.0979200000000003e-05,
"max": 1.5929200000000007e-05,
"count": 6
},
"SnowballTarget.Policy.Beta.sum": {
"value": 1.0979200000000003e-05,
"min": 1.0979200000000003e-05,
"max": 1.5929200000000007e-05,
"count": 6
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 5.909090909090909,
"min": 3.5,
"max": 6.181818181818182,
"count": 7
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 65.0,
"min": 7.0,
"max": 68.0,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 5.909090909090909,
"min": 3.5,
"max": 6.181818181818182,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 65.0,
"min": 7.0,
"max": 68.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701532572",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --resume --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701532646"
},
"total": 73.80729216400005,
"count": 1,
"self": 1.0500653790004435,
"children": {
"run_training.setup": {
"total": 0.20530081799961408,
"count": 1,
"self": 0.20530081799961408
},
"TrainerController.start_learning": {
"total": 72.55192596699999,
"count": 1,
"self": 0.08717217499315666,
"children": {
"TrainerController._reset_env": {
"total": 10.285466686999825,
"count": 1,
"self": 10.285466686999825
},
"TrainerController.advance": {
"total": 61.920349600007285,
"count": 1415,
"self": 0.03446931900407435,
"children": {
"env_step": {
"total": 61.88588028100321,
"count": 1415,
"self": 39.68546892700351,
"children": {
"SubprocessEnvManager._take_step": {
"total": 22.17643904599845,
"count": 1415,
"self": 0.34156328198741903,
"children": {
"TorchPolicy.evaluate": {
"total": 21.83487576401103,
"count": 1415,
"self": 21.83487576401103
}
}
},
"workers": {
"total": 0.023972308001248166,
"count": 1415,
"self": 0.0,
"children": {
"worker_root": {
"total": 72.35766457700265,
"count": 1415,
"is_parallel": true,
"self": 52.11620357800848,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029400109997368418,
"count": 1,
"is_parallel": true,
"self": 0.0008444869999948423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020955239997419994,
"count": 10,
"is_parallel": true,
"self": 0.0020955239997419994
}
}
},
"UnityEnvironment.step": {
"total": 0.07016501599991898,
"count": 1,
"is_parallel": true,
"self": 0.0006563909996657458,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003816600001300685,
"count": 1,
"is_parallel": true,
"self": 0.0003816600001300685
},
"communicator.exchange": {
"total": 0.06717049000008046,
"count": 1,
"is_parallel": true,
"self": 0.06717049000008046
},
"steps_from_proto": {
"total": 0.001956475000042701,
"count": 1,
"is_parallel": true,
"self": 0.00036165600022286526,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015948189998198359,
"count": 10,
"is_parallel": true,
"self": 0.0015948189998198359
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 20.24146099899417,
"count": 1414,
"is_parallel": true,
"self": 0.8418219600007433,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.40412465499412065,
"count": 1414,
"is_parallel": true,
"self": 0.40412465499412065
},
"communicator.exchange": {
"total": 16.377537276004205,
"count": 1414,
"is_parallel": true,
"self": 16.377537276004205
},
"steps_from_proto": {
"total": 2.6179771079951024,
"count": 1414,
"is_parallel": true,
"self": 0.4715468080012215,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.146430299993881,
"count": 14140,
"is_parallel": true,
"self": 2.146430299993881
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.725699965201784e-05,
"count": 1,
"self": 9.725699965201784e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 61.41551269196634,
"count": 80656,
"is_parallel": true,
"self": 1.2501088620283554,
"children": {
"process_trajectory": {
"total": 36.636185387938895,
"count": 80656,
"is_parallel": true,
"self": 36.11438245993895,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5218029279999428,
"count": 1,
"is_parallel": true,
"self": 0.5218029279999428
}
}
},
"_update_policy": {
"total": 23.52921844199909,
"count": 6,
"is_parallel": true,
"self": 14.409687215000304,
"children": {
"TorchPPOOptimizer.update": {
"total": 9.119531226998788,
"count": 303,
"is_parallel": true,
"self": 9.119531226998788
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.25884024800006955,
"count": 1,
"self": 0.002457770000091841,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2563824779999777,
"count": 1,
"self": 0.2563824779999777
}
}
}
}
}
}
}