{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0957363843917847,
"min": 1.0957363843917847,
"max": 2.849642753601074,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9678.6396484375,
"min": 9678.6396484375,
"max": 31440.107421875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199800.0,
"min": 9800.0,
"max": 199800.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199800.0,
"min": 9800.0,
"max": 199800.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.482769966125488,
"min": 0.389544278383255,
"max": 12.482769966125488,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 624.1384887695312,
"min": 19.087669372558594,
"max": 624.1384887695312,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.38,
"min": 3.693877551020408,
"max": 24.84,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1219.0,
"min": 181.0,
"max": 1242.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.38,
"min": 3.693877551020408,
"max": 24.84,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1219.0,
"min": 181.0,
"max": 1242.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06480660689190057,
"min": 0.05863059075993221,
"max": 0.07388876557496249,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2592264275676023,
"min": 0.23452236303972884,
"max": 0.36944382787481245,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20724252589485226,
"min": 0.1440576102000241,
"max": 0.287794894958828,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.828970103579409,
"min": 0.5762304408000964,
"max": 1.4084345534736034,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.950097350000008e-06,
"min": 7.950097350000008e-06,
"max": 0.00029175000274999995,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.180038940000003e-05,
"min": 3.180038940000003e-05,
"max": 0.0013845000385,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10265,
"min": 0.10265,
"max": 0.19725000000000004,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4106,
"min": 0.4106,
"max": 0.9615,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014223500000000013,
"min": 0.00014223500000000013,
"max": 0.004862775,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005689400000000005,
"min": 0.0005689400000000005,
"max": 0.023078849999999998,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676896906",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676897363"
},
"total": 457.367736304,
"count": 1,
"self": 0.40621549499996945,
"children": {
"run_training.setup": {
"total": 0.18221349999998893,
"count": 1,
"self": 0.18221349999998893
},
"TrainerController.start_learning": {
"total": 456.77930730900005,
"count": 1,
"self": 0.5282974679996641,
"children": {
"TrainerController._reset_env": {
"total": 9.044605313000034,
"count": 1,
"self": 9.044605313000034
},
"TrainerController.advance": {
"total": 447.08655201100055,
"count": 18212,
"self": 0.283805839002639,
"children": {
"env_step": {
"total": 446.8027461719979,
"count": 18212,
"self": 292.45729088797987,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.07393198100806,
"count": 18212,
"self": 1.5030741130132128,
"children": {
"TorchPolicy.evaluate": {
"total": 152.57085786799485,
"count": 18212,
"self": 35.15360209999369,
"children": {
"TorchPolicy.sample_actions": {
"total": 117.41725576800116,
"count": 18212,
"self": 117.41725576800116
}
}
}
}
},
"workers": {
"total": 0.2715233030099853,
"count": 18212,
"self": 0.0,
"children": {
"worker_root": {
"total": 455.2804496790076,
"count": 18212,
"is_parallel": true,
"self": 216.87443295401397,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0064261009999881935,
"count": 1,
"is_parallel": true,
"self": 0.004096816999890507,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023292840000976867,
"count": 10,
"is_parallel": true,
"self": 0.0023292840000976867
}
}
},
"UnityEnvironment.step": {
"total": 0.03874918499991509,
"count": 1,
"is_parallel": true,
"self": 0.000554188999899452,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003313879999495839,
"count": 1,
"is_parallel": true,
"self": 0.0003313879999495839
},
"communicator.exchange": {
"total": 0.035616284000070664,
"count": 1,
"is_parallel": true,
"self": 0.035616284000070664
},
"steps_from_proto": {
"total": 0.0022473239999953876,
"count": 1,
"is_parallel": true,
"self": 0.0004434669999682228,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018038570000271648,
"count": 10,
"is_parallel": true,
"self": 0.0018038570000271648
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 238.4060167249936,
"count": 18211,
"is_parallel": true,
"self": 9.53180797202674,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.567055799991294,
"count": 18211,
"is_parallel": true,
"self": 5.567055799991294
},
"communicator.exchange": {
"total": 190.95423976098982,
"count": 18211,
"is_parallel": true,
"self": 190.95423976098982
},
"steps_from_proto": {
"total": 32.35291319198575,
"count": 18211,
"is_parallel": true,
"self": 7.0060446799647025,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.346868512021047,
"count": 182110,
"is_parallel": true,
"self": 25.346868512021047
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00026620199992066773,
"count": 1,
"self": 0.00026620199992066773,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 444.32295641398196,
"count": 339008,
"is_parallel": true,
"self": 8.448316760924968,
"children": {
"process_trajectory": {
"total": 204.50525988505694,
"count": 339008,
"is_parallel": true,
"self": 203.38160973805714,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1236501469998075,
"count": 4,
"is_parallel": true,
"self": 1.1236501469998075
}
}
},
"_update_policy": {
"total": 231.36937976800004,
"count": 90,
"is_parallel": true,
"self": 69.09464373500555,
"children": {
"TorchPPOOptimizer.update": {
"total": 162.2747360329945,
"count": 4590,
"is_parallel": true,
"self": 162.2747360329945
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1195863149998786,
"count": 1,
"self": 0.0008663209998758248,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11871999400000277,
"count": 1,
"self": 0.11871999400000277
}
}
}
}
}
}
}