{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.6550472974777222,
"min": 1.6550472974777222,
"max": 2.856945037841797,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 16894.72265625,
"min": 16486.537109375,
"max": 29320.826171875,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.16292953491211,
"min": 0.3482355773448944,
"max": 9.16292953491211,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1869.2376708984375,
"min": 67.55770111083984,
"max": 1869.2376708984375,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07037000068241969,
"min": 0.06126856921507758,
"max": 0.07437954536055937,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.35185000341209843,
"min": 0.24507427686031033,
"max": 0.35185000341209843,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2372448506892896,
"min": 0.17486804560986438,
"max": 0.2636776950721647,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.186224253446448,
"min": 0.6994721824394575,
"max": 1.2565156297356477,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.00027440009451199995,
"min": 0.00027440009451199995,
"max": 0.004729400005412,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0013720004725599998,
"min": 0.0013720004725599998,
"max": 0.02117200007656,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.4615520000000001,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.0047299412,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179656,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 19.69090909090909,
"min": 2.9545454545454546,
"max": 19.69090909090909,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1083.0,
"min": 130.0,
"max": 1083.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 19.69090909090909,
"min": 2.9545454545454546,
"max": 19.69090909090909,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1083.0,
"min": 130.0,
"max": 1083.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717186618",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1717186974"
},
"total": 356.0612305249999,
"count": 1,
"self": 0.5948508750000201,
"children": {
"run_training.setup": {
"total": 0.1112556709999808,
"count": 1,
"self": 0.1112556709999808
},
"TrainerController.start_learning": {
"total": 355.3551239789999,
"count": 1,
"self": 0.5846271469893054,
"children": {
"TrainerController._reset_env": {
"total": 3.7871027640001103,
"count": 1,
"self": 3.7871027640001103
},
"TrainerController.advance": {
"total": 350.8835018260104,
"count": 9135,
"self": 0.26525037100373083,
"children": {
"env_step": {
"total": 350.61825145500666,
"count": 9135,
"self": 267.7970364420064,
"children": {
"SubprocessEnvManager._take_step": {
"total": 82.55653434499595,
"count": 9135,
"self": 1.6457394019978437,
"children": {
"TorchPolicy.evaluate": {
"total": 80.91079494299811,
"count": 9135,
"self": 80.91079494299811
}
}
},
"workers": {
"total": 0.2646806680043028,
"count": 9135,
"self": 0.0,
"children": {
"worker_root": {
"total": 354.0849314050056,
"count": 9135,
"is_parallel": true,
"self": 169.12195152500726,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003826389999858293,
"count": 1,
"is_parallel": true,
"self": 0.0009254590004275087,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002900930999430784,
"count": 10,
"is_parallel": true,
"self": 0.002900930999430784
}
}
},
"UnityEnvironment.step": {
"total": 0.05640318200016736,
"count": 1,
"is_parallel": true,
"self": 0.001118759000519276,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005493629998909455,
"count": 1,
"is_parallel": true,
"self": 0.0005493629998909455
},
"communicator.exchange": {
"total": 0.05197613899986209,
"count": 1,
"is_parallel": true,
"self": 0.05197613899986209
},
"steps_from_proto": {
"total": 0.0027589209998950537,
"count": 1,
"is_parallel": true,
"self": 0.0005764130000898149,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002182507999805239,
"count": 10,
"is_parallel": true,
"self": 0.002182507999805239
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 184.96297987999833,
"count": 9134,
"is_parallel": true,
"self": 8.825369824971403,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.404201518008676,
"count": 9134,
"is_parallel": true,
"self": 4.404201518008676
},
"communicator.exchange": {
"total": 147.01039572201194,
"count": 9134,
"is_parallel": true,
"self": 147.01039572201194
},
"steps_from_proto": {
"total": 24.723012815006314,
"count": 9134,
"is_parallel": true,
"self": 5.067369048961609,
"children": {
"_process_rank_one_or_two_observation": {
"total": 19.655643766044705,
"count": 91340,
"is_parallel": true,
"self": 19.655643766044705
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0007086890000209678,
"count": 1,
"self": 0.0007086890000209678,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 346.652125065978,
"count": 440241,
"is_parallel": true,
"self": 11.077732069043122,
"children": {
"process_trajectory": {
"total": 192.25149183093595,
"count": 440241,
"is_parallel": true,
"self": 191.90913960993612,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3423522209998282,
"count": 2,
"is_parallel": true,
"self": 0.3423522209998282
}
}
},
"_update_policy": {
"total": 143.32290116599893,
"count": 45,
"is_parallel": true,
"self": 39.24553519200094,
"children": {
"TorchPPOOptimizer.update": {
"total": 104.07736597399798,
"count": 2292,
"is_parallel": true,
"self": 104.07736597399798
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09918355300010262,
"count": 1,
"self": 0.0014053019999664684,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09777825100013615,
"count": 1,
"self": 0.09777825100013615
}
}
}
}
}
}
}