{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.974486231803894,
"min": 0.974486231803894,
"max": 2.8657336235046387,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9336.552734375,
"min": 9336.552734375,
"max": 29411.0234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.342032432556152,
"min": 0.35241949558258057,
"max": 12.342032432556152,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2406.6962890625,
"min": 68.369384765625,
"max": 2481.345703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.704545454545453,
"min": 3.477272727272727,
"max": 24.704545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1087.0,
"min": 153.0,
"max": 1351.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.704545454545453,
"min": 3.477272727272727,
"max": 24.704545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1087.0,
"min": 153.0,
"max": 1351.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07113881957169117,
"min": 0.06274146055682864,
"max": 0.07494897813744858,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2845552782867647,
"min": 0.25096584222731455,
"max": 0.37287962123189256,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18944861067860735,
"min": 0.12386798256706885,
"max": 0.27545347914976237,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7577944427144294,
"min": 0.4954719302682754,
"max": 1.3772673957488117,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.00029184900271699997,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19728300000000004,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048644217,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679830251",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679830856"
},
"total": 604.0923880720001,
"count": 1,
"self": 0.5925703709999652,
"children": {
"run_training.setup": {
"total": 0.13554485800000293,
"count": 1,
"self": 0.13554485800000293
},
"TrainerController.start_learning": {
"total": 603.3642728430001,
"count": 1,
"self": 0.8794790199920044,
"children": {
"TrainerController._reset_env": {
"total": 7.1141263199999685,
"count": 1,
"self": 7.1141263199999685
},
"TrainerController.advance": {
"total": 595.2123622680082,
"count": 18204,
"self": 0.463740429007089,
"children": {
"env_step": {
"total": 594.7486218390011,
"count": 18204,
"self": 478.22026647897565,
"children": {
"SubprocessEnvManager._take_step": {
"total": 116.10711015701531,
"count": 18204,
"self": 3.347268108024082,
"children": {
"TorchPolicy.evaluate": {
"total": 112.75984204899123,
"count": 18204,
"self": 112.75984204899123
}
}
},
"workers": {
"total": 0.42124520301013035,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 601.005759104999,
"count": 18204,
"is_parallel": true,
"self": 265.99731075799593,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007329570000024432,
"count": 1,
"is_parallel": true,
"self": 0.004543495999882907,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002786074000141525,
"count": 10,
"is_parallel": true,
"self": 0.002786074000141525
}
}
},
"UnityEnvironment.step": {
"total": 0.041934557999979916,
"count": 1,
"is_parallel": true,
"self": 0.0006841479997774513,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00030385700006263505,
"count": 1,
"is_parallel": true,
"self": 0.00030385700006263505
},
"communicator.exchange": {
"total": 0.03870181800004957,
"count": 1,
"is_parallel": true,
"self": 0.03870181800004957
},
"steps_from_proto": {
"total": 0.002244735000090259,
"count": 1,
"is_parallel": true,
"self": 0.00046523499986506067,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017795000002251982,
"count": 10,
"is_parallel": true,
"self": 0.0017795000002251982
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 335.0084483470031,
"count": 18203,
"is_parallel": true,
"self": 13.919644676994267,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.544915995999304,
"count": 18203,
"is_parallel": true,
"self": 7.544915995999304
},
"communicator.exchange": {
"total": 268.16701480100994,
"count": 18203,
"is_parallel": true,
"self": 268.16701480100994
},
"steps_from_proto": {
"total": 45.37687287299957,
"count": 18203,
"is_parallel": true,
"self": 9.428728843958083,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.94814402904149,
"count": 182030,
"is_parallel": true,
"self": 35.94814402904149
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00018465699986336404,
"count": 1,
"self": 0.00018465699986336404,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 590.0530862379886,
"count": 594923,
"is_parallel": true,
"self": 14.989465029985354,
"children": {
"process_trajectory": {
"total": 320.783296517003,
"count": 594923,
"is_parallel": true,
"self": 319.378295071003,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4050014460000284,
"count": 4,
"is_parallel": true,
"self": 1.4050014460000284
}
}
},
"_update_policy": {
"total": 254.28032469100026,
"count": 90,
"is_parallel": true,
"self": 88.59249956000144,
"children": {
"TorchPPOOptimizer.update": {
"total": 165.68782513099882,
"count": 4587,
"is_parallel": true,
"self": 165.68782513099882
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15812057800008006,
"count": 1,
"self": 0.0012159710001924395,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15690460699988762,
"count": 1,
"self": 0.15690460699988762
}
}
}
}
}
}
}
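
The JSON above is the complete run_logs/timers.json written by mlagents-learn for this SnowballTarget run: per-metric "gauges" (value/min/max/count), run "metadata", and a nested wall-clock timer tree. As a minimal sketch of how the gauges could be inspected, assuming the file is saved locally at run_logs/timers.json (the path is an assumption, not part of the run):

# Minimal sketch: summarize the gauges in an ML-Agents timers.json dump.
# The path "run_logs/timers.json" is assumed; adjust it to wherever the file lives.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records value/min/max/count for one tracked statistic.
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']} min={gauge['min']} "
          f"max={gauge['max']} (n={gauge['count']})")

# Wall-clock total for the whole run, in seconds.
print("total seconds:", timers["total"])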