{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.3657820224761963,
"min": 1.3657820224761963,
"max": 2.8814616203308105,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 13040.486328125,
"min": 13040.486328125,
"max": 29509.048828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.422035217285156,
"min": 0.5063137412071228,
"max": 12.422035217285156,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2422.296875,
"min": 98.22486877441406,
"max": 2510.35009765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07248690631069407,
"min": 0.06377157429119061,
"max": 0.0742890913651197,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2899476252427763,
"min": 0.25508629716476244,
"max": 0.3714454568255985,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20028863467422187,
"min": 0.11901058816104469,
"max": 0.26494974672209987,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8011545386968875,
"min": 0.47604235264417877,
"max": 1.2758128000240703,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.388009730600003e-05,
"min": 5.388009730600003e-05,
"max": 0.001945880002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0002155203892240001,
"min": 0.0002155203892240001,
"max": 0.00923440003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.09999999999999998,
"min": 0.09999999999999998,
"max": 0.09999999999999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.3999999999999999,
"min": 0.3999999999999999,
"max": 0.4999999999999999,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.772727272727273,
"min": 3.1136363636363638,
"max": 24.90909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1090.0,
"min": 137.0,
"max": 1349.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.772727272727273,
"min": 3.1136363636363638,
"max": 24.90909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1090.0,
"min": 137.0,
"max": 1349.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679218020",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679218475"
},
"total": 455.1994181780001,
"count": 1,
"self": 0.7272712530002536,
"children": {
"run_training.setup": {
"total": 0.10010213699979431,
"count": 1,
"self": 0.10010213699979431
},
"TrainerController.start_learning": {
"total": 454.37204478800004,
"count": 1,
"self": 0.5487199020112712,
"children": {
"TrainerController._reset_env": {
"total": 5.719490780999877,
"count": 1,
"self": 5.719490780999877
},
"TrainerController.advance": {
"total": 447.8756048009891,
"count": 18212,
"self": 0.2805861969495709,
"children": {
"env_step": {
"total": 447.5950186040395,
"count": 18212,
"self": 324.19736295908706,
"children": {
"SubprocessEnvManager._take_step": {
"total": 123.13395416901858,
"count": 18212,
"self": 1.8465203180162462,
"children": {
"TorchPolicy.evaluate": {
"total": 121.28743385100233,
"count": 18212,
"self": 121.28743385100233
}
}
},
"workers": {
"total": 0.2637014759338854,
"count": 18212,
"self": 0.0,
"children": {
"worker_root": {
"total": 452.8898813449723,
"count": 18212,
"is_parallel": true,
"self": 213.36200566694788,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019253939999543945,
"count": 1,
"is_parallel": true,
"self": 0.0006357800002660952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012896139996882994,
"count": 10,
"is_parallel": true,
"self": 0.0012896139996882994
}
}
},
"UnityEnvironment.step": {
"total": 0.04557048400010899,
"count": 1,
"is_parallel": true,
"self": 0.0005901250001443259,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003715530001500156,
"count": 1,
"is_parallel": true,
"self": 0.0003715530001500156
},
"communicator.exchange": {
"total": 0.04270577699981004,
"count": 1,
"is_parallel": true,
"self": 0.04270577699981004
},
"steps_from_proto": {
"total": 0.001903029000004608,
"count": 1,
"is_parallel": true,
"self": 0.0005148420000296028,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013881869999750052,
"count": 10,
"is_parallel": true,
"self": 0.0013881869999750052
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 239.52787567802443,
"count": 18211,
"is_parallel": true,
"self": 9.361195565980324,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.033057282023492,
"count": 18211,
"is_parallel": true,
"self": 5.033057282023492
},
"communicator.exchange": {
"total": 195.07069511900363,
"count": 18211,
"is_parallel": true,
"self": 195.07069511900363
},
"steps_from_proto": {
"total": 30.062927711016982,
"count": 18211,
"is_parallel": true,
"self": 5.953288311899087,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.109639399117896,
"count": 182110,
"is_parallel": true,
"self": 24.109639399117896
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011423099977037054,
"count": 1,
"self": 0.00011423099977037054,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 444.7551320571033,
"count": 390689,
"is_parallel": true,
"self": 9.272329466279643,
"children": {
"process_trajectory": {
"total": 244.8193140678236,
"count": 390689,
"is_parallel": true,
"self": 243.39723775682432,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4220763109992731,
"count": 4,
"is_parallel": true,
"self": 1.4220763109992731
}
}
},
"_update_policy": {
"total": 190.66348852300007,
"count": 90,
"is_parallel": true,
"self": 73.08300338100662,
"children": {
"TorchPPOOptimizer.update": {
"total": 117.58048514199345,
"count": 4587,
"is_parallel": true,
"self": 117.58048514199345
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2281150730000263,
"count": 1,
"self": 0.001159741999799735,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22695533100022658,
"count": 1,
"self": 0.22695533100022658
}
}
}
}
}
}
}