{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.821297287940979,
"min": 0.7926107048988342,
"max": 1.4947452545166016,
"count": 38
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7886.91796875,
"min": 7733.5029296875,
"max": 13534.400390625,
"count": 38
},
"SnowballTarget.Step.mean": {
"value": 499952.0,
"min": 129968.0,
"max": 499952.0,
"count": 38
},
"SnowballTarget.Step.sum": {
"value": 499952.0,
"min": 129968.0,
"max": 499952.0,
"count": 38
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.559098243713379,
"min": 10.318947792053223,
"max": 13.739968299865723,
"count": 38
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2644.024169921875,
"min": 1630.3936767578125,
"max": 2802.95361328125,
"count": 38
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 38
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 6567.0,
"max": 10945.0,
"count": 38
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07246993667407273,
"min": 0.061514482307921205,
"max": 0.0753171756865306,
"count": 38
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2898797466962909,
"min": 0.18857296156242156,
"max": 0.36524559652966937,
"count": 38
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1782236401283858,
"min": 0.17702512793681202,
"max": 0.23096839651173234,
"count": 38
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7128945605135432,
"min": 0.6884002067039118,
"max": 1.1548419825586618,
"count": 38
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.290498903200002e-06,
"min": 3.290498903200002e-06,
"max": 0.00022439042520320003,
"count": 38
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.3161995612800007e-05,
"min": 1.3161995612800007e-05,
"max": 0.001095552134816,
"count": 38
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1010968,
"min": 0.1010968,
"max": 0.17479679999999997,
"count": 38
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4043872,
"min": 0.4043872,
"max": 0.865184,
"count": 38
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.473032000000003e-05,
"min": 6.473032000000003e-05,
"max": 0.003742360320000001,
"count": 38
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00025892128000000014,
"min": 0.00025892128000000014,
"max": 0.0182726816,
"count": 38
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.65909090909091,
"min": 22.636363636363637,
"max": 27.054545454545455,
"count": 38
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1173.0,
"min": 747.0,
"max": 1488.0,
"count": 38
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.65909090909091,
"min": 22.636363636363637,
"max": 27.054545454545455,
"count": 38
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1173.0,
"min": 747.0,
"max": 1488.0,
"count": 38
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673378613",
"python_version": "3.8.16+ (heads/3.8-dirty:266a502edf, Jan 10 2023, 19:44:43) \n[GCC 7.5.0]",
"command_line_arguments": "/home/micha/notes/topics/rl/ml-agents/venv/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.19.3",
"end_time_seconds": "1673380199"
},
"total": 1586.09758491721,
"count": 1,
"self": 0.3224212434142828,
"children": {
"run_training.setup": {
"total": 0.012747642584145069,
"count": 1,
"self": 0.012747642584145069
},
"TrainerController.start_learning": {
"total": 1585.7624160312116,
"count": 1,
"self": 1.1248658187687397,
"children": {
"TrainerController._reset_env": {
"total": 5.69324498064816,
"count": 1,
"self": 5.69324498064816
},
"TrainerController.advance": {
"total": 1578.8146093562245,
"count": 34406,
"self": 0.4858091492205858,
"children": {
"env_step": {
"total": 1578.328800207004,
"count": 34406,
"self": 608.1526623778045,
"children": {
"SubprocessEnvManager._take_step": {
"total": 969.6756205102429,
"count": 34406,
"self": 2.8051088796928525,
"children": {
"TorchPolicy.evaluate": {
"total": 966.87051163055,
"count": 34406,
"self": 499.39449591748416,
"children": {
"TorchPolicy.sample_actions": {
"total": 467.47601571306586,
"count": 34406,
"self": 467.47601571306586
}
}
}
}
},
"workers": {
"total": 0.5005173189565539,
"count": 34406,
"self": 0.0,
"children": {
"worker_root": {
"total": 1584.2428595563397,
"count": 34406,
"is_parallel": true,
"self": 1212.4513867869973,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002203693613409996,
"count": 1,
"is_parallel": true,
"self": 0.0004792986437678337,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017243949696421623,
"count": 10,
"is_parallel": true,
"self": 0.0017243949696421623
}
}
},
"UnityEnvironment.step": {
"total": 0.02497137989848852,
"count": 1,
"is_parallel": true,
"self": 0.00035837385803461075,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00030953623354434967,
"count": 1,
"is_parallel": true,
"self": 0.00030953623354434967
},
"communicator.exchange": {
"total": 0.02264922484755516,
"count": 1,
"is_parallel": true,
"self": 0.02264922484755516
},
"steps_from_proto": {
"total": 0.0016542449593544006,
"count": 1,
"is_parallel": true,
"self": 0.00026795268058776855,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001386292278766632,
"count": 10,
"is_parallel": true,
"self": 0.001386292278766632
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 371.79147276934236,
"count": 34405,
"is_parallel": true,
"self": 12.873356358148158,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.182147434912622,
"count": 34405,
"is_parallel": true,
"self": 8.182147434912622
},
"communicator.exchange": {
"total": 291.2151619521901,
"count": 34405,
"is_parallel": true,
"self": 291.2151619521901
},
"steps_from_proto": {
"total": 59.52080702409148,
"count": 34405,
"is_parallel": true,
"self": 8.202465694397688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 51.318341329693794,
"count": 344050,
"is_parallel": true,
"self": 51.318341329693794
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011739134788513184,
"count": 1,
"self": 0.00011739134788513184,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1571.6894609583542,
"count": 2389636,
"is_parallel": true,
"self": 19.37277008034289,
"children": {
"process_trajectory": {
"total": 929.6372295645997,
"count": 2389636,
"is_parallel": true,
"self": 927.0598383825272,
"children": {
"RLTrainer._checkpoint": {
"total": 2.5773911820724607,
"count": 8,
"is_parallel": true,
"self": 2.5773911820724607
}
}
},
"_update_policy": {
"total": 622.6794613134116,
"count": 171,
"is_parallel": true,
"self": 99.54105281922966,
"children": {
"TorchPPOOptimizer.update": {
"total": 523.1384084941819,
"count": 8718,
"is_parallel": true,
"self": 523.1384084941819
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1295784842222929,
"count": 1,
"self": 0.0010177632793784142,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12856072094291449,
"count": 1,
"self": 0.12856072094291449
}
}
}
}
}
}
}