{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9027432203292847,
"min": 0.8886774182319641,
"max": 2.865996837615967,
"count": 25
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8589.6015625,
"min": 8465.541015625,
"max": 29413.724609375,
"count": 25
},
"SnowballTarget.Step.mean": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Step.sum": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.0246000289917,
"min": 0.3905719518661499,
"max": 13.030131340026855,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2526.7724609375,
"min": 75.77095794677734,
"max": 2671.177001953125,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0660443901213904,
"min": 0.0623700422681092,
"max": 0.073655227968451,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2641775604855616,
"min": 0.2494801690724368,
"max": 0.368276139842255,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18798936282594997,
"min": 0.14285855851930512,
"max": 0.28174183466563035,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7519574513037999,
"min": 0.5714342340772205,
"max": 1.3732448193956823,
"count": 25
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.7456980848e-06,
"min": 5.7456980848e-06,
"max": 0.0002935056021648,
"count": 25
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.29827923392e-05,
"min": 2.29827923392e-05,
"max": 0.0014081280306239997,
"count": 25
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10191520000000001,
"min": 0.10191520000000001,
"max": 0.19783520000000002,
"count": 25
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40766080000000005,
"min": 0.40766080000000005,
"max": 0.9693760000000002,
"count": 25
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010556848000000002,
"min": 0.00010556848000000002,
"max": 0.004891976480000001,
"count": 25
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004222739200000001,
"min": 0.0004222739200000001,
"max": 0.023471862399999998,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.90909090909091,
"min": 3.3863636363636362,
"max": 26.022222222222222,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1140.0,
"min": 149.0,
"max": 1417.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.90909090909091,
"min": 3.3863636363636362,
"max": 26.022222222222222,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1140.0,
"min": 149.0,
"max": 1417.0,
"count": 25
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680965607",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680966180"
},
"total": 573.625759773,
"count": 1,
"self": 0.7449315029998616,
"children": {
"run_training.setup": {
"total": 0.11715758700006518,
"count": 1,
"self": 0.11715758700006518
},
"TrainerController.start_learning": {
"total": 572.7636706830001,
"count": 1,
"self": 0.6902432800093266,
"children": {
"TrainerController._reset_env": {
"total": 4.731449273999942,
"count": 1,
"self": 4.731449273999942
},
"TrainerController.advance": {
"total": 567.1304799789905,
"count": 22739,
"self": 0.3552740759826065,
"children": {
"env_step": {
"total": 566.7752059030079,
"count": 22739,
"self": 417.0431853780324,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.39359234996095,
"count": 22739,
"self": 2.057052624943708,
"children": {
"TorchPolicy.evaluate": {
"total": 147.33653972501725,
"count": 22739,
"self": 147.33653972501725
}
}
},
"workers": {
"total": 0.3384281750145419,
"count": 22739,
"self": 0.0,
"children": {
"worker_root": {
"total": 571.0822307699516,
"count": 22739,
"is_parallel": true,
"self": 263.0863309479471,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00551649399994858,
"count": 1,
"is_parallel": true,
"self": 0.0039796349999505765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015368589999980031,
"count": 10,
"is_parallel": true,
"self": 0.0015368589999980031
}
}
},
"UnityEnvironment.step": {
"total": 0.05870996599992395,
"count": 1,
"is_parallel": true,
"self": 0.0005982010000025184,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00036842999998043524,
"count": 1,
"is_parallel": true,
"self": 0.00036842999998043524
},
"communicator.exchange": {
"total": 0.05605310000009922,
"count": 1,
"is_parallel": true,
"self": 0.05605310000009922
},
"steps_from_proto": {
"total": 0.0016902349998417776,
"count": 1,
"is_parallel": true,
"self": 0.0003575609998733853,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013326739999683923,
"count": 10,
"is_parallel": true,
"self": 0.0013326739999683923
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 307.9958998220045,
"count": 22738,
"is_parallel": true,
"self": 12.339288863027377,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.677939529005016,
"count": 22738,
"is_parallel": true,
"self": 6.677939529005016
},
"communicator.exchange": {
"total": 249.75194673997976,
"count": 22738,
"is_parallel": true,
"self": 249.75194673997976
},
"steps_from_proto": {
"total": 39.22672468999235,
"count": 22738,
"is_parallel": true,
"self": 7.7075016479846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.519223042007752,
"count": 227380,
"is_parallel": true,
"self": 31.519223042007752
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.2509000144928e-05,
"count": 1,
"self": 6.2509000144928e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 563.0555842489307,
"count": 499853,
"is_parallel": true,
"self": 11.804565787029787,
"children": {
"process_trajectory": {
"total": 308.9915419829015,
"count": 499853,
"is_parallel": true,
"self": 308.04244291990176,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9490990629997214,
"count": 5,
"is_parallel": true,
"self": 0.9490990629997214
}
}
},
"_update_policy": {
"total": 242.25947647899943,
"count": 113,
"is_parallel": true,
"self": 92.56769424300501,
"children": {
"TorchPPOOptimizer.update": {
"total": 149.69178223599442,
"count": 5760,
"is_parallel": true,
"self": 149.69178223599442
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2114356410002074,
"count": 1,
"self": 0.0011193720001756446,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21031626900003175,
"count": 1,
"self": 0.21031626900003175
}
}
}
}
}
}
}