{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.008610725402832,
"min": 1.008610725402832,
"max": 2.8703370094299316,
"count": 25
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9619.1201171875,
"min": 9619.1201171875,
"max": 29426.6953125,
"count": 25
},
"SnowballTarget.Step.mean": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Step.sum": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.132675170898438,
"min": 0.35344401001930237,
"max": 13.161680221557617,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2547.739013671875,
"min": 68.5681381225586,
"max": 2698.14453125,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06846887276342653,
"min": 0.05939498994506293,
"max": 0.07701259125115387,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2738754910537061,
"min": 0.23757995978025173,
"max": 0.36153619497895756,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21997608324768497,
"min": 0.11581499640848122,
"max": 0.26882794667400567,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8799043329907399,
"min": 0.4632599856339249,
"max": 1.28254123090529,
"count": 25
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.7456980848e-06,
"min": 5.7456980848e-06,
"max": 0.0002935056021648,
"count": 25
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.29827923392e-05,
"min": 2.29827923392e-05,
"max": 0.0014081280306239997,
"count": 25
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10191520000000001,
"min": 0.10191520000000001,
"max": 0.19783520000000002,
"count": 25
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40766080000000005,
"min": 0.40766080000000005,
"max": 0.9693760000000002,
"count": 25
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010556848000000002,
"min": 0.00010556848000000002,
"max": 0.004891976480000001,
"count": 25
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004222739200000001,
"min": 0.0004222739200000001,
"max": 0.023471862399999998,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.704545454545453,
"min": 3.4318181818181817,
"max": 25.84090909090909,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1131.0,
"min": 151.0,
"max": 1408.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.704545454545453,
"min": 3.4318181818181817,
"max": 25.84090909090909,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1131.0,
"min": 151.0,
"max": 1408.0,
"count": 25
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684917674",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684918268"
},
"total": 594.219254884,
"count": 1,
"self": 0.7783775389999619,
"children": {
"run_training.setup": {
"total": 0.043960198999968725,
"count": 1,
"self": 0.043960198999968725
},
"TrainerController.start_learning": {
"total": 593.3969171460001,
"count": 1,
"self": 0.6750813979961094,
"children": {
"TrainerController._reset_env": {
"total": 4.093987562999985,
"count": 1,
"self": 4.093987562999985
},
"TrainerController.advance": {
"total": 588.4053498020041,
"count": 22762,
"self": 0.3597285649952937,
"children": {
"env_step": {
"total": 588.0456212370088,
"count": 22762,
"self": 430.3969051300109,
"children": {
"SubprocessEnvManager._take_step": {
"total": 157.3093551560076,
"count": 22762,
"self": 2.2951351120127583,
"children": {
"TorchPolicy.evaluate": {
"total": 155.01422004399484,
"count": 22762,
"self": 155.01422004399484
}
}
},
"workers": {
"total": 0.3393609509903399,
"count": 22762,
"self": 0.0,
"children": {
"worker_root": {
"total": 591.2233689639983,
"count": 22762,
"is_parallel": true,
"self": 279.21967331499127,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0058886670000219965,
"count": 1,
"is_parallel": true,
"self": 0.004427869000096507,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014607979999254894,
"count": 10,
"is_parallel": true,
"self": 0.0014607979999254894
}
}
},
"UnityEnvironment.step": {
"total": 0.07970665800002052,
"count": 1,
"is_parallel": true,
"self": 0.0006010990000504535,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004211660000237316,
"count": 1,
"is_parallel": true,
"self": 0.0004211660000237316
},
"communicator.exchange": {
"total": 0.07653630399994427,
"count": 1,
"is_parallel": true,
"self": 0.07653630399994427
},
"steps_from_proto": {
"total": 0.0021480890000020736,
"count": 1,
"is_parallel": true,
"self": 0.0004005749999009822,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017475140001010914,
"count": 10,
"is_parallel": true,
"self": 0.0017475140001010914
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 312.003695649007,
"count": 22761,
"is_parallel": true,
"self": 12.303091519005761,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.899662049998653,
"count": 22761,
"is_parallel": true,
"self": 6.899662049998653
},
"communicator.exchange": {
"total": 249.53905326799918,
"count": 22761,
"is_parallel": true,
"self": 249.53905326799918
},
"steps_from_proto": {
"total": 43.261888812003406,
"count": 22761,
"is_parallel": true,
"self": 8.446482088030848,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.81540672397256,
"count": 227610,
"is_parallel": true,
"self": 34.81540672397256
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.10779999810984e-05,
"count": 1,
"self": 4.10779999810984e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 583.7435733379529,
"count": 555008,
"is_parallel": true,
"self": 12.628126165955337,
"children": {
"process_trajectory": {
"total": 319.4838405589981,
"count": 555008,
"is_parallel": true,
"self": 317.5131209979981,
"children": {
"RLTrainer._checkpoint": {
"total": 1.97071956100001,
"count": 5,
"is_parallel": true,
"self": 1.97071956100001
}
}
},
"_update_policy": {
"total": 251.6316066129994,
"count": 113,
"is_parallel": true,
"self": 97.88961931900707,
"children": {
"TorchPPOOptimizer.update": {
"total": 153.74198729399234,
"count": 5760,
"is_parallel": true,
"self": 153.74198729399234
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22245730499992078,
"count": 1,
"self": 0.0010960589999058357,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22136124600001494,
"count": 1,
"self": 0.22136124600001494
}
}
}
}
}
}
}