{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.14456307888031,
"min": 1.14456307888031,
"max": 2.853330135345459,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 56529.97265625,
"min": 56529.97265625,
"max": 144127.40625,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 49936.0,
"max": 499976.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 49936.0,
"max": 499976.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.437644958496094,
"min": 0.5111746788024902,
"max": 11.437644958496094,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 11471.9580078125,
"min": 507.0852966308594,
"max": 11471.9580078125,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 50347.0,
"min": 48158.0,
"max": 50347.0,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.8300395256917,
"min": 3.7644628099173554,
"max": 23.8300395256917,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 6029.0,
"min": 911.0,
"max": 6029.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.8300395256917,
"min": 3.7644628099173554,
"max": 23.8300395256917,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 6029.0,
"min": 911.0,
"max": 6029.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.02266195354341107,
"min": 0.022510104025132024,
"max": 0.02555250440277935,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.09064781417364429,
"min": 0.09064781417364429,
"max": 0.12776252201389676,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.27012798810998595,
"min": 0.12411952037364243,
"max": 0.3050958347320557,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0805119524399438,
"min": 0.49647808149456973,
"max": 1.5254791736602784,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.4576495141199999e-05,
"min": 1.4576495141199999e-05,
"max": 0.0002841864052712,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.8305980564799996e-05,
"min": 5.8305980564799996e-05,
"max": 0.0012800256733248002,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1048588,
"min": 0.1048588,
"max": 0.19472879999999998,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4194352,
"min": 0.4194352,
"max": 0.9266752000000001,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00025245412000000005,
"min": 0.00025245412000000005,
"max": 0.004736967120000001,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0010098164800000002,
"min": 0.0010098164800000002,
"max": 0.021341092479999997,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676836347",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676837463"
},
"total": 1115.854177459,
"count": 1,
"self": 0.43564135900010115,
"children": {
"run_training.setup": {
"total": 0.1131103699999585,
"count": 1,
"self": 0.1131103699999585
},
"TrainerController.start_learning": {
"total": 1115.30542573,
"count": 1,
"self": 1.3106038930179693,
"children": {
"TrainerController._reset_env": {
"total": 9.239542474000018,
"count": 1,
"self": 9.239542474000018
},
"TrainerController.advance": {
"total": 1104.5851296179821,
"count": 45488,
"self": 0.6592771809814622,
"children": {
"env_step": {
"total": 1103.9258524370007,
"count": 45488,
"self": 756.1124641999972,
"children": {
"SubprocessEnvManager._take_step": {
"total": 347.13027627100655,
"count": 45488,
"self": 3.641823583037649,
"children": {
"TorchPolicy.evaluate": {
"total": 343.4884526879689,
"count": 45488,
"self": 77.07385322697223,
"children": {
"TorchPolicy.sample_actions": {
"total": 266.41459946099667,
"count": 45488,
"self": 266.41459946099667
}
}
}
}
},
"workers": {
"total": 0.6831119659968863,
"count": 45488,
"self": 0.0,
"children": {
"worker_root": {
"total": 1111.6398618989551,
"count": 45488,
"is_parallel": true,
"self": 503.50860422096025,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00622724200002267,
"count": 1,
"is_parallel": true,
"self": 0.0033533979997173446,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0028738440003053256,
"count": 10,
"is_parallel": true,
"self": 0.0028738440003053256
}
}
},
"UnityEnvironment.step": {
"total": 0.05420534800009591,
"count": 1,
"is_parallel": true,
"self": 0.0006135870000889554,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00035287300011077605,
"count": 1,
"is_parallel": true,
"self": 0.00035287300011077605
},
"communicator.exchange": {
"total": 0.0511304479998671,
"count": 1,
"is_parallel": true,
"self": 0.0511304479998671
},
"steps_from_proto": {
"total": 0.002108440000029077,
"count": 1,
"is_parallel": true,
"self": 0.00048381600004177017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016246239999873069,
"count": 10,
"is_parallel": true,
"self": 0.0016246239999873069
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 608.1312576779949,
"count": 45487,
"is_parallel": true,
"self": 24.03848778701058,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.614876801995706,
"count": 45487,
"is_parallel": true,
"self": 13.614876801995706
},
"communicator.exchange": {
"total": 489.1634868570063,
"count": 45487,
"is_parallel": true,
"self": 489.1634868570063
},
"steps_from_proto": {
"total": 81.31440623198228,
"count": 45487,
"is_parallel": true,
"self": 17.566893185969775,
"children": {
"_process_rank_one_or_two_observation": {
"total": 63.7475130460125,
"count": 454870,
"is_parallel": true,
"self": 63.7475130460125
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013016000002608052,
"count": 1,
"self": 0.00013016000002608052,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1091.920142166927,
"count": 1441797,
"is_parallel": true,
"self": 36.37594577264622,
"children": {
"process_trajectory": {
"total": 856.5934215082818,
"count": 1441797,
"is_parallel": true,
"self": 853.5401176002822,
"children": {
"RLTrainer._checkpoint": {
"total": 3.0533039079996342,
"count": 10,
"is_parallel": true,
"self": 3.0533039079996342
}
}
},
"_update_policy": {
"total": 198.95077488599895,
"count": 47,
"is_parallel": true,
"self": 136.9055363820048,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.04523850399414,
"count": 1410,
"is_parallel": true,
"self": 62.04523850399414
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.17001958499986358,
"count": 1,
"self": 0.0010764119997475063,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16894317300011608,
"count": 1,
"self": 0.16894317300011608
}
}
}
}
}
}
}