First Push (a827bc7)
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5469810962677002,
"min": 0.5361841917037964,
"max": 2.8614871501922607,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5282.74365234375,
"min": 5282.74365234375,
"max": 29367.44140625,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.632340431213379,
"min": 0.341781884431839,
"max": 13.796305656433105,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2794.6298828125,
"min": 66.3056869506836,
"max": 2822.87646484375,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06317157646716462,
"min": 0.06201571205465549,
"max": 0.07627568223745351,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3158578823358231,
"min": 0.24806284821862196,
"max": 0.36351435495695716,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1896061252437386,
"min": 0.12089029220777436,
"max": 0.29683436438733457,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9480306262186929,
"min": 0.48356116883109745,
"max": 1.3783004295592216,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000028e-06,
"min": 3.0528989824000028e-06,
"max": 0.00029675280108239997,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5264494912000015e-05,
"min": 1.5264494912000015e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.545454545454547,
"min": 3.159090909090909,
"max": 27.318181818181817,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1460.0,
"min": 139.0,
"max": 1496.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.545454545454547,
"min": 3.159090909090909,
"max": 27.318181818181817,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1460.0,
"min": 139.0,
"max": 1496.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679154109",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679155220"
},
"total": 1111.1528324649998,
"count": 1,
"self": 0.4353955179994955,
"children": {
"run_training.setup": {
"total": 0.10220676900007675,
"count": 1,
"self": 0.10220676900007675
},
"TrainerController.start_learning": {
"total": 1110.6152301780003,
"count": 1,
"self": 1.2611830250193634,
"children": {
"TrainerController._reset_env": {
"total": 12.21132761499996,
"count": 1,
"self": 12.21132761499996
},
"TrainerController.advance": {
"total": 1097.0037110759806,
"count": 45476,
"self": 0.6690487760031374,
"children": {
"env_step": {
"total": 1096.3346622999775,
"count": 45476,
"self": 786.343466318991,
"children": {
"SubprocessEnvManager._take_step": {
"total": 309.3468315310014,
"count": 45476,
"self": 5.049092212964297,
"children": {
"TorchPolicy.evaluate": {
"total": 304.2977393180371,
"count": 45476,
"self": 304.2977393180371
}
}
},
"workers": {
"total": 0.6443644499851189,
"count": 45476,
"self": 0.0,
"children": {
"worker_root": {
"total": 1107.409380133027,
"count": 45476,
"is_parallel": true,
"self": 530.9186069640217,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006002519000048778,
"count": 1,
"is_parallel": true,
"self": 0.004518476999919585,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014840420001291932,
"count": 10,
"is_parallel": true,
"self": 0.0014840420001291932
}
}
},
"UnityEnvironment.step": {
"total": 0.03362339199998132,
"count": 1,
"is_parallel": true,
"self": 0.0005392749999373336,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003980660000024727,
"count": 1,
"is_parallel": true,
"self": 0.0003980660000024727
},
"communicator.exchange": {
"total": 0.030893245000015668,
"count": 1,
"is_parallel": true,
"self": 0.030893245000015668
},
"steps_from_proto": {
"total": 0.0017928060000258483,
"count": 1,
"is_parallel": true,
"self": 0.0003697939998801303,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001423012000145718,
"count": 10,
"is_parallel": true,
"self": 0.001423012000145718
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 576.4907731690054,
"count": 45475,
"is_parallel": true,
"self": 22.929547776021423,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.641583627009027,
"count": 45475,
"is_parallel": true,
"self": 12.641583627009027
},
"communicator.exchange": {
"total": 466.96638106998364,
"count": 45475,
"is_parallel": true,
"self": 466.96638106998364
},
"steps_from_proto": {
"total": 73.95326069599128,
"count": 45475,
"is_parallel": true,
"self": 14.355607679968443,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.59765301602283,
"count": 454750,
"is_parallel": true,
"self": 59.59765301602283
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012591800032168976,
"count": 1,
"self": 0.00012591800032168976,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1088.8524258070931,
"count": 966139,
"is_parallel": true,
"self": 23.87928366909273,
"children": {
"process_trajectory": {
"total": 599.3912466010019,
"count": 966139,
"is_parallel": true,
"self": 597.2728521690013,
"children": {
"RLTrainer._checkpoint": {
"total": 2.118394432000514,
"count": 10,
"is_parallel": true,
"self": 2.118394432000514
}
}
},
"_update_policy": {
"total": 465.58189553699856,
"count": 227,
"is_parallel": true,
"self": 169.7260702290041,
"children": {
"TorchPPOOptimizer.update": {
"total": 295.85582530799445,
"count": 11574,
"is_parallel": true,
"self": 295.85582530799445
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1388825439998982,
"count": 1,
"self": 0.0008949610000854591,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13798758299981273,
"count": 1,
"self": 0.13798758299981273
}
}
}
}
}
}
}
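
The gauges above are plain JSON and can be read back programmatically, for example to pull out the final and best mean reward or the total wall-clock time of the run. The snippet below is a minimal sketch: the path run_logs/timers.json is an assumption (point it at wherever this file is stored), while the key names are copied verbatim from the dump above.

# Minimal sketch, assuming the dump above is saved locally as
# "run_logs/timers.json" (the file path is hypothetical; the key names
# are taken from the gauges shown above).
import json

with open("run_logs/timers.json") as f:
    stats = json.load(f)

reward = stats["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
print("final mean reward:", reward["value"])        # ~26.55 at the last summary
print("best mean reward seen:", reward["max"])      # ~27.32
print("total wall-clock seconds:", stats["total"])  # ~1111 s for the whole run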