{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9555771946907043,
"min": 0.9531955122947693,
"max": 2.729161024093628,
"count": 19
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9102.828125,
"min": 9102.828125,
"max": 26601.55859375,
"count": 19
},
"SnowballTarget.Step.mean": {
"value": 199960.0,
"min": 19968.0,
"max": 199960.0,
"count": 19
},
"SnowballTarget.Step.sum": {
"value": 199960.0,
"min": 19968.0,
"max": 199960.0,
"count": 19
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.639564514160156,
"min": 1.5505114793777466,
"max": 12.639564514160156,
"count": 19
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2464.715087890625,
"min": 210.86956787109375,
"max": 2545.27587890625,
"count": 19
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 19
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 6567.0,
"max": 10945.0,
"count": 19
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06779544857358012,
"min": 0.0625097456477199,
"max": 0.07532066806292563,
"count": 19
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2711817942943205,
"min": 0.19933996502304047,
"max": 0.37660334031462817,
"count": 19
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20238702977989234,
"min": 0.18720294447506175,
"max": 0.2890216240695878,
"count": 19
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8095481191195694,
"min": 0.6170700874325692,
"max": 1.3757285975358065,
"count": 19
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.214097262000005e-06,
"min": 8.214097262000005e-06,
"max": 0.000273864008712,
"count": 19
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.285638904800002e-05,
"min": 3.285638904800002e-05,
"max": 0.00123732008756,
"count": 19
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10273800000000001,
"min": 0.10273800000000001,
"max": 0.19128799999999999,
"count": 19
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41095200000000004,
"min": 0.41095200000000004,
"max": 0.9124400000000001,
"count": 19
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001466262000000001,
"min": 0.0001466262000000001,
"max": 0.004565271199999999,
"count": 19
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005865048000000004,
"min": 0.0005865048000000004,
"max": 0.020630756,
"count": 19
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.522727272727273,
"min": 6.636363636363637,
"max": 25.522727272727273,
"count": 19
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1123.0,
"min": 219.0,
"max": 1354.0,
"count": 19
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.522727272727273,
"min": 6.636363636363637,
"max": 25.522727272727273,
"count": 19
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1123.0,
"min": 219.0,
"max": 1354.0,
"count": 19
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 19
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 19
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679087305",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679087760"
},
"total": 454.47214893499995,
"count": 1,
"self": 0.38305882599991037,
"children": {
"run_training.setup": {
"total": 0.18540138000003026,
"count": 1,
"self": 0.18540138000003026
},
"TrainerController.start_learning": {
"total": 453.903688729,
"count": 1,
"self": 0.5803683109929807,
"children": {
"TrainerController._reset_env": {
"total": 6.013717145999976,
"count": 1,
"self": 6.013717145999976
},
"TrainerController.advance": {
"total": 447.18101703000707,
"count": 17000,
"self": 0.29113066399622767,
"children": {
"env_step": {
"total": 446.88988636601084,
"count": 17000,
"self": 321.7251976679946,
"children": {
"SubprocessEnvManager._take_step": {
"total": 124.8861289540007,
"count": 17000,
"self": 2.323696690005704,
"children": {
"TorchPolicy.evaluate": {
"total": 122.56243226399499,
"count": 17000,
"self": 122.56243226399499
}
}
},
"workers": {
"total": 0.2785597440155243,
"count": 17000,
"self": 0.0,
"children": {
"worker_root": {
"total": 452.4741243759946,
"count": 17000,
"is_parallel": true,
"self": 213.28853657200193,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0031129779999901075,
"count": 1,
"is_parallel": true,
"self": 0.0009860399999297442,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021269380000603633,
"count": 10,
"is_parallel": true,
"self": 0.0021269380000603633
}
}
},
"UnityEnvironment.step": {
"total": 0.052412438000033035,
"count": 1,
"is_parallel": true,
"self": 0.0005429159999721378,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031134900007145916,
"count": 1,
"is_parallel": true,
"self": 0.00031134900007145916
},
"communicator.exchange": {
"total": 0.04970026199998756,
"count": 1,
"is_parallel": true,
"self": 0.04970026199998756
},
"steps_from_proto": {
"total": 0.0018579110000018773,
"count": 1,
"is_parallel": true,
"self": 0.0003948490000311722,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001463061999970705,
"count": 10,
"is_parallel": true,
"self": 0.001463061999970705
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 239.1855878039927,
"count": 16999,
"is_parallel": true,
"self": 9.119688853002458,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.09729963199436,
"count": 16999,
"is_parallel": true,
"self": 5.09729963199436
},
"communicator.exchange": {
"total": 194.3885450259993,
"count": 16999,
"is_parallel": true,
"self": 194.3885450259993
},
"steps_from_proto": {
"total": 30.58005429299658,
"count": 16999,
"is_parallel": true,
"self": 6.305617006002649,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.27443728699393,
"count": 169990,
"is_parallel": true,
"self": 24.27443728699393
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.352099997755431e-05,
"count": 1,
"self": 3.352099997755431e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 443.6191421599615,
"count": 413911,
"is_parallel": true,
"self": 10.168503500958195,
"children": {
"process_trajectory": {
"total": 248.3985289960027,
"count": 413911,
"is_parallel": true,
"self": 247.65049026500287,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7480387309998378,
"count": 4,
"is_parallel": true,
"self": 0.7480387309998378
}
}
},
"_update_policy": {
"total": 185.05210966300058,
"count": 84,
"is_parallel": true,
"self": 65.93473184300376,
"children": {
"TorchPPOOptimizer.update": {
"total": 119.11737781999682,
"count": 4281,
"is_parallel": true,
"self": 119.11737781999682
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12855272100000548,
"count": 1,
"self": 0.001085244000023522,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12746747699998195,
"count": 1,
"self": 0.12746747699998195
}
}
}
}
}
}
}