{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6212834715843201,
"min": 0.6212834715843201,
"max": 2.5113227367401123,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 31334.431640625,
"min": 31334.431640625,
"max": 127100.5546875,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 499912.0,
"min": 49952.0,
"max": 499912.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 499912.0,
"min": 49952.0,
"max": 499912.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.776063919067383,
"min": 3.122358560562134,
"max": 13.776063919067383,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 6901.80810546875,
"min": 1551.812255859375,
"max": 6901.80810546875,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 50347.0,
"min": 48158.0,
"max": 50347.0,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.972332015810277,
"min": 9.692622950819672,
"max": 26.972332015810277,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 6824.0,
"min": 2365.0,
"max": 6824.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.972332015810277,
"min": 9.692622950819672,
"max": 26.972332015810277,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 6824.0,
"min": 2365.0,
"max": 6824.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06727982555731091,
"min": 0.06610792550520869,
"max": 0.06946593991671618,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 1.547435987818151,
"min": 1.4543743611145912,
"max": 1.5977166180844722,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18281737867447423,
"min": 0.18281737867447423,
"max": 0.2537367838194303,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 4.2047997095129075,
"min": 4.2047997095129075,
"max": 5.835946027846897,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.488009504e-05,
"min": 1.488009504e-05,
"max": 0.00028482000505999997,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00034224218592,
"min": 0.00034224218592,
"max": 0.006266040111319999,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10496000000000003,
"min": 0.10496000000000003,
"max": 0.19493999999999997,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 2.4140800000000007,
"min": 2.4140800000000007,
"max": 4.288679999999999,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00025750400000000003,
"min": 0.00025750400000000003,
"max": 0.004747505999999999,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.005922592000000001,
"min": 0.005922592000000001,
"max": 0.104445132,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703599484",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703600573"
},
"total": 1089.957315092,
"count": 1,
"self": 0.7371019600000182,
"children": {
"run_training.setup": {
"total": 0.05031633300006888,
"count": 1,
"self": 0.05031633300006888
},
"TrainerController.start_learning": {
"total": 1089.169896799,
"count": 1,
"self": 1.4554743249784678,
"children": {
"TrainerController._reset_env": {
"total": 2.7857125090000636,
"count": 1,
"self": 2.7857125090000636
},
"TrainerController.advance": {
"total": 1084.7899431570218,
"count": 45548,
"self": 0.6593831560323906,
"children": {
"env_step": {
"total": 1084.1305600009894,
"count": 45548,
"self": 686.7062443009918,
"children": {
"SubprocessEnvManager._take_step": {
"total": 396.7267661689973,
"count": 45548,
"self": 3.706804077967263,
"children": {
"TorchPolicy.evaluate": {
"total": 393.01996209103004,
"count": 45548,
"self": 393.01996209103004
}
}
},
"workers": {
"total": 0.6975495310002771,
"count": 45548,
"self": 0.0,
"children": {
"worker_root": {
"total": 1085.8218655810106,
"count": 45548,
"is_parallel": true,
"self": 515.967000327981,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004778807000093366,
"count": 1,
"is_parallel": true,
"self": 0.0032336840000652955,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015451230000280702,
"count": 10,
"is_parallel": true,
"self": 0.0015451230000280702
}
}
},
"UnityEnvironment.step": {
"total": 0.03785008199997719,
"count": 1,
"is_parallel": true,
"self": 0.0007229389999565683,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041126500002519606,
"count": 1,
"is_parallel": true,
"self": 0.00041126500002519606
},
"communicator.exchange": {
"total": 0.034826715000008335,
"count": 1,
"is_parallel": true,
"self": 0.034826715000008335
},
"steps_from_proto": {
"total": 0.001889162999987093,
"count": 1,
"is_parallel": true,
"self": 0.00036270800023885386,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001526454999748239,
"count": 10,
"is_parallel": true,
"self": 0.001526454999748239
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 569.8548652530295,
"count": 45547,
"is_parallel": true,
"self": 26.79547779401105,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.565955926022298,
"count": 45547,
"is_parallel": true,
"self": 13.565955926022298
},
"communicator.exchange": {
"total": 444.3168439539992,
"count": 45547,
"is_parallel": true,
"self": 444.3168439539992
},
"steps_from_proto": {
"total": 85.17658757899699,
"count": 45547,
"is_parallel": true,
"self": 15.796277885033192,
"children": {
"_process_rank_one_or_two_observation": {
"total": 69.3803096939638,
"count": 455470,
"is_parallel": true,
"self": 69.3803096939638
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.000761173000000781,
"count": 1,
"self": 0.000761173000000781,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1074.2896445499428,
"count": 1352597,
"is_parallel": true,
"self": 28.35052806101112,
"children": {
"process_trajectory": {
"total": 489.1180842299324,
"count": 1352597,
"is_parallel": true,
"self": 486.46477100393224,
"children": {
"RLTrainer._checkpoint": {
"total": 2.6533132260001366,
"count": 10,
"is_parallel": true,
"self": 2.6533132260001366
}
}
},
"_update_policy": {
"total": 556.8210322589994,
"count": 227,
"is_parallel": true,
"self": 154.71368056399308,
"children": {
"TorchPPOOptimizer.update": {
"total": 402.1073516950063,
"count": 11574,
"is_parallel": true,
"self": 402.1073516950063
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13800563499989948,
"count": 1,
"self": 0.0013383019997945667,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13666733300010492,
"count": 1,
"self": 0.13666733300010492
}
}
}
}
}
}
}