{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8986996412277222,
"min": 0.8986996412277222,
"max": 2.5502874851226807,
"count": 4
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 44742.66015625,
"min": 44742.66015625,
"max": 128848.171875,
"count": 4
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 49936.0,
"max": 199984.0,
"count": 4
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 49936.0,
"max": 199984.0,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.738842964172363,
"min": 2.5263326168060303,
"max": 12.738842964172363,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 12649.6708984375,
"min": 2506.1220703125,
"max": 12649.6708984375,
"count": 4
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 4
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 48158.0,
"min": 48158.0,
"max": 50347.0,
"count": 4
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04632755251758212,
"min": 0.04430820485148698,
"max": 0.04865574299783738,
"count": 4
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 1.0192061553868066,
"min": 0.9747805067327135,
"max": 1.1190820889502597,
"count": 4
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21243467427451498,
"min": 0.21243467427451498,
"max": 0.2734953221203624,
"count": 4
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 4.67356283403933,
"min": 4.67356283403933,
"max": 6.290392408768336,
"count": 4
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.037608740599999e-05,
"min": 5.037608740599999e-05,
"max": 0.00034957601260600005,
"count": 4
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0011082739229319998,
"min": 0.0011082739229319998,
"max": 0.007690672277332001,
"count": 4
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.11259400000000003,
"min": 0.11259400000000003,
"max": 0.18739400000000003,
"count": 4
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 2.4770680000000005,
"min": 2.4770680000000005,
"max": 4.122668000000001,
"count": 4
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0006384406,
"min": 0.0006384406,
"max": 0.0043709606,
"count": 4
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.014045693199999999,
"min": 0.014045693199999999,
"max": 0.0961611332,
"count": 4
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.21900826446281,
"min": 8.962809917355372,
"max": 25.21900826446281,
"count": 4
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 6103.0,
"min": 2169.0,
"max": 6130.0,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.21900826446281,
"min": 8.962809917355372,
"max": 25.21900826446281,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 6103.0,
"min": 2169.0,
"max": 6130.0,
"count": 4
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685729131",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685729587"
},
"total": 456.2128257029999,
"count": 1,
"self": 0.4278515199998765,
"children": {
"run_training.setup": {
"total": 0.04437486500000887,
"count": 1,
"self": 0.04437486500000887
},
"TrainerController.start_learning": {
"total": 455.740599318,
"count": 1,
"self": 0.5891577260024405,
"children": {
"TrainerController._reset_env": {
"total": 4.61565690700013,
"count": 1,
"self": 4.61565690700013
},
"TrainerController.advance": {
"total": 450.3961513479976,
"count": 18197,
"self": 0.30243467299783333,
"children": {
"env_step": {
"total": 450.09371667499977,
"count": 18197,
"self": 314.2947311350115,
"children": {
"SubprocessEnvManager._take_step": {
"total": 135.49346667000123,
"count": 18197,
"self": 1.8037477550094536,
"children": {
"TorchPolicy.evaluate": {
"total": 133.68971891499177,
"count": 18197,
"self": 133.68971891499177
}
}
},
"workers": {
"total": 0.30551886998705413,
"count": 18197,
"self": 0.0,
"children": {
"worker_root": {
"total": 454.09914541900457,
"count": 18197,
"is_parallel": true,
"self": 200.9775325099895,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005102915999941615,
"count": 1,
"is_parallel": true,
"self": 0.0035525810001217906,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015503349998198246,
"count": 10,
"is_parallel": true,
"self": 0.0015503349998198246
}
}
},
"UnityEnvironment.step": {
"total": 0.06232388700004776,
"count": 1,
"is_parallel": true,
"self": 0.0006328690001282666,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004490090000217606,
"count": 1,
"is_parallel": true,
"self": 0.0004490090000217606
},
"communicator.exchange": {
"total": 0.05718236799998522,
"count": 1,
"is_parallel": true,
"self": 0.05718236799998522
},
"steps_from_proto": {
"total": 0.004059640999912517,
"count": 1,
"is_parallel": true,
"self": 0.0012361069998405583,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002823534000071959,
"count": 10,
"is_parallel": true,
"self": 0.002823534000071959
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 253.12161290901508,
"count": 18196,
"is_parallel": true,
"self": 10.251260011047634,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.468177977974165,
"count": 18196,
"is_parallel": true,
"self": 5.468177977974165
},
"communicator.exchange": {
"total": 203.28468006399225,
"count": 18196,
"is_parallel": true,
"self": 203.28468006399225
},
"steps_from_proto": {
"total": 34.11749485600103,
"count": 18196,
"is_parallel": true,
"self": 6.5729254689367735,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.544569387064257,
"count": 181960,
"is_parallel": true,
"self": 27.544569387064257
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.898800001588825e-05,
"count": 1,
"self": 8.898800001588825e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 446.73269791298026,
"count": 449755,
"is_parallel": true,
"self": 10.09641651000834,
"children": {
"process_trajectory": {
"total": 261.71174789797215,
"count": 449755,
"is_parallel": true,
"self": 260.39681715597203,
"children": {
"RLTrainer._checkpoint": {
"total": 1.314930742000115,
"count": 2,
"is_parallel": true,
"self": 1.314930742000115
}
}
},
"_update_policy": {
"total": 174.92453350499977,
"count": 90,
"is_parallel": true,
"self": 81.10975437799993,
"children": {
"TorchPPOOptimizer.update": {
"total": 93.81477912699984,
"count": 2160,
"is_parallel": true,
"self": 93.81477912699984
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13954434899983426,
"count": 1,
"self": 0.0009534879998227552,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1385908610000115,
"count": 1,
"self": 0.1385908610000115
}
}
}
}
}
}
}