{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6983694434165955,
"min": 0.6983694434165955,
"max": 2.844540596008301,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6637.30322265625,
"min": 6637.30322265625,
"max": 29037.0703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.24760627746582,
"min": 0.2522730231285095,
"max": 13.24760627746582,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2583.283203125,
"min": 48.94096755981445,
"max": 2692.13818359375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07032539223626942,
"min": 0.06269248251434571,
"max": 0.07445500510848611,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2813015689450777,
"min": 0.25076993005738285,
"max": 0.3722750255424306,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19148527341438276,
"min": 0.11769408540224985,
"max": 0.29405767149200623,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.765941093657531,
"min": 0.4707763416089994,
"max": 1.4417962350097355,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.795454545454547,
"min": 3.3636363636363638,
"max": 26.272727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1135.0,
"min": 148.0,
"max": 1445.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.795454545454547,
"min": 3.3636363636363638,
"max": 26.272727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1135.0,
"min": 148.0,
"max": 1445.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740871564",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740872038"
},
"total": 474.245168078,
"count": 1,
"self": 0.757917586999838,
"children": {
"run_training.setup": {
"total": 0.025110328999971898,
"count": 1,
"self": 0.025110328999971898
},
"TrainerController.start_learning": {
"total": 473.4621401620002,
"count": 1,
"self": 0.3832547129861723,
"children": {
"TrainerController._reset_env": {
"total": 3.331458780000048,
"count": 1,
"self": 3.331458780000048
},
"TrainerController.advance": {
"total": 469.6091286690141,
"count": 18192,
"self": 0.38271826201321346,
"children": {
"env_step": {
"total": 331.6286992499902,
"count": 18192,
"self": 253.53884830699508,
"children": {
"SubprocessEnvManager._take_step": {
"total": 77.87400305300162,
"count": 18192,
"self": 1.349073783020458,
"children": {
"TorchPolicy.evaluate": {
"total": 76.52492926998116,
"count": 18192,
"self": 76.52492926998116
}
}
},
"workers": {
"total": 0.21584788999348348,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 471.67904138499625,
"count": 18192,
"is_parallel": true,
"self": 248.4731339500156,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0059892170002058265,
"count": 1,
"is_parallel": true,
"self": 0.004243144999918513,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017460720002873131,
"count": 10,
"is_parallel": true,
"self": 0.0017460720002873131
}
}
},
"UnityEnvironment.step": {
"total": 0.038771520000182136,
"count": 1,
"is_parallel": true,
"self": 0.0006628410001212615,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004164229999332747,
"count": 1,
"is_parallel": true,
"self": 0.0004164229999332747
},
"communicator.exchange": {
"total": 0.03569239600005858,
"count": 1,
"is_parallel": true,
"self": 0.03569239600005858
},
"steps_from_proto": {
"total": 0.0019998600000690203,
"count": 1,
"is_parallel": true,
"self": 0.0003799980001986114,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001619861999870409,
"count": 10,
"is_parallel": true,
"self": 0.001619861999870409
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 223.20590743498065,
"count": 18191,
"is_parallel": true,
"self": 10.780592404988965,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.896382646996926,
"count": 18191,
"is_parallel": true,
"self": 5.896382646996926
},
"communicator.exchange": {
"total": 172.2289598829907,
"count": 18191,
"is_parallel": true,
"self": 172.2289598829907
},
"steps_from_proto": {
"total": 34.29997250000406,
"count": 18191,
"is_parallel": true,
"self": 6.128359274914146,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.171613225089914,
"count": 181910,
"is_parallel": true,
"self": 28.171613225089914
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 137.59771115701074,
"count": 18192,
"self": 0.4329450930076746,
"children": {
"process_trajectory": {
"total": 29.340661325001747,
"count": 18192,
"self": 28.678115492001552,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6625458330001948,
"count": 4,
"self": 0.6625458330001948
}
}
},
"_update_policy": {
"total": 107.82410473900131,
"count": 90,
"self": 43.7892726940031,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.03483204499821,
"count": 4587,
"self": 64.03483204499821
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0899998414970469e-06,
"count": 1,
"self": 1.0899998414970469e-06
},
"TrainerController._save_models": {
"total": 0.1382969100000082,
"count": 1,
"self": 0.001299213000038435,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13699769699996978,
"count": 1,
"self": 0.13699769699996978
}
}
}
}
}
}
}