{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7575081586837769,
"min": 0.7575081586837769,
"max": 2.8493618965148926,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7199.357421875,
"min": 7199.357421875,
"max": 29086.28515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.186546325683594,
"min": 0.40107688307762146,
"max": 13.186546325683594,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2571.37646484375,
"min": 77.80891418457031,
"max": 2677.221923828125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06881800351060091,
"min": 0.06112763850636411,
"max": 0.07556200237515584,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27527201404240365,
"min": 0.24451055402545643,
"max": 0.37781001187577923,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2205128619337783,
"min": 0.11057285886878768,
"max": 0.3016151224865633,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8820514477351132,
"min": 0.4422914354751507,
"max": 1.4267888224008036,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.136363636363637,
"min": 3.25,
"max": 26.136363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1150.0,
"min": 143.0,
"max": 1425.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.136363636363637,
"min": 3.25,
"max": 26.136363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1150.0,
"min": 143.0,
"max": 1425.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1761235685",
"python_version": "3.10.13 (main, Oct 14 2025, 13:44:10) [GCC 14.2.0]",
"command_line_arguments": "/home/chames/mlagents/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1761236172"
},
"total": 487.61459071299987,
"count": 1,
"self": 0.22112175899997055,
"children": {
"run_training.setup": {
"total": 0.06706255199992484,
"count": 1,
"self": 0.06706255199992484
},
"TrainerController.start_learning": {
"total": 487.326406402,
"count": 1,
"self": 0.3231819800046196,
"children": {
"TrainerController._reset_env": {
"total": 1.9970527509999556,
"count": 1,
"self": 1.9970527509999556
},
"TrainerController.advance": {
"total": 484.92625125699556,
"count": 18192,
"self": 0.339616692977188,
"children": {
"env_step": {
"total": 352.9607629459963,
"count": 18192,
"self": 236.5436825870038,
"children": {
"SubprocessEnvManager._take_step": {
"total": 116.19726923301255,
"count": 18192,
"self": 1.4002444940213081,
"children": {
"TorchPolicy.evaluate": {
"total": 114.79702473899124,
"count": 18192,
"self": 114.79702473899124
}
}
},
"workers": {
"total": 0.2198111259799589,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 486.23131331901277,
"count": 18192,
"is_parallel": true,
"self": 273.3429734400338,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0030211319999580155,
"count": 1,
"is_parallel": true,
"self": 0.0011209419997157966,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001900190000242219,
"count": 10,
"is_parallel": true,
"self": 0.001900190000242219
}
}
},
"UnityEnvironment.step": {
"total": 0.03098493199991026,
"count": 1,
"is_parallel": true,
"self": 0.00039758599996275734,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00015167900005508272,
"count": 1,
"is_parallel": true,
"self": 0.00015167900005508272
},
"communicator.exchange": {
"total": 0.02889334599990434,
"count": 1,
"is_parallel": true,
"self": 0.02889334599990434
},
"steps_from_proto": {
"total": 0.0015423209999880783,
"count": 1,
"is_parallel": true,
"self": 0.0002513989998078614,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012909220001802169,
"count": 10,
"is_parallel": true,
"self": 0.0012909220001802169
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 212.88833987897897,
"count": 18191,
"is_parallel": true,
"self": 4.431364396956269,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.448907880003617,
"count": 18191,
"is_parallel": true,
"self": 2.448907880003617
},
"communicator.exchange": {
"total": 193.0532086459939,
"count": 18191,
"is_parallel": true,
"self": 193.0532086459939
},
"steps_from_proto": {
"total": 12.954858956025191,
"count": 18191,
"is_parallel": true,
"self": 2.769786760085708,
"children": {
"_process_rank_one_or_two_observation": {
"total": 10.185072195939483,
"count": 181910,
"is_parallel": true,
"self": 10.185072195939483
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 131.62587161802207,
"count": 18192,
"self": 0.4091583250385611,
"children": {
"process_trajectory": {
"total": 27.17257055698451,
"count": 18192,
"self": 26.750472057984553,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4220984989999579,
"count": 4,
"self": 0.4220984989999579
}
}
},
"_update_policy": {
"total": 104.044142735999,
"count": 90,
"self": 26.508620627994105,
"children": {
"TorchPPOOptimizer.update": {
"total": 77.5355221080049,
"count": 4587,
"self": 77.5355221080049
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.96999915514607e-07,
"count": 1,
"self": 9.96999915514607e-07
},
"TrainerController._save_models": {
"total": 0.07991941699992822,
"count": 1,
"self": 0.0007081779999680293,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07921123899996019,
"count": 1,
"self": 0.07921123899996019
}
}
}
}
}
}
}