{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9304640293121338,
"min": 0.9304640293121338,
"max": 2.864683151245117,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8884.0703125,
"min": 8884.0703125,
"max": 29368.73046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.998976707458496,
"min": 0.2619020640850067,
"max": 12.998976707458496,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2534.800537109375,
"min": 50.808998107910156,
"max": 2619.743896484375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06747290388653603,
"min": 0.05993395472949344,
"max": 0.07371174484918205,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2698916155461441,
"min": 0.2687006451248867,
"max": 0.3505265610589299,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18791016278898015,
"min": 0.10237762434567856,
"max": 0.2840573507927212,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7516406511559206,
"min": 0.4095104973827142,
"max": 1.362534106946459,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.227272727272727,
"min": 3.1363636363636362,
"max": 26.227272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1154.0,
"min": 138.0,
"max": 1401.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.227272727272727,
"min": 3.1363636363636362,
"max": 26.227272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1154.0,
"min": 138.0,
"max": 1401.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691768739",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691769235"
},
"total": 495.902654257,
"count": 1,
"self": 0.43231169999990016,
"children": {
"run_training.setup": {
"total": 0.06417416199997206,
"count": 1,
"self": 0.06417416199997206
},
"TrainerController.start_learning": {
"total": 495.4061683950001,
"count": 1,
"self": 0.625945258004208,
"children": {
"TrainerController._reset_env": {
"total": 4.782685975000049,
"count": 1,
"self": 4.782685975000049
},
"TrainerController.advance": {
"total": 489.85574652999605,
"count": 18201,
"self": 0.29930609797440866,
"children": {
"env_step": {
"total": 489.55644043202165,
"count": 18201,
"self": 357.241881505007,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.0050378849993,
"count": 18201,
"self": 2.1227551020020883,
"children": {
"TorchPolicy.evaluate": {
"total": 129.8822827829972,
"count": 18201,
"self": 129.8822827829972
}
}
},
"workers": {
"total": 0.3095210420153762,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 493.65703140099856,
"count": 18201,
"is_parallel": true,
"self": 230.87760585199783,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00679045499998665,
"count": 1,
"is_parallel": true,
"self": 0.004348318000097606,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002442136999889044,
"count": 10,
"is_parallel": true,
"self": 0.002442136999889044
}
}
},
"UnityEnvironment.step": {
"total": 0.05020134799997322,
"count": 1,
"is_parallel": true,
"self": 0.0006146959999568935,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004539369999747578,
"count": 1,
"is_parallel": true,
"self": 0.0004539369999747578
},
"communicator.exchange": {
"total": 0.04686046599999827,
"count": 1,
"is_parallel": true,
"self": 0.04686046599999827
},
"steps_from_proto": {
"total": 0.0022722490000433027,
"count": 1,
"is_parallel": true,
"self": 0.0003912419999778649,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018810070000654378,
"count": 10,
"is_parallel": true,
"self": 0.0018810070000654378
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 262.77942554900073,
"count": 18200,
"is_parallel": true,
"self": 10.964830762991937,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.753320871014466,
"count": 18200,
"is_parallel": true,
"self": 5.753320871014466
},
"communicator.exchange": {
"total": 207.03283610499943,
"count": 18200,
"is_parallel": true,
"self": 207.03283610499943
},
"steps_from_proto": {
"total": 39.0284378099949,
"count": 18200,
"is_parallel": true,
"self": 7.226770461008755,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.801667348986143,
"count": 182000,
"is_parallel": true,
"self": 31.801667348986143
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015703499980190827,
"count": 1,
"self": 0.00015703499980190827,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 486.08100372899867,
"count": 454345,
"is_parallel": true,
"self": 10.3427104750142,
"children": {
"process_trajectory": {
"total": 266.2322346979846,
"count": 454345,
"is_parallel": true,
"self": 264.2243464219847,
"children": {
"RLTrainer._checkpoint": {
"total": 2.0078882759999033,
"count": 4,
"is_parallel": true,
"self": 2.0078882759999033
}
}
},
"_update_policy": {
"total": 209.50605855599986,
"count": 90,
"is_parallel": true,
"self": 82.39957589800974,
"children": {
"TorchPPOOptimizer.update": {
"total": 127.10648265799011,
"count": 4584,
"is_parallel": true,
"self": 127.10648265799011
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1416335970000091,
"count": 1,
"self": 0.0008213480000449636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14081224899996414,
"count": 1,
"self": 0.14081224899996414
}
}
}
}
}
}
}