{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8258373141288757,
"min": 0.8258373141288757,
"max": 2.847506046295166,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7912.34716796875,
"min": 7912.34716796875,
"max": 29192.630859375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.80038070678711,
"min": 0.34684571623802185,
"max": 12.80038070678711,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2496.07421875,
"min": 67.28807067871094,
"max": 2600.479248046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07154965822316145,
"min": 0.06111196784982966,
"max": 0.07338152895710093,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2861986328926458,
"min": 0.25049444334581494,
"max": 0.356385483324166,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18522895865288436,
"min": 0.13049516201763434,
"max": 0.2671083513136004,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7409158346115374,
"min": 0.5219806480705373,
"max": 1.335541756568002,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.613636363636363,
"min": 3.840909090909091,
"max": 25.613636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1127.0,
"min": 169.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.613636363636363,
"min": 3.840909090909091,
"max": 25.613636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1127.0,
"min": 169.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1696708820",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.21.2",
"end_time_seconds": "1696709346"
},
"total": 526.7983700049999,
"count": 1,
"self": 0.9060613639999247,
"children": {
"run_training.setup": {
"total": 0.046789777999947546,
"count": 1,
"self": 0.046789777999947546
},
"TrainerController.start_learning": {
"total": 525.845518863,
"count": 1,
"self": 0.622770097979128,
"children": {
"TrainerController._reset_env": {
"total": 7.870869069999799,
"count": 1,
"self": 7.870869069999799
},
"TrainerController.advance": {
"total": 517.1878038240211,
"count": 18212,
"self": 0.2972240130359296,
"children": {
"env_step": {
"total": 516.8905798109852,
"count": 18212,
"self": 354.65780514698486,
"children": {
"SubprocessEnvManager._take_step": {
"total": 161.93042774900073,
"count": 18212,
"self": 1.5827789740137632,
"children": {
"TorchPolicy.evaluate": {
"total": 160.34764877498696,
"count": 18212,
"self": 160.34764877498696
}
}
},
"workers": {
"total": 0.302346914999589,
"count": 18212,
"self": 0.0,
"children": {
"worker_root": {
"total": 524.355864914003,
"count": 18212,
"is_parallel": true,
"self": 260.0386864730076,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023252270000284625,
"count": 1,
"is_parallel": true,
"self": 0.0006870020004043909,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016382249996240716,
"count": 10,
"is_parallel": true,
"self": 0.0016382249996240716
}
}
},
"UnityEnvironment.step": {
"total": 0.0367189139999482,
"count": 1,
"is_parallel": true,
"self": 0.0005760409994763904,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004550130001916841,
"count": 1,
"is_parallel": true,
"self": 0.0004550130001916841
},
"communicator.exchange": {
"total": 0.033351167000091664,
"count": 1,
"is_parallel": true,
"self": 0.033351167000091664
},
"steps_from_proto": {
"total": 0.002336693000188461,
"count": 1,
"is_parallel": true,
"self": 0.00042729799997687223,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001909395000211589,
"count": 10,
"is_parallel": true,
"self": 0.001909395000211589
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 264.31717844099535,
"count": 18211,
"is_parallel": true,
"self": 11.221985315048641,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.894398654983661,
"count": 18211,
"is_parallel": true,
"self": 5.894398654983661
},
"communicator.exchange": {
"total": 205.32908093198512,
"count": 18211,
"is_parallel": true,
"self": 205.32908093198512
},
"steps_from_proto": {
"total": 41.87171353897793,
"count": 18211,
"is_parallel": true,
"self": 7.731201313978772,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.140512224999156,
"count": 182110,
"is_parallel": true,
"self": 34.140512224999156
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001678420001098857,
"count": 1,
"self": 0.0001678420001098857,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 512.738639983952,
"count": 530548,
"is_parallel": true,
"self": 11.86151506993997,
"children": {
"process_trajectory": {
"total": 283.12046174901184,
"count": 530548,
"is_parallel": true,
"self": 281.92309063501193,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1973711139999068,
"count": 4,
"is_parallel": true,
"self": 1.1973711139999068
}
}
},
"_update_policy": {
"total": 217.7566631650002,
"count": 90,
"is_parallel": true,
"self": 84.56245451800737,
"children": {
"TorchPPOOptimizer.update": {
"total": 133.19420864699282,
"count": 4584,
"is_parallel": true,
"self": 133.19420864699282
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.16390802899991286,
"count": 1,
"self": 0.0012625009999283066,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16264552799998455,
"count": 1,
"self": 0.16264552799998455
}
}
}
}
}
}
}