{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0414718389511108,
"min": 1.0414718389511108,
"max": 2.8502416610717773,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9943.97265625,
"min": 9943.97265625,
"max": 29189.32421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.204896926879883,
"min": 0.45432907342910767,
"max": 13.204896926879883,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2574.954833984375,
"min": 88.13983917236328,
"max": 2686.681396484375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0698429983844375,
"min": 0.06459338415892596,
"max": 0.07085835429660031,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27937199353775,
"min": 0.25837353663570384,
"max": 0.35429177148300156,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2140042748521356,
"min": 0.14186652419260046,
"max": 0.2811530264423174,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8560170994085424,
"min": 0.5674660967704018,
"max": 1.2452054215704693,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000007e-06,
"min": 8.082097306000007e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400003e-05,
"min": 3.232838922400003e-05,
"max": 0.0013851600382799997,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.363636363636363,
"min": 3.75,
"max": 26.363636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1160.0,
"min": 165.0,
"max": 1428.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.363636363636363,
"min": 3.75,
"max": 26.363636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1160.0,
"min": 165.0,
"max": 1428.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673511308",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673511966"
},
"total": 658.586456501,
"count": 1,
"self": 0.38953070300010495,
"children": {
"run_training.setup": {
"total": 0.11720312399995692,
"count": 1,
"self": 0.11720312399995692
},
"TrainerController.start_learning": {
"total": 658.079722674,
"count": 1,
"self": 0.5404757710037984,
"children": {
"TrainerController._reset_env": {
"total": 7.094584716999975,
"count": 1,
"self": 7.094584716999975
},
"TrainerController.advance": {
"total": 650.3012418149962,
"count": 18204,
"self": 0.2871906029911315,
"children": {
"env_step": {
"total": 650.014051212005,
"count": 18204,
"self": 487.4433901079865,
"children": {
"SubprocessEnvManager._take_step": {
"total": 162.27097741300827,
"count": 18204,
"self": 1.5159552140104324,
"children": {
"TorchPolicy.evaluate": {
"total": 160.75502219899784,
"count": 18204,
"self": 36.84152610199385,
"children": {
"TorchPolicy.sample_actions": {
"total": 123.91349609700399,
"count": 18204,
"self": 123.91349609700399
}
}
}
}
},
"workers": {
"total": 0.2996836910102729,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 656.7026968100074,
"count": 18204,
"is_parallel": true,
"self": 417.25616778200856,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0063227890000234765,
"count": 1,
"is_parallel": true,
"self": 0.0038553970000521076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002467391999971369,
"count": 10,
"is_parallel": true,
"self": 0.002467391999971369
}
}
},
"UnityEnvironment.step": {
"total": 0.03558719700004076,
"count": 1,
"is_parallel": true,
"self": 0.0005068640001582025,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031959699992967217,
"count": 1,
"is_parallel": true,
"self": 0.00031959699992967217
},
"communicator.exchange": {
"total": 0.03260466200003975,
"count": 1,
"is_parallel": true,
"self": 0.03260466200003975
},
"steps_from_proto": {
"total": 0.002156073999913133,
"count": 1,
"is_parallel": true,
"self": 0.0006410149999283021,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015150589999848307,
"count": 10,
"is_parallel": true,
"self": 0.0015150589999848307
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 239.44652902799885,
"count": 18203,
"is_parallel": true,
"self": 9.2148596470023,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.803007293997894,
"count": 18203,
"is_parallel": true,
"self": 5.803007293997894
},
"communicator.exchange": {
"total": 189.66599462599686,
"count": 18203,
"is_parallel": true,
"self": 189.66599462599686
},
"steps_from_proto": {
"total": 34.7626674610018,
"count": 18203,
"is_parallel": true,
"self": 7.547676220986659,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.214991240015138,
"count": 182030,
"is_parallel": true,
"self": 27.214991240015138
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.275000007211929e-05,
"count": 1,
"self": 7.275000007211929e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 646.8539535160261,
"count": 365146,
"is_parallel": true,
"self": 10.35338517705145,
"children": {
"process_trajectory": {
"total": 260.8336727469733,
"count": 365146,
"is_parallel": true,
"self": 260.0319802579734,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8016924889999473,
"count": 4,
"is_parallel": true,
"self": 0.8016924889999473
}
}
},
"_update_policy": {
"total": 375.6668955920013,
"count": 90,
"is_parallel": true,
"self": 146.8037407939969,
"children": {
"TorchPPOOptimizer.update": {
"total": 228.86315479800442,
"count": 15290,
"is_parallel": true,
"self": 228.86315479800442
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14334762099997533,
"count": 1,
"self": 0.0010179660000630975,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14232965499991224,
"count": 1,
"self": 0.14232965499991224
}
}
}
}
}
}
}