{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.3203582763671875,
"min": 1.3203582763671875,
"max": 1.4230849742889404,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 7981.56591796875,
"min": 6405.69921875,
"max": 9538.6259765625,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.014285714285714285,
"min": 0.0,
"max": 0.23076923076923078,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 3.0,
"min": 0.0,
"max": 9.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 27.84285714285714,
"min": 19.02247191011236,
"max": 248.22222222222223,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5847.0,
"min": 4659.0,
"max": 6936.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199994.0,
"min": 5913.0,
"max": 1199994.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199994.0,
"min": 5913.0,
"max": 1199994.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.2455085813999176,
"min": -0.45354989171028137,
"max": 0.9412897825241089,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 51.0657844543457,
"min": -66.2182846069336,
"max": 183.70339965820312,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 73.94851684570312,
"min": 0.07052389532327652,
"max": 83.03712463378906,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 15381.291015625,
"min": 1.7630974054336548,
"max": 21857.748046875,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 97.52531419282045,
"min": 57.09107523841354,
"max": 105.92347007686129,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 20285.265352106653,
"min": 1484.367956198752,
"max": 29285.44507972675,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 0.07286154175660788,
"min": 0.0,
"max": 9.22925870693647,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 15.155200685374439,
"min": 0.0,
"max": 239.9607263803482,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 87.77278065584743,
"min": 51.381964386082615,
"max": 95.33112049452387,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 18256.738376416266,
"min": 1335.931074038148,
"max": 26356.89976554393,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.029771444698174793,
"min": 0.014351546531543136,
"max": 0.03230706377265354,
"count": 144
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.029771444698174793,
"min": 0.014351546531543136,
"max": 0.03230706377265354,
"count": 144
},
"Agent.Losses.ValueLoss.mean": {
"value": 1322.8896026611328,
"min": 218.00844383239746,
"max": 2543.290812174479,
"count": 144
},
"Agent.Losses.ValueLoss.sum": {
"value": 1322.8896026611328,
"min": 218.00844383239746,
"max": 2543.290812174479,
"count": 144
},
"Agent.Policy.LearningRate.mean": {
"value": 3.48099884000015e-07,
"min": 3.48099884000015e-07,
"max": 0.00029789475070175,
"count": 144
},
"Agent.Policy.LearningRate.sum": {
"value": 3.48099884000015e-07,
"min": 3.48099884000015e-07,
"max": 0.00029789475070175,
"count": 144
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10011600000000002,
"min": 0.10011600000000002,
"max": 0.19929825,
"count": 144
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10011600000000002,
"min": 0.10011600000000002,
"max": 0.19929825,
"count": 144
},
"Agent.Policy.Beta.mean": {
"value": 1.578840000000025e-05,
"min": 1.578840000000025e-05,
"max": 0.004964982675,
"count": 144
},
"Agent.Policy.Beta.sum": {
"value": 1.578840000000025e-05,
"min": 1.578840000000025e-05,
"max": 0.004964982675,
"count": 144
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.028019882505759597,
"min": 0.02158251474611461,
"max": 0.572342399507761,
"count": 144
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.028019882505759597,
"min": 0.02158251474611461,
"max": 0.572342399507761,
"count": 144
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.4992835571368535,
"min": 1.2533991237481434,
"max": 3.3059381445248923,
"count": 144
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.4992835571368535,
"min": 1.2533991237481434,
"max": 3.3059381445248923,
"count": 144
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716628503",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_1_task_1_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_1_task_1_run_id_1_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1716634946"
},
"total": 6442.752453900001,
"count": 1,
"self": 2.219137300001421,
"children": {
"run_training.setup": {
"total": 0.05183649999999995,
"count": 1,
"self": 0.05183649999999995
},
"TrainerController.start_learning": {
"total": 6440.481480099999,
"count": 1,
"self": 7.312115699965943,
"children": {
"TrainerController._reset_env": {
"total": 2.0995172,
"count": 1,
"self": 2.0995172
},
"TrainerController.advance": {
"total": 6430.865352400033,
"count": 410409,
"self": 7.2899541001361285,
"children": {
"env_step": {
"total": 6423.575398299897,
"count": 410409,
"self": 3992.6428585004273,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2425.7739042996877,
"count": 410409,
"self": 13.34329899986642,
"children": {
"TorchPolicy.evaluate": {
"total": 2412.4306052998213,
"count": 400022,
"self": 2412.4306052998213
}
}
},
"workers": {
"total": 5.15863549978179,
"count": 410409,
"self": 0.0,
"children": {
"worker_root": {
"total": 6430.833114899772,
"count": 410409,
"is_parallel": true,
"self": 2805.3006044999697,
"children": {
"steps_from_proto": {
"total": 0.006264999999999965,
"count": 1,
"is_parallel": true,
"self": 0.00010100000000012876,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.006118599999999974,
"count": 2,
"is_parallel": true,
"self": 4.039999999982946e-05,
"children": {
"_observation_to_np_array": {
"total": 0.006078200000000145,
"count": 3,
"is_parallel": true,
"self": 3.0299999999927607e-05,
"children": {
"process_pixels": {
"total": 0.006047900000000217,
"count": 3,
"is_parallel": true,
"self": 0.00023530000000038243,
"children": {
"image_decompress": {
"total": 0.0058125999999998346,
"count": 3,
"is_parallel": true,
"self": 0.0058125999999998346
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 4.539999999986222e-05,
"count": 2,
"is_parallel": true,
"self": 4.539999999986222e-05
}
}
},
"UnityEnvironment.step": {
"total": 3625.5262453998025,
"count": 410409,
"is_parallel": true,
"self": 24.969074599661326,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.12903800021271,
"count": 410409,
"is_parallel": true,
"self": 23.12903800021271
},
"communicator.exchange": {
"total": 3390.5878296999513,
"count": 410409,
"is_parallel": true,
"self": 3390.5878296999513
},
"steps_from_proto": {
"total": 186.8403030999769,
"count": 410409,
"is_parallel": true,
"self": 36.92467929988027,
"children": {
"_process_maybe_compressed_observation": {
"total": 134.00122640034093,
"count": 820818,
"is_parallel": true,
"self": 10.428723500491543,
"children": {
"_observation_to_np_array": {
"total": 123.57250289984938,
"count": 1239024,
"is_parallel": true,
"self": 9.911986300518194,
"children": {
"process_pixels": {
"total": 113.66051659933119,
"count": 1239024,
"is_parallel": true,
"self": 53.21384719912152,
"children": {
"image_decompress": {
"total": 60.44666940020967,
"count": 1239024,
"is_parallel": true,
"self": 60.44666940020967
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 15.914397399755694,
"count": 820818,
"is_parallel": true,
"self": 15.914397399755694
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.090000038559083e-05,
"count": 1,
"self": 4.090000038559083e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 6434.990600099994,
"count": 311306,
"is_parallel": true,
"self": 8.984891500132107,
"children": {
"process_trajectory": {
"total": 5594.213934999861,
"count": 311306,
"is_parallel": true,
"self": 5593.71791929986,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4960157000004983,
"count": 2,
"is_parallel": true,
"self": 0.4960157000004983
}
}
},
"_update_policy": {
"total": 831.7917736000009,
"count": 144,
"is_parallel": true,
"self": 554.0815228999735,
"children": {
"TorchPPOOptimizer.update": {
"total": 277.71025070002736,
"count": 3462,
"is_parallel": true,
"self": 277.71025070002736
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2044538999998622,
"count": 1,
"self": 0.006551699999363336,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19790220000049885,
"count": 1,
"self": 0.19790220000049885
}
}
}
}
}
}
}