{
    "name": "root",
    "gauges": {
        "Agent.Policy.Entropy.mean": {
            "value": 1.442776083946228,
            "min": 1.4186757802963257,
            "max": 1.4452122449874878,
            "count": 200
        },
        "Agent.Policy.Entropy.sum": {
            "value": 8652.328125,
            "min": 8511.5712890625,
            "max": 8681.615234375,
            "count": 200
        },
        "Agent.DroneBasedReforestation.TreeDropCount.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.TreeDropCount.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.SaveLocationCount.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.SaveLocationCount.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
            "value": 0.6,
            "min": 0.6,
            "max": 0.7333333333333333,
            "count": 200
        },
        "Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
            "value": 9.0,
            "min": 9.0,
            "max": 11.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.HighestPotentialSoildFound.mean": {
            "value": 0.9559984922409057,
            "min": 0.5225563247998556,
            "max": 0.9999298810958862,
            "count": 200
        },
        "Agent.DroneBasedReforestation.HighestPotentialSoildFound.sum": {
            "value": 14.339977383613586,
            "min": 9.067141830921173,
            "max": 14.998948216438293,
            "count": 200
        },
        "Agent.Environment.LessonNumber.difficulty.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.difficulty.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.task.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.task.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.EpisodeLength.mean": {
            "value": 399.0,
            "min": 399.0,
            "max": 399.0,
            "count": 200
        },
        "Agent.Environment.EpisodeLength.sum": {
            "value": 5985.0,
            "min": 5985.0,
            "max": 5985.0,
            "count": 200
        },
        "Agent.Step.mean": {
            "value": 1199600.0,
            "min": 5600.0,
            "max": 1199600.0,
            "count": 200
        },
        "Agent.Step.sum": {
            "value": 1199600.0,
            "min": 5600.0,
            "max": 1199600.0,
            "count": 200
        },
        "Agent.Policy.CuriosityValueEstimate.mean": {
            "value": 0.3682994544506073,
            "min": 0.03938337415456772,
            "max": 0.9965881109237671,
            "count": 200
        },
        "Agent.Policy.CuriosityValueEstimate.sum": {
            "value": 5.524491786956787,
            "min": 0.5513672232627869,
            "max": 14.948822021484375,
            "count": 200
        },
        "Agent.Policy.ExtrinsicValueEstimate.mean": {
            "value": -0.12333516776561737,
            "min": -0.16663993895053864,
            "max": 0.07217133045196533,
            "count": 200
        },
        "Agent.Policy.ExtrinsicValueEstimate.sum": {
            "value": -1.8500275611877441,
            "min": -2.499598979949951,
            "max": 1.0103986263275146,
            "count": 200
        },
        "Agent.Environment.CumulativeReward.mean": {
            "value": -0.050596084197362265,
            "min": -0.3522908863212381,
            "max": 7.03871250152588e-05,
            "count": 200
        },
        "Agent.Environment.CumulativeReward.sum": {
            "value": -0.758941262960434,
            "min": -5.219755828380585,
            "max": 0.0010558068752288818,
            "count": 200
        },
        "Agent.Policy.CuriosityReward.mean": {
            "value": 1.3816294272740681,
            "min": 0.0,
            "max": 16.28054116566976,
            "count": 200
        },
        "Agent.Policy.CuriosityReward.sum": {
            "value": 20.724441409111023,
            "min": 0.0,
            "max": 244.2081174850464,
            "count": 200
        },
        "Agent.Policy.ExtrinsicReward.mean": {
            "value": -0.045536668101946516,
            "min": -0.31706198304891586,
            "max": 6.30497932434082e-05,
            "count": 200
        },
        "Agent.Policy.ExtrinsicReward.sum": {
            "value": -0.6830500215291977,
            "min": -4.697783201932907,
            "max": 0.000945746898651123,
            "count": 200
        },
        "Agent.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        },
        "Agent.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        },
        "Agent.Losses.PolicyLoss.mean": {
            "value": 0.020216951612383127,
            "min": 0.015028195979539305,
            "max": 0.03332180882959316,
            "count": 142
        },
        "Agent.Losses.PolicyLoss.sum": {
            "value": 0.020216951612383127,
            "min": 0.015028195979539305,
            "max": 0.03332180882959316,
            "count": 142
        },
        "Agent.Losses.ValueLoss.mean": {
            "value": 0.0038419710375213376,
            "min": 0.0026907013865032545,
            "max": 0.06172032565033684,
            "count": 142
        },
        "Agent.Losses.ValueLoss.sum": {
            "value": 0.0038419710375213376,
            "min": 0.0026907013865032545,
            "max": 0.06172032565033684,
            "count": 142
        },
        "Agent.Policy.LearningRate.mean": {
            "value": 1.8000994000000013e-06,
            "min": 1.8000994000000013e-06,
            "max": 0.0002979000007,
            "count": 142
        },
        "Agent.Policy.LearningRate.sum": {
            "value": 1.8000994000000013e-06,
            "min": 1.8000994000000013e-06,
            "max": 0.0002979000007,
            "count": 142
        },
        "Agent.Policy.Epsilon.mean": {
            "value": 0.10060000000000001,
            "min": 0.10060000000000001,
            "max": 0.1993,
            "count": 142
        },
        "Agent.Policy.Epsilon.sum": {
            "value": 0.10060000000000001,
            "min": 0.10060000000000001,
            "max": 0.1993,
            "count": 142
        },
        "Agent.Policy.Beta.mean": {
            "value": 3.994000000000003e-05,
            "min": 3.994000000000003e-05,
            "max": 0.00496507,
            "count": 142
        },
        "Agent.Policy.Beta.sum": {
            "value": 3.994000000000003e-05,
            "min": 3.994000000000003e-05,
            "max": 0.00496507,
            "count": 142
        },
        "Agent.Losses.CuriosityForwardLoss.mean": {
            "value": 0.03441119488949577,
            "min": 0.029511885329460103,
            "max": 0.5812804649273554,
            "count": 142
        },
        "Agent.Losses.CuriosityForwardLoss.sum": {
            "value": 0.03441119488949577,
            "min": 0.029511885329460103,
            "max": 0.5812804649273554,
            "count": 142
        },
        "Agent.Losses.CuriosityInverseLoss.mean": {
            "value": 2.2547863821188607,
            "min": 2.019305537144343,
            "max": 3.2931356926759086,
            "count": 142
        },
        "Agent.Losses.CuriosityInverseLoss.sum": {
            "value": 2.2547863821188607,
            "min": 2.019305537144343,
            "max": 3.2931356926759086,
            "count": 142
        }
    },
"metadata": {
|
|
"timer_format_version": "0.1.0",
|
|
"start_time_seconds": "1717618846",
|
|
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
|
|
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_4_task_4_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_4_task_4_run_id_2_train --base-port 5007",
|
|
"mlagents_version": "0.30.0",
|
|
"mlagents_envs_version": "0.30.0",
|
|
"communication_protocol_version": "1.5.0",
|
|
"pytorch_version": "1.7.1+cu110",
|
|
"numpy_version": "1.21.0",
|
|
"end_time_seconds": "1717622896"
|
|
},
|
|
"total": 4050.8611982,
|
|
"count": 1,
|
|
"self": 0.28286549999984345,
|
|
"children": {
|
|
"run_training.setup": {
|
|
"total": 0.05722579999999999,
|
|
"count": 1,
|
|
"self": 0.05722579999999999
|
|
},
|
|
"TrainerController.start_learning": {
|
|
"total": 4050.5211069,
|
|
"count": 1,
|
|
"self": 7.755769800099188,
|
|
"children": {
|
|
"TrainerController._reset_env": {
|
|
"total": 2.3042475,
|
|
"count": 1,
|
|
"self": 2.3042475
|
|
},
|
|
"TrainerController.advance": {
|
|
"total": 4040.221172299901,
|
|
"count": 400401,
|
|
"self": 6.915694199725749,
|
|
"children": {
|
|
"env_step": {
|
|
"total": 4033.305478100175,
|
|
"count": 400401,
|
|
"self": 1840.6298471003556,
|
|
"children": {
|
|
"SubprocessEnvManager._take_step": {
|
|
"total": 2187.926601599922,
|
|
"count": 400401,
|
|
"self": 13.67496749983411,
|
|
"children": {
|
|
"TorchPolicy.evaluate": {
|
|
"total": 2174.251634100088,
|
|
"count": 400401,
|
|
"self": 2174.251634100088
|
|
}
|
|
}
|
|
},
|
|
"workers": {
|
|
"total": 4.74902939989744,
|
|
"count": 400401,
|
|
"self": 0.0,
|
|
"children": {
|
|
"worker_root": {
|
|
"total": 4040.942998700102,
|
|
"count": 400401,
|
|
"is_parallel": true,
|
|
"self": 2456.2720338002273,
|
|
"children": {
|
|
"steps_from_proto": {
|
|
"total": 0.006718000000000002,
|
|
"count": 1,
|
|
"is_parallel": true,
|
|
"self": 0.00011049999999990234,
|
|
"children": {
|
|
"_process_maybe_compressed_observation": {
|
|
"total": 0.006556400000000018,
|
|
"count": 2,
|
|
"is_parallel": true,
|
|
"self": 4.21000000001559e-05,
|
|
"children": {
|
|
"_observation_to_np_array": {
|
|
"total": 0.006514299999999862,
|
|
"count": 3,
|
|
"is_parallel": true,
|
|
"self": 3.390000000003113e-05,
|
|
"children": {
|
|
"process_pixels": {
|
|
"total": 0.006480399999999831,
|
|
"count": 3,
|
|
"is_parallel": true,
|
|
"self": 0.0002976999999997343,
|
|
"children": {
|
|
"image_decompress": {
|
|
"total": 0.0061827000000000965,
|
|
"count": 3,
|
|
"is_parallel": true,
|
|
"self": 0.0061827000000000965
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"_process_rank_one_or_two_observation": {
|
|
"total": 5.1100000000081636e-05,
|
|
"count": 2,
|
|
"is_parallel": true,
|
|
"self": 5.1100000000081636e-05
|
|
}
|
|
}
|
|
},
|
|
"UnityEnvironment.step": {
|
|
"total": 1584.6642468998748,
|
|
"count": 400401,
|
|
"is_parallel": true,
|
|
"self": 22.510608400007186,
|
|
"children": {
|
|
"UnityEnvironment._generate_step_input": {
|
|
"total": 23.439472500081244,
|
|
"count": 400401,
|
|
"is_parallel": true,
|
|
"self": 23.439472500081244
|
|
},
|
|
"communicator.exchange": {
|
|
"total": 1355.5917321999057,
|
|
"count": 400401,
|
|
"is_parallel": true,
|
|
"self": 1355.5917321999057
|
|
},
|
|
"steps_from_proto": {
|
|
"total": 183.12243379988087,
|
|
"count": 400401,
|
|
"is_parallel": true,
|
|
"self": 36.406514799877186,
|
|
"children": {
|
|
"_process_maybe_compressed_observation": {
|
|
"total": 131.31438099991735,
|
|
"count": 800802,
|
|
"is_parallel": true,
|
|
"self": 10.005922099962135,
|
|
"children": {
|
|
"_observation_to_np_array": {
|
|
"total": 121.30845889995521,
|
|
"count": 1204206,
|
|
"is_parallel": true,
|
|
"self": 9.702267500006826,
|
|
"children": {
|
|
"process_pixels": {
|
|
"total": 111.60619139994839,
|
|
"count": 1204206,
|
|
"is_parallel": true,
|
|
"self": 51.952398399685826,
|
|
"children": {
|
|
"image_decompress": {
|
|
"total": 59.65379300026256,
|
|
"count": 1204206,
|
|
"is_parallel": true,
|
|
"self": 59.65379300026256
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"_process_rank_one_or_two_observation": {
|
|
"total": 15.40153800008634,
|
|
"count": 800802,
|
|
"is_parallel": true,
|
|
"self": 15.40153800008634
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"trainer_threads": {
|
|
"total": 3.409999999348656e-05,
|
|
"count": 1,
|
|
"self": 3.409999999348656e-05,
|
|
"children": {
|
|
"thread_root": {
|
|
"total": 0.0,
|
|
"count": 0,
|
|
"is_parallel": true,
|
|
"self": 0.0,
|
|
"children": {
|
|
"trainer_advance": {
|
|
"total": 4045.731148299917,
|
|
"count": 197766,
|
|
"is_parallel": true,
|
|
"self": 6.91600229984715,
|
|
"children": {
|
|
"process_trajectory": {
|
|
"total": 3237.875512200067,
|
|
"count": 197766,
|
|
"is_parallel": true,
|
|
"self": 3237.411982600067,
|
|
"children": {
|
|
"RLTrainer._checkpoint": {
|
|
"total": 0.463529600000129,
|
|
"count": 2,
|
|
"is_parallel": true,
|
|
"self": 0.463529600000129
|
|
}
|
|
}
|
|
},
|
|
"_update_policy": {
|
|
"total": 800.9396338000026,
|
|
"count": 142,
|
|
"is_parallel": true,
|
|
"self": 532.7303677000089,
|
|
"children": {
|
|
"TorchPPOOptimizer.update": {
|
|
"total": 268.20926609999384,
|
|
"count": 3408,
|
|
"is_parallel": true,
|
|
"self": 268.20926609999384
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
},
|
|
"TrainerController._save_models": {
|
|
"total": 0.23988319999989471,
|
|
"count": 1,
|
|
"self": 0.005443099999865808,
|
|
"children": {
|
|
"RLTrainer._checkpoint": {
|
|
"total": 0.2344401000000289,
|
|
"count": 1,
|
|
"self": 0.2344401000000289
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
} |