{
    "name": "root",
    "gauges": {
        "Agent.Policy.Entropy.mean": {
            "value": 1.4533461332321167,
            "min": 1.4189385175704956,
            "max": 1.4572324752807617,
            "count": 200
        },
        "Agent.Policy.Entropy.sum": {
            "value": 9068.8798828125,
            "min": 7246.6962890625,
            "max": 9839.8515625,
            "count": 200
        },
        "Agent.DroneBasedReforestation.TreeDropCount.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.TreeDropCount.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
            "value": 129.76190476190476,
            "min": 0.0,
            "max": 417.93333333333334,
            "count": 200
        },
        "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
            "value": 2725.0,
            "min": 0.0,
            "max": 6269.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.SaveLocationCount.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.SaveLocationCount.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
            "value": 0.6190476190476191,
            "min": 0.4444444444444444,
            "max": 0.8,
            "count": 200
        },
        "Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
            "value": 13.0,
            "min": 7.0,
            "max": 15.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.difficulty.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.difficulty.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.task.mean": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.LessonNumber.task.sum": {
            "value": 0.0,
            "min": 0.0,
            "max": 0.0,
            "count": 200
        },
        "Agent.Environment.EpisodeLength.mean": {
            "value": 346.3333333333333,
            "min": 276.14285714285717,
            "max": 399.0,
            "count": 200
        },
        "Agent.Environment.EpisodeLength.sum": {
            "value": 6234.0,
            "min": 4956.0,
            "max": 6765.0,
            "count": 200
        },
        "Agent.Step.mean": {
            "value": 1199610.0,
            "min": 5600.0,
            "max": 1199610.0,
            "count": 200
        },
        "Agent.Step.sum": {
            "value": 1199610.0,
            "min": 5600.0,
            "max": 1199610.0,
            "count": 200
        },
        "Agent.Policy.CuriosityValueEstimate.mean": {
            "value": 0.18385379016399384,
            "min": 0.0292599406093359,
            "max": 1.0435168743133545,
            "count": 200
        },
        "Agent.Policy.CuriosityValueEstimate.sum": {
            "value": 3.125514507293701,
            "min": 0.43889909982681274,
            "max": 15.652752876281738,
            "count": 200
        },
        "Agent.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.8692796230316162,
            "min": -0.172968327999115,
            "max": 3.2063863277435303,
            "count": 200
        },
        "Agent.Policy.ExtrinsicValueEstimate.sum": {
            "value": 14.777753829956055,
            "min": -2.940021514892578,
            "max": 67.28934478759766,
            "count": 200
        },
        "Agent.Environment.CumulativeReward.mean": {
            "value": 10.825707225238576,
            "min": -1.132599985599518,
            "max": 30.65860773701417,
            "count": 200
        },
        "Agent.Environment.CumulativeReward.sum": {
            "value": 184.0370228290558,
            "min": -16.988999783992767,
            "max": 582.5135470032692,
            "count": 200
        },
        "Agent.Policy.CuriosityReward.mean": {
            "value": 0.6476288959383965,
            "min": 0.0,
            "max": 14.209401581022474,
            "count": 200
        },
        "Agent.Policy.CuriosityReward.sum": {
            "value": 11.00969123095274,
            "min": 0.0,
            "max": 255.76922845840454,
            "count": 200
        },
        "Agent.Policy.ExtrinsicReward.mean": {
            "value": 9.74313505432185,
            "min": -1.0193402210871378,
            "max": 27.592743119126872,
            "count": 200
        },
        "Agent.Policy.ExtrinsicReward.sum": {
            "value": 165.63329592347145,
            "min": -15.290103316307068,
            "max": 524.2621192634106,
            "count": 200
        },
        "Agent.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        },
        "Agent.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 200
        },
        "Agent.Losses.PolicyLoss.mean": {
            "value": 0.02661372983129695,
            "min": 0.014345233794301748,
            "max": 0.03552510698015491,
            "count": 139
        },
        "Agent.Losses.PolicyLoss.sum": {
            "value": 0.02661372983129695,
            "min": 0.014345233794301748,
            "max": 0.03552510698015491,
            "count": 139
        },
        "Agent.Losses.ValueLoss.mean": {
            "value": 7.1942349672317505,
            "min": 0.0016999954968923703,
            "max": 21.090327545448584,
            "count": 139
        },
        "Agent.Losses.ValueLoss.sum": {
            "value": 7.1942349672317505,
            "min": 0.0016999954968923703,
            "max": 21.090327545448584,
            "count": 139
        },
        "Agent.Policy.LearningRate.mean": {
            "value": 1.660599446499987e-06,
            "min": 1.660599446499987e-06,
            "max": 0.0002979000007,
            "count": 139
        },
        "Agent.Policy.LearningRate.sum": {
            "value": 1.660599446499987e-06,
            "min": 1.660599446499987e-06,
            "max": 0.0002979000007,
            "count": 139
        },
        "Agent.Policy.Epsilon.mean": {
            "value": 0.1005535,
            "min": 0.1005535,
            "max": 0.1993,
            "count": 139
        },
        "Agent.Policy.Epsilon.sum": {
            "value": 0.1005535,
            "min": 0.1005535,
            "max": 0.1993,
            "count": 139
        },
        "Agent.Policy.Beta.mean": {
            "value": 3.761964999999979e-05,
            "min": 3.761964999999979e-05,
            "max": 0.00496507,
            "count": 139
        },
        "Agent.Policy.Beta.sum": {
            "value": 3.761964999999979e-05,
            "min": 3.761964999999979e-05,
            "max": 0.00496507,
            "count": 139
        },
        "Agent.Losses.CuriosityForwardLoss.mean": {
            "value": 0.017956590512767434,
            "min": 0.017066249158233404,
            "max": 0.5835290277997652,
            "count": 139
        },
        "Agent.Losses.CuriosityForwardLoss.sum": {
            "value": 0.017956590512767434,
            "min": 0.017066249158233404,
            "max": 0.5835290277997652,
            "count": 139
        },
        "Agent.Losses.CuriosityInverseLoss.mean": {
            "value": 2.0348302821318307,
            "min": 1.9834658404191334,
            "max": 3.310828596353531,
            "count": 139
        },
        "Agent.Losses.CuriosityInverseLoss.sum": {
            "value": 2.0348302821318307,
            "min": 1.9834658404191334,
            "max": 3.310828596353531,
            "count": 139
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1717574791",
        "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
        "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_3_task_2_run_id_2_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_3_task_2_run_id_2_train --base-port 5007",
        "mlagents_version": "0.30.0",
        "mlagents_envs_version": "0.30.0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.7.1+cu110",
        "numpy_version": "1.21.0",
        "end_time_seconds": "1717579534"
    },
    "total": 4742.8268369,
    "count": 1,
    "self": 0.32466740000018035,
    "children": {
        "run_training.setup": {
            "total": 0.07259170000000004,
            "count": 1,
            "self": 0.07259170000000004
        },
        "TrainerController.start_learning": {
            "total": 4742.4295778,
            "count": 1,
            "self": 8.336508300056266,
            "children": {
                "TrainerController._reset_env": {
                    "total": 2.2312725,
                    "count": 1,
                    "self": 2.2312725
                },
                "TrainerController.advance": {
                    "total": 4731.6206696999425,
                    "count": 401158,
                    "self": 7.422396399857462,
                    "children": {
                        "env_step": {
                            "total": 4724.198273300085,
                            "count": 401158,
                            "self": 1947.7122418999506,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 2771.474913300064,
                                    "count": 401158,
                                    "self": 13.969147400131988,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 2757.505765899932,
                                            "count": 400270,
                                            "self": 2757.505765899932
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 5.011118100070284,
                                    "count": 401158,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 4732.350145099952,
                                            "count": 401158,
                                            "is_parallel": true,
                                            "self": 3067.838198099922,
                                            "children": {
                                                "steps_from_proto": {
                                                    "total": 0.006916900000000004,
                                                    "count": 1,
                                                    "is_parallel": true,
                                                    "self": 0.00010650000000000936,
                                                    "children": {
                                                        "_process_maybe_compressed_observation": {
                                                            "total": 0.006762600000000063,
                                                            "count": 2,
                                                            "is_parallel": true,
                                                            "self": 3.790000000014615e-05,
                                                            "children": {
                                                                "_observation_to_np_array": {
                                                                    "total": 0.006724699999999917,
                                                                    "count": 3,
                                                                    "is_parallel": true,
                                                                    "self": 3.399999999986747e-05,
                                                                    "children": {
                                                                        "process_pixels": {
                                                                            "total": 0.006690700000000049,
                                                                            "count": 3,
                                                                            "is_parallel": true,
                                                                            "self": 0.000247200000000003,
                                                                            "children": {
                                                                                "image_decompress": {
                                                                                    "total": 0.006443500000000046,
                                                                                    "count": 3,
                                                                                    "is_parallel": true,
                                                                                    "self": 0.006443500000000046
                                                                                }
                                                                            }
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        },
                                                        "_process_rank_one_or_two_observation": {
                                                            "total": 4.779999999993123e-05,
                                                            "count": 2,
                                                            "is_parallel": true,
                                                            "self": 4.779999999993123e-05
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1664.5050301000297,
                                                    "count": 401158,
                                                    "is_parallel": true,
                                                    "self": 26.92025999966586,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 26.473373000047772,
                                                            "count": 401158,
                                                            "is_parallel": true,
                                                            "self": 26.473373000047772
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1405.283343300271,
                                                            "count": 401158,
                                                            "is_parallel": true,
                                                            "self": 1405.283343300271
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 205.8280538000449,
                                                            "count": 401158,
                                                            "is_parallel": true,
                                                            "self": 41.25461419988886,
                                                            "children": {
                                                                "_process_maybe_compressed_observation": {
                                                                    "total": 146.472787500226,
                                                                    "count": 802316,
                                                                    "is_parallel": true,
                                                                    "self": 11.28791419999743,
                                                                    "children": {
                                                                        "_observation_to_np_array": {
                                                                            "total": 135.18487330022856,
                                                                            "count": 1204062,
                                                                            "is_parallel": true,
                                                                            "self": 11.567830200627753,
                                                                            "children": {
                                                                                "process_pixels": {
                                                                                    "total": 123.6170430996008,
                                                                                    "count": 1204062,
                                                                                    "is_parallel": true,
                                                                                    "self": 58.48534459972299,
                                                                                    "children": {
                                                                                        "image_decompress": {
                                                                                            "total": 65.13169849987781,
                                                                                            "count": 1204062,
                                                                                            "is_parallel": true,
                                                                                            "self": 65.13169849987781
                                                                                        }
                                                                                    }
                                                                                }
                                                                            }
                                                                        }
                                                                    }
                                                                },
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 18.100652099930034,
                                                                    "count": 802316,
                                                                    "is_parallel": true,
                                                                    "self": 18.100652099930034
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 3.49000001733657e-05,
                    "count": 1,
                    "self": 3.49000001733657e-05,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 4736.830238500035,
                                    "count": 228938,
                                    "is_parallel": true,
                                    "self": 9.691872500030513,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 3745.0100708000027,
                                            "count": 228938,
                                            "is_parallel": true,
                                            "self": 3744.3179560000026,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 0.6921147999998993,
                                                    "count": 2,
                                                    "is_parallel": true,
                                                    "self": 0.6921147999998993
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 982.128295200002,
                                            "count": 139,
                                            "is_parallel": true,
                                            "self": 654.2106892000122,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 327.9176059999897,
                                                    "count": 3357,
                                                    "is_parallel": true,
                                                    "self": 327.9176059999897
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.24109240000052523,
                    "count": 1,
                    "self": 0.006454300000768853,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.23463809999975638,
                            "count": 1,
                            "self": 0.23463809999975638
                        }
                    }
                }
            }
        }
    }
}