{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 1.4193772077560425,
"min": 1.4186962842941284,
"max": 1.4354259967803955,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 8520.521484375,
"min": 8508.05859375,
"max": 8615.6083984375,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.TreeDropCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.SaveLocationCount.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
"value": 0.6,
"min": 0.6,
"max": 0.7333333333333333,
"count": 200
},
"Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
"value": 9.0,
"min": 9.0,
"max": 11.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.DroneBasedReforestation.HighestPotentialSoildFound.mean": {
"value": 0.9997279206911723,
"min": 0.5225563247998556,
"max": 0.9998562773068745,
"count": 200
},
"Agent.DroneBasedReforestation.HighestPotentialSoildFound.sum": {
"value": 14.995918810367584,
"min": 9.4060138463974,
"max": 14.997844159603119,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.difficulty.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 399.0,
"min": 399.0,
"max": 399.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 5985.0,
"min": 5985.0,
"max": 5985.0,
"count": 200
},
"Agent.Step.mean": {
"value": 1199600.0,
"min": 5600.0,
"max": 1199600.0,
"count": 200
},
"Agent.Step.sum": {
"value": 1199600.0,
"min": 5600.0,
"max": 1199600.0,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.mean": {
"value": 0.3334868252277374,
"min": 0.03938337415456772,
"max": 0.9802345633506775,
"count": 200
},
"Agent.Policy.CuriosityValueEstimate.sum": {
"value": 5.002302169799805,
"min": 0.5513672232627869,
"max": 14.703518867492676,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.10327214747667313,
"min": -0.14949025213718414,
"max": 0.07217133045196533,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.5490821599960327,
"min": -2.242353677749634,
"max": 1.0103986263275146,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": -0.00010599493980407715,
"min": -0.3522908863212381,
"max": 4.736582438151042e-06,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": -0.0015899240970611572,
"min": -4.9320724084973335,
"max": 7.104873657226562e-05,
"count": 200
},
"Agent.Policy.CuriosityReward.mean": {
"value": 1.2333391984303792,
"min": 0.0,
"max": 15.041553815205893,
"count": 200
},
"Agent.Policy.CuriosityReward.sum": {
"value": 18.50008797645569,
"min": 0.0,
"max": 225.62330722808838,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": -9.558399518330893e-05,
"min": -0.31706198304891586,
"max": 4.112720489501953e-06,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": -0.0014337599277496338,
"min": -4.438867762684822,
"max": 6.16908073425293e-05,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.022889179022361834,
"min": 0.014444681427751979,
"max": 0.03420288699756687,
"count": 142
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.022889179022361834,
"min": 0.014444681427751979,
"max": 0.03420288699756687,
"count": 142
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.003770941150529931,
"min": 0.002689642928695927,
"max": 0.05873268924187869,
"count": 142
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.003770941150529931,
"min": 0.002689642928695927,
"max": 0.05873268924187869,
"count": 142
},
"Agent.Policy.LearningRate.mean": {
"value": 1.8000994000000013e-06,
"min": 1.8000994000000013e-06,
"max": 0.0002979000007,
"count": 142
},
"Agent.Policy.LearningRate.sum": {
"value": 1.8000994000000013e-06,
"min": 1.8000994000000013e-06,
"max": 0.0002979000007,
"count": 142
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10060000000000001,
"min": 0.10060000000000001,
"max": 0.1993,
"count": 142
},
"Agent.Policy.Epsilon.sum": {
"value": 0.10060000000000001,
"min": 0.10060000000000001,
"max": 0.1993,
"count": 142
},
"Agent.Policy.Beta.mean": {
"value": 3.994000000000003e-05,
"min": 3.994000000000003e-05,
"max": 0.00496507,
"count": 142
},
"Agent.Policy.Beta.sum": {
"value": 3.994000000000003e-05,
"min": 3.994000000000003e-05,
"max": 0.00496507,
"count": 142
},
"Agent.Losses.CuriosityForwardLoss.mean": {
"value": 0.031225076410919428,
"min": 0.02829677751287818,
"max": 0.5812804649273554,
"count": 142
},
"Agent.Losses.CuriosityForwardLoss.sum": {
"value": 0.031225076410919428,
"min": 0.02829677751287818,
"max": 0.5812804649273554,
"count": 142
},
"Agent.Losses.CuriosityInverseLoss.mean": {
"value": 1.9004575858513515,
"min": 1.8592951099077861,
"max": 3.2931356926759086,
"count": 142
},
"Agent.Losses.CuriosityInverseLoss.sum": {
"value": 1.9004575858513515,
"min": 1.8592951099077861,
"max": 3.2931356926759086,
"count": 142
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716776066",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_1_task_4_run_id_0_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_1_task_4_run_id_0_train --base-port 5007",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.0",
"end_time_seconds": "1716781138"
},
"total": 5071.6772444,
"count": 1,
"self": 0.2956878000004508,
"children": {
"run_training.setup": {
"total": 0.06335230000000003,
"count": 1,
"self": 0.06335230000000003
},
"TrainerController.start_learning": {
"total": 5071.3182043,
"count": 1,
"self": 7.166758099958315,
"children": {
"TrainerController._reset_env": {
"total": 4.1228065,
"count": 1,
"self": 4.1228065
},
"TrainerController.advance": {
"total": 5059.793163300043,
"count": 400402,
"self": 6.639534500118316,
"children": {
"env_step": {
"total": 5053.153628799924,
"count": 400402,
"self": 1947.4372845001544,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3101.0268160998935,
"count": 400402,
"self": 13.266732400064484,
"children": {
"TorchPolicy.evaluate": {
"total": 3087.760083699829,
"count": 400402,
"self": 3087.760083699829
}
}
},
"workers": {
"total": 4.689528199876511,
"count": 400402,
"self": 0.0,
"children": {
"worker_root": {
"total": 5059.336825199737,
"count": 400402,
"is_parallel": true,
"self": 3381.5229891998642,
"children": {
"steps_from_proto": {
"total": 0.007475900000000202,
"count": 1,
"is_parallel": true,
"self": 0.00010810000000072151,
"children": {
"_process_maybe_compressed_observation": {
"total": 0.007317299999999971,
"count": 2,
"is_parallel": true,
"self": 4.110000000023817e-05,
"children": {
"_observation_to_np_array": {
"total": 0.007276199999999733,
"count": 3,
"is_parallel": true,
"self": 3.329999999968081e-05,
"children": {
"process_pixels": {
"total": 0.007242900000000052,
"count": 3,
"is_parallel": true,
"self": 0.00026730000000041443,
"children": {
"image_decompress": {
"total": 0.006975599999999638,
"count": 3,
"is_parallel": true,
"self": 0.006975599999999638
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 5.049999999950927e-05,
"count": 2,
"is_parallel": true,
"self": 5.049999999950927e-05
}
}
},
"UnityEnvironment.step": {
"total": 1677.806360099873,
"count": 400402,
"is_parallel": true,
"self": 21.533279499881473,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.852638000068005,
"count": 400402,
"is_parallel": true,
"self": 21.852638000068005
},
"communicator.exchange": {
"total": 1466.5887027000063,
"count": 400402,
"is_parallel": true,
"self": 1466.5887027000063
},
"steps_from_proto": {
"total": 167.8317398999172,
"count": 400402,
"is_parallel": true,
"self": 33.63285719959893,
"children": {
"_process_maybe_compressed_observation": {
"total": 119.80653640001671,
"count": 800804,
"is_parallel": true,
"self": 9.213807699939565,
"children": {
"_observation_to_np_array": {
"total": 110.59272870007715,
"count": 1204209,
"is_parallel": true,
"self": 9.156373099962806,
"children": {
"process_pixels": {
"total": 101.43635560011434,
"count": 1204209,
"is_parallel": true,
"self": 47.665081700038726,
"children": {
"image_decompress": {
"total": 53.77127390007561,
"count": 1204209,
"is_parallel": true,
"self": 53.77127390007561
}
}
}
}
}
}
},
"_process_rank_one_or_two_observation": {
"total": 14.392346300301565,
"count": 800804,
"is_parallel": true,
"self": 14.392346300301565
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.719999974942766e-05,
"count": 1,
"self": 2.719999974942766e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 5064.692878899909,
"count": 245638,
"is_parallel": true,
"self": 6.951058099828515,
"children": {
"process_trajectory": {
"total": 3996.2298547000823,
"count": 245638,
"is_parallel": true,
"self": 3995.6926148000816,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5372399000007135,
"count": 2,
"is_parallel": true,
"self": 0.5372399000007135
}
}
},
"_update_policy": {
"total": 1061.5119660999987,
"count": 142,
"is_parallel": true,
"self": 700.1020618000041,
"children": {
"TorchPPOOptimizer.update": {
"total": 361.40990429999465,
"count": 3408,
"is_parallel": true,
"self": 361.40990429999465
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23544919999949343,
"count": 1,
"self": 0.004898299999695155,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23055089999979828,
"count": 1,
"self": 0.23055089999979828
}
}
}
}
}
}
}