{
"name": "root",
"gauges": {
"Agent.Policy.Entropy.mean": {
"value": 0.7487905025482178,
"min": 0.5864184498786926,
"max": 1.0954581499099731,
"count": 200
},
"Agent.Policy.Entropy.sum": {
"value": 29963.599609375,
"min": 23452.046875,
"max": 43888.4375,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.pattern.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Environment.LessonNumber.task.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.Step.mean": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Step.sum": {
"value": 7999096.0,
"min": 39096.0,
"max": 7999096.0,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.mean": {
"value": 9.750008583068848,
"min": 2.483990430831909,
"max": 9.794022560119629,
"count": 200
},
"Agent.Policy.ExtrinsicValueEstimate.sum": {
"value": 234.00021362304688,
"min": 57.131778717041016,
"max": 235.05654907226562,
"count": 200
},
"Agent.Losses.PolicyLoss.mean": {
"value": 0.05110003270746102,
"min": 0.04344587100497429,
"max": 0.05577823658319494,
"count": 200
},
"Agent.Losses.PolicyLoss.sum": {
"value": 0.15330009812238304,
"min": 0.09584244352057189,
"max": 0.1673347097495848,
"count": 200
},
"Agent.Losses.ValueLoss.mean": {
"value": 0.0048299082340928995,
"min": 0.0035734779088208656,
"max": 4.954870798625052,
"count": 200
},
"Agent.Losses.ValueLoss.sum": {
"value": 0.0144897247022787,
"min": 0.010720433726462596,
"max": 9.909741597250104,
"count": 200
},
"Agent.Policy.LearningRate.mean": {
"value": 8.856997047999973e-07,
"min": 8.856997047999973e-07,
"max": 0.00029907840030719997,
"count": 200
},
"Agent.Policy.LearningRate.sum": {
"value": 2.657099114399992e-06,
"min": 2.657099114399992e-06,
"max": 0.0008936568021144,
"count": 200
},
"Agent.Policy.Epsilon.mean": {
"value": 0.10029520000000001,
"min": 0.10029520000000001,
"max": 0.1996928,
"count": 200
},
"Agent.Policy.Epsilon.sum": {
"value": 0.30088560000000003,
"min": 0.30088560000000003,
"max": 0.5978856,
"count": 200
},
"Agent.Policy.Beta.mean": {
"value": 2.4730479999999958e-05,
"min": 2.4730479999999958e-05,
"max": 0.004984670720000001,
"count": 200
},
"Agent.Policy.Beta.sum": {
"value": 7.419143999999988e-05,
"min": 7.419143999999988e-05,
"max": 0.014894491440000001,
"count": 200
},
"Agent.Environment.EpisodeLength.mean": {
"value": 4999.0,
"min": 4999.0,
"max": 4999.0,
"count": 200
},
"Agent.Environment.EpisodeLength.sum": {
"value": 39992.0,
"min": 39992.0,
"max": 39992.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.WindFarmControl.IndividualPerformance.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 200
},
"Agent.WindFarmControl.AvoidDamageReward.mean": {
"value": 4855.470642089844,
"min": 1496.6440200805664,
"max": 4889.613586425781,
"count": 200
},
"Agent.WindFarmControl.AvoidDamageReward.sum": {
"value": 38843.76513671875,
"min": 11973.152160644531,
"max": 39116.90869140625,
"count": 200
},
"Agent.Environment.CumulativeReward.mean": {
"value": 4851.871337890625,
"min": 1742.0011529922485,
"max": 4893.7439041137695,
"count": 200
},
"Agent.Environment.CumulativeReward.sum": {
"value": 38814.970703125,
"min": 13936.009223937988,
"max": 39149.951232910156,
"count": 200
},
"Agent.Policy.ExtrinsicReward.mean": {
"value": 4851.871337890625,
"min": 1742.0011529922485,
"max": 4893.7439041137695,
"count": 200
},
"Agent.Policy.ExtrinsicReward.sum": {
"value": 38814.970703125,
"min": 13936.009223937988,
"max": 39149.951232910156,
"count": 200
},
"Agent.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"Agent.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716310229",
"python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/WindFarmControl_pattern_8_task_1_run_id_2_train.yaml --run-id=WindFarmControl/train/WindFarmControl_pattern_8_task_1_run_id_2_train --base-port 5009",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.7.1+cu110",
"numpy_version": "1.21.2",
"end_time_seconds": "1716315736"
},
"total": 5507.6809619000005,
"count": 1,
"self": 0.20196660000056,
"children": {
"run_training.setup": {
"total": 0.061003200000000035,
"count": 1,
"self": 0.061003200000000035
},
"TrainerController.start_learning": {
"total": 5507.4179920999995,
"count": 1,
"self": 18.071718999837685,
"children": {
"TrainerController._reset_env": {
"total": 3.8922267999999995,
"count": 1,
"self": 3.8922267999999995
},
"TrainerController.advance": {
"total": 5485.418861400161,
"count": 1002050,
"self": 15.549165599899425,
"children": {
"env_step": {
"total": 5469.869695800262,
"count": 1002050,
"self": 2110.8445559002266,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3350.797693199916,
"count": 1002050,
"self": 35.539810699322516,
"children": {
"TorchPolicy.evaluate": {
"total": 3315.2578825005935,
"count": 1002050,
"self": 3315.2578825005935
}
}
},
"workers": {
"total": 8.227446700119302,
"count": 1002050,
"self": 0.0,
"children": {
"worker_root": {
"total": 5485.25349300001,
"count": 1002050,
"is_parallel": true,
"self": 4229.318325200127,
"children": {
"steps_from_proto": {
"total": 0.00026780000000004023,
"count": 1,
"is_parallel": true,
"self": 0.0001335000000000086,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00013430000000003162,
"count": 2,
"is_parallel": true,
"self": 0.00013430000000003162
}
}
},
"UnityEnvironment.step": {
"total": 1255.934899999883,
"count": 1002050,
"is_parallel": true,
"self": 58.66727299893978,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.03818370012502,
"count": 1002050,
"is_parallel": true,
"self": 78.03818370012502
},
"communicator.exchange": {
"total": 952.499582400736,
"count": 1002050,
"is_parallel": true,
"self": 952.499582400736
},
"steps_from_proto": {
"total": 166.72986090008226,
"count": 1002050,
"is_parallel": true,
"self": 89.93004990013088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.79981099995138,
"count": 2004100,
"is_parallel": true,
"self": 76.79981099995138
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.240000023652101e-05,
"count": 1,
"self": 3.240000023652101e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 5501.4550517000525,
"count": 174875,
"is_parallel": true,
"self": 9.243340100028945,
"children": {
"process_trajectory": {
"total": 2972.4364139000236,
"count": 174875,
"is_parallel": true,
"self": 2971.8741748000252,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5622390999982372,
"count": 16,
"is_parallel": true,
"self": 0.5622390999982372
}
}
},
"_update_policy": {
"total": 2519.7752977,
"count": 600,
"is_parallel": true,
"self": 741.8472785000695,
"children": {
"TorchPPOOptimizer.update": {
"total": 1777.9280191999305,
"count": 93600,
"is_parallel": true,
"self": 1777.9280191999305
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.03515250000054948,
"count": 1,
"self": 0.006966900000406895,
"children": {
"RLTrainer._checkpoint": {
"total": 0.028185600000142585,
"count": 1,
"self": 0.028185600000142585
}
}
}
}
}
}
}