philippds committed
Commit 033326b
1 Parent(s): 784e87e

Upload 13 files

Agent.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7d2e7cbdd2fc441e405e175370794344a649c8b53465918e12ed083447678f0
+ size 562656
Agent/Agent-1200735.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7d2e7cbdd2fc441e405e175370794344a649c8b53465918e12ed083447678f0
+ size 562656
Agent/Agent-1200735.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3582d95360f166bc77c480aec43de918902049bb4e511cbf96a900068b904d02
+ size 4530031
Agent/Agent-499734.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa9e4092684f4b724bb8b55c906563ef4cc4f933969c9397cb03d5534fe86685
+ size 562656
Agent/Agent-499734.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea268df91be327de01cee0142b5b62844eb669299d5c29d847ba380dea0a5f08
+ size 4530031
Agent/Agent-999693.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f36ccccaf9c1b36cb82f00afe125980ab530f9f6711a657a6f6f5388a50b911f
+ size 562656
Agent/Agent-999693.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0498d6836c1a7f5745943cf3029e956bcbcecc154dfaeb901d0b23de272d3105
+ size 4530031
Agent/checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3582d95360f166bc77c480aec43de918902049bb4e511cbf96a900068b904d02
+ size 4530031
Agent/events.out.tfevents.1717382230.RICHARD.21952.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e345aa73ac4458549a655a2a86d4e752c33d5219489e04c11966de0d0b65fd58
+ size 2356439
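The nine entries above are Git LFS pointer files rather than the binaries themselves: each records the LFS spec version, the SHA-256 digest (`oid`) of the stored object, and its size in bytes. That digest makes a local integrity check straightforward; below is a minimal Python sketch (the file name and expected digest are copied from the `Agent.onnx` pointer above, everything else is standard library):

```python
# Minimal sketch: verify a downloaded LFS object against the oid in its pointer.
import hashlib

# Copied from the Agent.onnx pointer in this commit.
EXPECTED_OID = "a7d2e7cbdd2fc441e405e175370794344a649c8b53465918e12ed083447678f0"

with open("Agent.onnx", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

# True only if the full 562,656-byte object was fetched, not the pointer stub.
print("match:", digest == EXPECTED_OID)
```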
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ library_name: hivex
+ original_train_name: DroneBasedReforestation_difficulty_9_task_0_run_id_1_train
+ tags:
+ - hivex
+ - hivex-drone-based-reforestation
+ - reinforcement-learning
+ - multi-agent-reinforcement-learning
+ model-index:
+ - name: hivex-DBR-PPO-baseline-task-0-difficulty-9
+   results:
+   - task:
+       type: main-task
+       name: main_task
+       task-id: 0
+       difficulty-id: 9
+     dataset:
+       name: hivex-drone-based-reforestation
+       type: hivex-drone-based-reforestation
+     metrics:
+     - type: cumulative_distance_reward
+       value: 1.9881950914859772 +/- 0.6435197796866906
+       name: Cumulative Distance Reward
+       verified: true
+     - type: cumulative_distance_until_tree_drop
+       value: 62.70152519226074 +/- 15.481708378361306
+       name: Cumulative Distance Until Tree Drop
+       verified: true
+     - type: cumulative_distance_to_existing_trees
+       value: 65.97381935119628 +/- 11.85813603247749
+       name: Cumulative Distance to Existing Trees
+       verified: true
+     - type: cumulative_normalized_distance_until_tree_drop
+       value: 0.19881950929760933 +/- 0.06435197701542571
+       name: Cumulative Normalized Distance Until Tree Drop
+       verified: true
+     - type: cumulative_tree_drop_reward
+       value: 5.003359270095825 +/- 1.691449588116139
+       name: Cumulative Tree Drop Reward
+       verified: true
+     - type: out_of_energy_count
+       value: 0.9511428594589233 +/- 0.05597489611462524
+       name: Out of Energy Count
+       verified: true
+     - type: recharge_energy_count
+       value: 10.313650798797607 +/- 1.3015464035988247
+       name: Recharge Energy Count
+       verified: true
+     - type: tree_drop_count
+       value: 1.0218095362186432 +/- 0.05570651145686611
+       name: Tree Drop Count
+       verified: true
+     - type: cumulative_reward
+       value: 7.543602123260498 +/- 2.5896056315352975
+       name: Cumulative Reward
+       verified: true
+ ---
+
+ This model serves as the baseline for the **Drone-Based Reforestation** environment, trained and tested on task <code>0</code> with difficulty <code>9</code> using the Proximal Policy Optimization (PPO) algorithm.<br><br>Environment: **Drone-Based Reforestation**<br>Task: <code>0</code><br>Difficulty: <code>9</code><br>Algorithm: <code>PPO</code><br>Episode Length: <code>2000</code><br>Training <code>max_steps</code>: <code>1200000</code><br>Testing <code>max_steps</code>: <code>300000</code><br><br>Train & Test [Scripts](https://github.com/hivex-research/hivex)<br>Download the [Environment](https://github.com/hivex-research/hivex-environments)
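As a quick sanity check, the exported policy can be opened with `onnxruntime`; a minimal sketch, assuming `onnxruntime` is installed and `Agent.onnx` has been downloaded (the exact input/output signature depends on the ML-Agents export, so it is read from the model rather than hard-coded):

```python
# Minimal sketch: inspect the exported ML-Agents policy with onnxruntime.
import onnxruntime as ort

session = ort.InferenceSession("Agent.onnx")

# List the tensors the policy expects and produces instead of guessing names.
for tensor in session.get_inputs():
    print("input: ", tensor.name, tensor.shape, tensor.type)
for tensor in session.get_outputs():
    print("output:", tensor.name, tensor.shape, tensor.type)
```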
configuration.yaml ADDED
@@ -0,0 +1,107 @@
+ default_settings: null
+ behaviors:
+   Agent:
+     trainer_type: ppo
+     hyperparameters:
+       batch_size: 1024
+       buffer_size: 8192
+       learning_rate: 0.0003
+       beta: 0.005
+       epsilon: 0.2
+       lambd: 0.95
+       num_epoch: 3
+       shared_critic: false
+       learning_rate_schedule: linear
+       beta_schedule: linear
+       epsilon_schedule: linear
+     network_settings:
+       normalize: false
+       hidden_units: 128
+       num_layers: 2
+       vis_encode_type: resnet
+       memory: null
+       goal_conditioning_type: hyper
+       deterministic: false
+     reward_signals:
+       curiosity:
+         gamma: 0.99
+         strength: 0.1
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: resnet
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+         learning_rate: 0.0003
+         encoding_size: 256
+       extrinsic:
+         gamma: 0.99
+         strength: 0.9
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: resnet
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+     init_path: null
+     keep_checkpoints: 5
+     checkpoint_interval: 500000
+     max_steps: 1200000
+     time_horizon: 8192
+     summary_freq: 6000
+     threaded: true
+     self_play: null
+     behavioral_cloning: null
+ env_settings:
+   env_path: c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/dev_environments/Hivex_DroneBasedReforestation_win
+   env_args: null
+   base_port: 5007
+   num_envs: 1
+   num_areas: 1
+   seed: 5000
+   max_lifetime_restarts: 10
+   restarts_rate_limit_n: 1
+   restarts_rate_limit_period_s: 60
+ engine_settings:
+   width: 84
+   height: 84
+   quality_level: 5
+   time_scale: 20
+   target_frame_rate: -1
+   capture_frame_rate: 60
+   no_graphics: true
+ environment_parameters:
+   difficulty:
+     curriculum:
+     - value:
+         sampler_type: constant
+         sampler_parameters:
+           seed: 5000
+           value: 9
+       name: difficulty
+       completion_criteria: null
+   task:
+     curriculum:
+     - value:
+         sampler_type: constant
+         sampler_parameters:
+           seed: 5001
+           value: 0
+       name: task
+       completion_criteria: null
+ checkpoint_settings:
+   run_id: DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train
+   initialize_from: null
+   load_model: false
+   resume: false
+   force: false
+   train_model: false
+   inference: false
+   results_dir: results
+ torch_settings:
+   device: null
+ debug: false
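This is the full ML-Agents `RunOptions` dump for the run, so the training setup can be read back programmatically; a minimal sketch, assuming PyYAML is installed (the exact `mlagents-learn` invocation that consumed this file is preserved under `command_line_arguments` in `run_logs/timers.json` below):

```python
# Minimal sketch: read the PPO settings back out of configuration.yaml with PyYAML.
import yaml

with open("configuration.yaml") as f:
    run_options = yaml.safe_load(f)

agent = run_options["behaviors"]["Agent"]
print(agent["trainer_type"])                    # ppo
print(agent["hyperparameters"]["batch_size"])   # 1024
print(agent["hyperparameters"]["buffer_size"])  # 8192
print(agent["max_steps"])                       # 1200000
```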
run_logs/timers.json ADDED
@@ -0,0 +1,575 @@
+ {
+   "name": "root",
+   "gauges": {
+     "Agent.Policy.Entropy.mean": {
+       "value": 1.4405452013015747,
+       "min": 1.4220114946365356,
+       "max": 1.4441288709640503,
+       "count": 200
+     },
+     "Agent.Policy.Entropy.sum": {
+       "value": 8306.18359375,
+       "min": 7607.1845703125,
+       "max": 10276.486328125,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.TreeDropCount.mean": {
+       "value": 1.0,
+       "min": 0.7777777777777778,
+       "max": 1.5333333333333334,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.TreeDropCount.sum": {
+       "value": 15.0,
+       "min": 12.0,
+       "max": 23.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.RechargeEnergyCount.mean": {
+       "value": 8.733333333333333,
+       "min": 8.6,
+       "max": 60.333333333333336,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.RechargeEnergyCount.sum": {
+       "value": 131.0,
+       "min": 129.0,
+       "max": 905.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.SaveLocationCount.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.SaveLocationCount.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.OutofEnergyCount.mean": {
+       "value": 1.0,
+       "min": 0.4666666666666667,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.OutofEnergyCount.sum": {
+       "value": 15.0,
+       "min": 7.0,
+       "max": 18.0,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.mean": {
+       "value": 62.02187042236328,
+       "min": 18.203270212809244,
+       "max": 111.73464266459148,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistanceUntilTreeDrop.sum": {
+       "value": 930.3280563354492,
+       "min": 273.0490531921387,
+       "max": 1735.9881439208984,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeTreeDropReward.mean": {
+       "value": 7.596216059724489,
+       "min": 0.13252323203616673,
+       "max": 12.408484570185344,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeTreeDropReward.sum": {
+       "value": 113.94324089586735,
+       "min": 2.385418176651001,
+       "max": 217.52278470993042,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistanceReward.mean": {
+       "value": 2.0654015143712363,
+       "min": 0.08087263504664104,
+       "max": 3.434613621234894,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistanceReward.sum": {
+       "value": 30.981022715568542,
+       "min": 1.4557074308395386,
+       "max": 58.23595070838928,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.mean": {
+       "value": 0.20654015143712362,
+       "min": 0.008087263339095645,
+       "max": 0.3434613659977913,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeNormalizedDistanceUntilTreeDrop.sum": {
+       "value": 3.0981022715568542,
+       "min": 0.14557074010372162,
+       "max": 5.823595106601715,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.mean": {
+       "value": 54.73593068122864,
+       "min": 25.3091146879726,
+       "max": 117.90112050374348,
+       "count": 200
+     },
+     "Agent.DroneBasedReforestation.CumulativeDistancetoExistingTrees.sum": {
+       "value": 821.0389602184296,
+       "min": 455.5640643835068,
+       "max": 1768.5168075561523,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.difficulty.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.difficulty.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.mean": {
+       "value": 383.2,
+       "min": 333.5,
+       "max": 399.0,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.sum": {
+       "value": 5748.0,
+       "min": 5316.0,
+       "max": 7122.0,
+       "count": 200
+     },
+     "Agent.Step.mean": {
+       "value": 1199945.0,
+       "min": 5987.0,
+       "max": 1199945.0,
+       "count": 200
+     },
+     "Agent.Step.sum": {
+       "value": 1199945.0,
+       "min": 5987.0,
+       "max": 1199945.0,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityValueEstimate.mean": {
+       "value": 0.426647812128067,
+       "min": 0.02268255315721035,
+       "max": 0.973943829536438,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityValueEstimate.sum": {
+       "value": 6.826364994049072,
+       "min": 0.3402383029460907,
+       "max": 16.583505630493164,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.mean": {
+       "value": 0.46396204829216003,
+       "min": 0.06568823009729385,
+       "max": 1.6678794622421265,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.sum": {
+       "value": 7.4233927726745605,
+       "min": 1.1166999340057373,
+       "max": 28.35395050048828,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.mean": {
+       "value": 8.072101179510355,
+       "min": 0.5743498722712199,
+       "max": 14.941251850128173,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.sum": {
+       "value": 129.15361887216568,
+       "min": 8.615248084068298,
+       "max": 224.1187777519226,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityReward.mean": {
+       "value": 1.2897327467799187,
+       "min": 0.0,
+       "max": 13.70919558207194,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityReward.sum": {
+       "value": 20.6357239484787,
+       "min": 0.0,
+       "max": 205.6379337310791,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.mean": {
+       "value": 7.264889972284436,
+       "min": 0.5169146299362183,
+       "max": 13.447124989827474,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.sum": {
+       "value": 116.23823955655098,
+       "min": 7.753719449043274,
+       "max": 201.7068748474121,
+       "count": 200
+     },
+     "Agent.IsTraining.mean": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.IsTraining.sum": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.Losses.PolicyLoss.mean": {
+       "value": 0.024178701685741544,
+       "min": 0.015746597200632095,
+       "max": 0.035641532908711165,
+       "count": 136
+     },
+     "Agent.Losses.PolicyLoss.sum": {
+       "value": 0.024178701685741544,
+       "min": 0.015746597200632095,
+       "max": 0.035641532908711165,
+       "count": 136
+     },
+     "Agent.Losses.ValueLoss.mean": {
+       "value": 0.8647272114952406,
+       "min": 0.12877223547548056,
+       "max": 1.371308876408471,
+       "count": 136
+     },
+     "Agent.Losses.ValueLoss.sum": {
+       "value": 0.8647272114952406,
+       "min": 0.12877223547548056,
+       "max": 1.371308876408471,
+       "count": 136
+     },
+     "Agent.Policy.LearningRate.mean": {
+       "value": 1.8480993840000163e-06,
+       "min": 1.8480993840000163e-06,
+       "max": 0.00029780325073225,
+       "count": 136
+     },
+     "Agent.Policy.LearningRate.sum": {
+       "value": 1.8480993840000163e-06,
+       "min": 1.8480993840000163e-06,
+       "max": 0.00029780325073225,
+       "count": 136
+     },
+     "Agent.Policy.Epsilon.mean": {
+       "value": 0.100616,
+       "min": 0.100616,
+       "max": 0.19926775000000005,
+       "count": 136
+     },
+     "Agent.Policy.Epsilon.sum": {
+       "value": 0.100616,
+       "min": 0.100616,
+       "max": 0.19926775000000005,
+       "count": 136
+     },
+     "Agent.Policy.Beta.mean": {
+       "value": 4.073840000000028e-05,
+       "min": 4.073840000000028e-05,
+       "max": 0.004963460725,
+       "count": 136
+     },
+     "Agent.Policy.Beta.sum": {
+       "value": 4.073840000000028e-05,
+       "min": 4.073840000000028e-05,
+       "max": 0.004963460725,
+       "count": 136
+     },
+     "Agent.Losses.CuriosityForwardLoss.mean": {
+       "value": 0.035057641876240574,
+       "min": 0.03353164764121175,
+       "max": 0.6027635087569555,
+       "count": 136
+     },
+     "Agent.Losses.CuriosityForwardLoss.sum": {
+       "value": 0.035057641876240574,
+       "min": 0.03353164764121175,
+       "max": 0.6027635087569555,
+       "count": 136
+     },
+     "Agent.Losses.CuriosityInverseLoss.mean": {
+       "value": 1.815239240725835,
+       "min": 1.766872525215149,
+       "max": 3.315477500359217,
+       "count": 136
+     },
+     "Agent.Losses.CuriosityInverseLoss.sum": {
+       "value": 1.815239240725835,
+       "min": 1.766872525215149,
+       "max": 3.315477500359217,
+       "count": 136
+     }
+   },
+   "metadata": {
+     "timer_format_version": "0.1.0",
+     "start_time_seconds": "1717382228",
+     "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
+     "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train.yaml --run-id=DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train --base-port 5007",
+     "mlagents_version": "0.30.0",
+     "mlagents_envs_version": "0.30.0",
+     "communication_protocol_version": "1.5.0",
+     "pytorch_version": "1.7.1+cu110",
+     "numpy_version": "1.21.0",
+     "end_time_seconds": "1717385903"
+   },
+   "total": 3674.7049556,
+   "count": 1,
+   "self": 0.2661654000003182,
+   "children": {
+     "run_training.setup": {
+       "total": 0.052535,
+       "count": 1,
+       "self": 0.052535
+     },
+     "TrainerController.start_learning": {
+       "total": 3674.3862552,
+       "count": 1,
+       "self": 7.290390299856426,
+       "children": {
+         "TrainerController._reset_env": {
+           "total": 2.041863,
+           "count": 1,
+           "self": 2.041863
+         },
+         "TrainerController.advance": {
+           "total": 3664.8988267001437,
+           "count": 401208,
+           "self": 6.674391100080811,
+           "children": {
+             "env_step": {
+               "total": 3658.224435600063,
+               "count": 401208,
+               "self": 1754.6349792000276,
+               "children": {
+                 "SubprocessEnvManager._take_step": {
+                   "total": 1899.230050900079,
+                   "count": 401208,
+                   "self": 12.695854500179621,
+                   "children": {
+                     "TorchPolicy.evaluate": {
+                       "total": 1886.5341963998994,
+                       "count": 400248,
+                       "self": 1886.5341963998994
+                     }
+                   }
+                 },
+                 "workers": {
+                   "total": 4.359405499956263,
+                   "count": 401208,
+                   "self": 0.0,
+                   "children": {
+                     "worker_root": {
+                       "total": 3665.6144427000363,
+                       "count": 401208,
+                       "is_parallel": true,
+                       "self": 2142.4204017000457,
+                       "children": {
+                         "steps_from_proto": {
+                           "total": 0.0064805999999999475,
+                           "count": 1,
+                           "is_parallel": true,
+                           "self": 9.839999999972093e-05,
+                           "children": {
+                             "_process_maybe_compressed_observation": {
+                               "total": 0.006319900000000045,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 3.45999999997737e-05,
+                               "children": {
+                                 "_observation_to_np_array": {
+                                   "total": 0.006285300000000271,
+                                   "count": 3,
+                                   "is_parallel": true,
+                                   "self": 2.670000000004613e-05,
+                                   "children": {
+                                     "process_pixels": {
+                                       "total": 0.006258600000000225,
+                                       "count": 3,
+                                       "is_parallel": true,
+                                       "self": 0.0002170999999999701,
+                                       "children": {
+                                         "image_decompress": {
+                                           "total": 0.006041500000000255,
+                                           "count": 3,
+                                           "is_parallel": true,
+                                           "self": 0.006041500000000255
+                                         }
+                                       }
+                                     }
+                                   }
+                                 }
+                               }
+                             },
+                             "_process_rank_one_or_two_observation": {
+                               "total": 6.230000000018165e-05,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 6.230000000018165e-05
+                             }
+                           }
+                         },
+                         "UnityEnvironment.step": {
+                           "total": 1523.1875603999908,
+                           "count": 401208,
+                           "is_parallel": true,
+                           "self": 18.84510889997,
+                           "children": {
+                             "UnityEnvironment._generate_step_input": {
+                               "total": 18.87043490002324,
+                               "count": 401208,
+                               "is_parallel": true,
+                               "self": 18.87043490002324
+                             },
+                             "communicator.exchange": {
+                               "total": 1327.9881203000123,
+                               "count": 401208,
+                               "is_parallel": true,
+                               "self": 1327.9881203000123
+                             },
+                             "steps_from_proto": {
+                               "total": 157.4838962999855,
+                               "count": 401208,
+                               "is_parallel": true,
+                               "self": 30.96124800020729,
+                               "children": {
+                                 "_process_maybe_compressed_observation": {
+                                   "total": 113.2023702999204,
+                                   "count": 802416,
+                                   "is_parallel": true,
+                                   "self": 8.737345000014244,
+                                   "children": {
+                                     "_observation_to_np_array": {
+                                       "total": 104.46502529990616,
+                                       "count": 1203855,
+                                       "is_parallel": true,
+                                       "self": 7.99194869999009,
+                                       "children": {
+                                         "process_pixels": {
+                                           "total": 96.47307659991607,
+                                           "count": 1203855,
+                                           "is_parallel": true,
+                                           "self": 44.47351000003595,
+                                           "children": {
+                                             "image_decompress": {
+                                               "total": 51.99956659988012,
+                                               "count": 1203855,
+                                               "is_parallel": true,
+                                               "self": 51.99956659988012
+                                             }
+                                           }
+                                         }
+                                       }
+                                     }
+                                   }
+                                 },
+                                 "_process_rank_one_or_two_observation": {
+                                   "total": 13.320277999857804,
+                                   "count": 802416,
+                                   "is_parallel": true,
+                                   "self": 13.320277999857804
+                                 }
+                               }
+                             }
+                           }
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "trainer_threads": {
+           "total": 2.8299999939918052e-05,
+           "count": 1,
+           "self": 2.8299999939918052e-05,
+           "children": {
+             "thread_root": {
+               "total": 0.0,
+               "count": 0,
+               "is_parallel": true,
+               "self": 0.0,
+               "children": {
+                 "trainer_advance": {
+                   "total": 3670.3741325999927,
+                   "count": 182739,
+                   "is_parallel": true,
+                   "self": 5.5118690999092905,
+                   "children": {
+                     "process_trajectory": {
+                       "total": 2967.3206787000836,
+                       "count": 182739,
+                       "is_parallel": true,
+                       "self": 2966.911564500084,
+                       "children": {
+                         "RLTrainer._checkpoint": {
+                           "total": 0.40911419999997634,
+                           "count": 2,
+                           "is_parallel": true,
+                           "self": 0.40911419999997634
+                         }
+                       }
+                     },
+                     "_update_policy": {
+                       "total": 697.5415847999999,
+                       "count": 136,
+                       "is_parallel": true,
+                       "self": 470.7448629999821,
+                       "children": {
+                         "TorchPPOOptimizer.update": {
+                           "total": 226.7967218000178,
+                           "count": 3387,
+                           "is_parallel": true,
+                           "self": 226.7967218000178
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "TrainerController._save_models": {
+           "total": 0.15514690000009068,
+           "count": 1,
+           "self": 0.0052067000001443375,
+           "children": {
+             "RLTrainer._checkpoint": {
+               "total": 0.14994019999994634,
+               "count": 1,
+               "self": 0.14994019999994634
+             }
+           }
+         }
+       }
+     }
+   }
+ }
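Each gauge stores the last written `value` plus the running `min`/`max` over the summary periods (`count` is 200 at the 6,000-step `summary_freq`, and 136 for the loss gauges), while the nested timer tree splits the roughly 3,675 s of wall-clock time into `total`/`self`/`count` per call site. A minimal sketch for pulling numbers back out of the log:

```python
# Minimal sketch: read one gauge and the top-level wall-clock total from timers.json.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

reward = timers["gauges"]["Agent.Environment.CumulativeReward.mean"]
print(reward["value"], reward["min"], reward["max"], reward["count"])

print(timers["total"])  # ~3674.7 seconds for the whole training run
```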
run_logs/training_status.json ADDED
@@ -0,0 +1,53 @@
+ {
+   "difficulty": {
+     "lesson_num": 0
+   },
+   "task": {
+     "lesson_num": 0
+   },
+   "Agent": {
+     "checkpoints": [
+       {
+         "steps": 499734,
+         "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train\\Agent\\Agent-499734.onnx",
+         "reward": 12.925641179084778,
+         "creation_time": 1717383754.224171,
+         "auxillary_file_paths": [
+           "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train\\Agent\\Agent-499734.pt"
+         ]
+       },
+       {
+         "steps": 999693,
+         "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train\\Agent\\Agent-999693.onnx",
+         "reward": 9.547621119590033,
+         "creation_time": 1717385287.4823232,
+         "auxillary_file_paths": [
+           "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train\\Agent\\Agent-999693.pt"
+         ]
+       },
+       {
+         "steps": 1200735,
+         "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train\\Agent\\Agent-1200735.onnx",
+         "reward": 8.914543892656054,
+         "creation_time": 1717385903.3143573,
+         "auxillary_file_paths": [
+           "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train\\Agent\\Agent-1200735.pt"
+         ]
+       }
+     ],
+     "final_checkpoint": {
+       "steps": 1200735,
+       "file_path": "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train\\Agent.onnx",
+       "reward": 8.914543892656054,
+       "creation_time": 1717385903.3143573,
+       "auxillary_file_paths": [
+         "results\\DroneBasedReforestation/train/DroneBasedReforestation_difficulty_9_task_0_run_id_1_train\\Agent\\Agent-1200735.pt"
+       ]
+     }
+   },
+   "metadata": {
+     "stats_format_version": "0.3.0",
+     "mlagents_version": "0.30.0",
+     "torch_version": "1.7.1+cu110"
+   }
+ }
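Note that the highest-reward checkpoint here is not the final one: `Agent-499734.onnx` logged a reward of about 12.93 against about 8.91 for the final model. A minimal sketch for picking it out programmatically (`auxillary_file_paths` is ML-Agents' own spelling of the key):

```python
# Minimal sketch: select the highest-reward checkpoint from training_status.json.
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

best = max(status["Agent"]["checkpoints"], key=lambda c: c["reward"])
print(best["steps"])      # 499734
print(best["reward"])     # ~12.93 vs ~8.91 for the final checkpoint
print(best["file_path"])  # the corresponding .onnx under results\
```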