philippds committed
Commit ab310e4
Parent: 9f7e49f

Upload 16 files

Agent.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62100783f97892f3dc1bd559bf66ff2efea7e8e8d51802484fc087574beab238
+ size 608107
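Every binary in this commit (the .onnx policy exports, the .pt checkpoints, and the TensorBoard event log) is stored as a Git LFS pointer like the three-line hunk above, not as the weights themselves. After git lfs pull fetches the real blobs, each pointer's oid and size can be checked against the downloaded file. A minimal Python sketch with illustrative helper names — the pointer fields are exactly the three shown above:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path):
    # A pointer is three "key value" lines: version, oid, size.
    fields = dict(
        line.split(" ", 1) for line in Path(pointer_path).read_text().splitlines()
    )
    return {
        "version": fields["version"],
        "sha256": fields["oid"].split(":", 1)[1],  # strip the "sha256:" prefix
        "size": int(fields["size"]),
    }

def verify_blob(pointer, blob_path):
    # True if the fetched blob matches the pointer's size and digest.
    data = Path(blob_path).read_bytes()
    return (len(data) == pointer["size"]
            and hashlib.sha256(data).hexdigest() == pointer["sha256"])

The oids also reveal duplicates: Agent.onnx, Agent/Agent-2999934.onnx, and Agent/Agent-3000264.onnx share oid 62100783..., so they are byte-identical exports, and Agent/Agent-3000264.pt is identical to Agent/checkpoint.pt (oid 22cfa2b4...).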
Agent/Agent-1499882.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34b59eeb25a04bad7eaf6c6adac83c20e90e28ca4fefc592b6a0a686b4172790
+ size 608107
Agent/Agent-1499882.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a38aef2758da40d3697ed52964e2320fd60f5262deace01551ba574280fe4ac0
+ size 4847687
Agent/Agent-1999446.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3f18a61d8e57ea5d6ea843818a88c4af4c5e3fca830010db71aa26f64eed012
+ size 608107
Agent/Agent-1999446.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad657a6224141b49c8c47a7f31af48a87db19a62cf7d091c096e110a9de115a7
+ size 4847687
Agent/Agent-2499870.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ae767d175c63242fdfaa6fef3e026759408bdb1730af9b26631542577f8fbc5
+ size 608107
Agent/Agent-2499870.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70215f15295325263c588f5cf88b13a3b3c2e12dd194937cc3d2b91dfa07c8f5
+ size 4847687
Agent/Agent-2999934.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62100783f97892f3dc1bd559bf66ff2efea7e8e8d51802484fc087574beab238
+ size 608107
Agent/Agent-2999934.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ac27cf74b115bf3b770a587d5641b50b707a83c6c39f88b3b2a7b8a0ab24ce1
+ size 4847687
Agent/Agent-3000264.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62100783f97892f3dc1bd559bf66ff2efea7e8e8d51802484fc087574beab238
+ size 608107
Agent/Agent-3000264.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22cfa2b4c9fe6be1ed555e7374f1b3b2a71bf72fa163db4fbd6d034638d13bd9
+ size 4847687
Agent/checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22cfa2b4c9fe6be1ed555e7374f1b3b2a71bf72fa163db4fbd6d034638d13bd9
+ size 4847687
Agent/events.out.tfevents.1716271204.RICHARD.10080.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92cb00747daa3051fdc26b37b4e6677efe1c745eb582e15761ec03cfa830a472
+ size 2384687
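The events.out.tfevents.* file above is the TensorBoard log for this run. Once the blob is fetched, the scalar curves (rewards, losses, entropy) can be read back; a sketch assuming the tensorboard package is installed — the tag names are whatever ML-Agents wrote, so list them before querying:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at the fetched event file and load it.
acc = EventAccumulator("Agent/events.out.tfevents.1716271204.RICHARD.10080.0")
acc.Reload()

tags = acc.Tags()["scalars"]
print(tags)  # metric names recorded during training
for event in acc.Scalars(tags[0])[:5]:
    print(event.step, event.value)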
configuration.yaml ADDED
@@ -0,0 +1,98 @@
+ default_settings: null
+ behaviors:
+   Agent:
+     trainer_type: ppo
+     hyperparameters:
+       batch_size: 1024
+       buffer_size: 10240
+       learning_rate: 0.0003
+       beta: 0.005
+       epsilon: 0.2
+       lambd: 0.95
+       num_epoch: 3
+       shared_critic: false
+       learning_rate_schedule: linear
+       beta_schedule: linear
+       epsilon_schedule: linear
+     network_settings:
+       normalize: false
+       hidden_units: 128
+       num_layers: 2
+       vis_encode_type: resnet
+       memory: null
+       goal_conditioning_type: hyper
+       deterministic: false
+     reward_signals:
+       curiosity:
+         gamma: 0.99
+         strength: 0.1
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: resnet
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+         learning_rate: 0.0003
+         encoding_size: 256
+       extrinsic:
+         gamma: 0.99
+         strength: 0.9
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: resnet
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+     init_path: null
+     keep_checkpoints: 5
+     checkpoint_interval: 500000
+     max_steps: 3000000
+     time_horizon: 10240
+     summary_freq: 15000
+     threaded: true
+     self_play: null
+     behavioral_cloning: null
+ env_settings:
+   env_path: c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/dev_environments/Hivex_OceanPlasticCollection_win
+   env_args: null
+   base_port: 5007
+   num_envs: 1
+   num_areas: 1
+   seed: 5000
+   max_lifetime_restarts: 10
+   restarts_rate_limit_n: 1
+   restarts_rate_limit_period_s: 60
+ engine_settings:
+   width: 84
+   height: 84
+   quality_level: 5
+   time_scale: 20
+   target_frame_rate: -1
+   capture_frame_rate: 60
+   no_graphics: true
+ environment_parameters:
+   task:
+     curriculum:
+     - value:
+         sampler_type: constant
+         sampler_parameters:
+           seed: 5000
+           value: 1
+       name: task
+       completion_criteria: null
+ checkpoint_settings:
+   run_id: OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train
+   initialize_from: null
+   load_model: false
+   resume: false
+   force: false
+   train_model: false
+   inference: false
+   results_dir: results
+ torch_settings:
+   device: null
+   debug: false
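configuration.yaml is the ML-Agents run configuration: a PPO trainer for the Agent behavior with a ResNet visual encoder and two reward signals, extrinsic (strength 0.9) and curiosity (strength 0.1), trained for 3,000,000 steps with a checkpoint every 500,000. Per the command_line_arguments recorded in run_logs/timers.json below, training was launched with mlagents-learn pointing at this config, --run-id=OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train, and --base-port 5007. A small sketch (assuming PyYAML is installed) that reads the hyperparameters back out:

import yaml

with open("configuration.yaml") as f:
    config = yaml.safe_load(f)

agent = config["behaviors"]["Agent"]
print(agent["trainer_type"])           # ppo
for name, value in agent["hyperparameters"].items():
    print(f"{name}: {value}")          # batch_size: 1024, buffer_size: 10240, ...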
run_logs/timers.json ADDED
@@ -0,0 +1,447 @@
+ {
+   "name": "root",
+   "gauges": {
+     "Agent.Policy.Entropy.mean": {
+       "value": 0.44589558243751526,
+       "min": 0.42532193660736084,
+       "max": 1.79140305519104,
+       "count": 200
+     },
+     "Agent.Policy.Entropy.sum": {
+       "value": 6744.61669921875,
+       "min": 6259.1630859375,
+       "max": 27053.76953125,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.mean": {
+       "value": 151.72727272727272,
+       "min": 26.29281767955801,
+       "max": 207.65217391304347,
+       "count": 200
+     },
+     "Agent.Environment.EpisodeLength.sum": {
+       "value": 15021.0,
+       "min": 13128.0,
+       "max": 16833.0,
+       "count": 200
+     },
+     "Agent.OceanPlasticCollector.GlobalReward.mean": {
+       "value": 286.7894780923442,
+       "min": 1.1800377699405407,
+       "max": 388.8985731244282,
+       "count": 200
+     },
+     "Agent.OceanPlasticCollector.GlobalReward.sum": {
+       "value": 53342.842925176024,
+       "min": 623.0599425286055,
+       "max": 65570.11977723241,
+       "count": 200
+     },
+     "Agent.OceanPlasticCollector.LocalReward.mean": {
+       "value": 169.48924731182797,
+       "min": 17.577651515151516,
+       "max": 191.92857142857142,
+       "count": 200
+     },
+     "Agent.OceanPlasticCollector.LocalReward.sum": {
+       "value": 31525.0,
+       "min": 8876.0,
+       "max": 34975.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.mean": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Environment.LessonNumber.task.sum": {
+       "value": 0.0,
+       "min": 0.0,
+       "max": 0.0,
+       "count": 200
+     },
+     "Agent.Step.mean": {
+       "value": 2999934.0,
+       "min": 14995.0,
+       "max": 2999934.0,
+       "count": 200
+     },
+     "Agent.Step.sum": {
+       "value": 2999934.0,
+       "min": 14995.0,
+       "max": 2999934.0,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityValueEstimate.mean": {
+       "value": 0.07356642931699753,
+       "min": -0.33354872465133667,
+       "max": 1.1176748275756836,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityValueEstimate.sum": {
+       "value": 7.356642723083496,
+       "min": -88.1844711303711,
+       "max": 253.16246032714844,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.mean": {
+       "value": 371.4970397949219,
+       "min": 3.141871690750122,
+       "max": 406.1068115234375,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicValueEstimate.sum": {
+       "value": 37149.703125,
+       "min": 1410.700439453125,
+       "max": 58084.30859375,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.mean": {
+       "value": 1165.9769846343995,
+       "min": 90.62464488949237,
+       "max": 1429.8103665571946,
+       "count": 200
+     },
+     "Agent.Environment.CumulativeReward.sum": {
+       "value": 116597.69846343994,
+       "min": 40694.93034648895,
+       "max": 128350.83875656128,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityReward.mean": {
+       "value": 0.19991103200241922,
+       "min": 0.04842318139165507,
+       "max": 1.0922839760062206,
+       "count": 200
+     },
+     "Agent.Policy.CuriosityReward.sum": {
+       "value": 19.991103200241923,
+       "min": 19.56209521740675,
+       "max": 233.2555589172989,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.mean": {
+       "value": 1049.3792574310303,
+       "min": 81.56217563912958,
+       "max": 1286.8292953830498,
+       "count": 200
+     },
+     "Agent.Policy.ExtrinsicReward.sum": {
+       "value": 104937.92574310303,
+       "min": 36625.43554967642,
+       "max": 115515.75228118896,
+       "count": 200
+     },
+     "Agent.Losses.PolicyLoss.mean": {
+       "value": 0.02173497434705496,
+       "min": 0.015104639964799086,
+       "max": 0.03335427862281601,
+       "count": 200
+     },
+     "Agent.Losses.PolicyLoss.sum": {
+       "value": 0.02173497434705496,
+       "min": 0.015104639964799086,
+       "max": 0.057910619350150225,
+       "count": 200
+     },
+     "Agent.Losses.ValueLoss.mean": {
+       "value": 7911.067073567709,
+       "min": 1563.9071044921875,
+       "max": 8878.087890625,
+       "count": 200
+     },
+     "Agent.Losses.ValueLoss.sum": {
+       "value": 7911.067073567709,
+       "min": 1563.9071044921875,
+       "max": 16406.7236328125,
+       "count": 200
+     },
+     "Agent.Policy.LearningRate.mean": {
+       "value": 8.607997131000032e-07,
+       "min": 8.607997131000032e-07,
+       "max": 0.00029895630034790005,
+       "count": 200
+     },
+     "Agent.Policy.LearningRate.sum": {
+       "value": 8.607997131000032e-07,
+       "min": 8.607997131000032e-07,
+       "max": 0.0005927421024192999,
+       "count": 200
+     },
+     "Agent.Policy.Epsilon.mean": {
+       "value": 0.1002869,
+       "min": 0.1002869,
+       "max": 0.19965209999999994,
+       "count": 200
+     },
+     "Agent.Policy.Epsilon.sum": {
+       "value": 0.1002869,
+       "min": 0.1002869,
+       "max": 0.3975807,
+       "count": 200
+     },
+     "Agent.Policy.Beta.mean": {
+       "value": 2.431631000000006e-05,
+       "min": 2.431631000000006e-05,
+       "max": 0.004982639789999998,
+       "count": 200
+     },
+     "Agent.Policy.Beta.sum": {
+       "value": 2.431631000000006e-05,
+       "min": 2.431631000000006e-05,
+       "max": 0.00987927693,
+       "count": 200
+     },
+     "Agent.Losses.CuriosityForwardLoss.mean": {
+       "value": 0.013924151131262382,
+       "min": 0.012690329737961293,
+       "max": 0.1835711356252432,
+       "count": 200
+     },
+     "Agent.Losses.CuriosityForwardLoss.sum": {
+       "value": 0.013924151131262382,
+       "min": 0.012690329737961293,
+       "max": 0.24826578845580421,
+       "count": 200
+     },
+     "Agent.Losses.CuriosityInverseLoss.mean": {
+       "value": 0.07042228939632575,
+       "min": 0.05831307669480642,
+       "max": 1.7913234074910482,
+       "count": 200
+     },
+     "Agent.Losses.CuriosityInverseLoss.sum": {
+       "value": 0.07042228939632575,
+       "min": 0.05831307669480642,
+       "max": 3.1144038677215575,
+       "count": 200
+     },
+     "Agent.IsTraining.mean": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     },
+     "Agent.IsTraining.sum": {
+       "value": 1.0,
+       "min": 1.0,
+       "max": 1.0,
+       "count": 200
+     }
+   },
+   "metadata": {
+     "timer_format_version": "0.1.0",
+     "start_time_seconds": "1716271201",
+     "python_version": "3.9.18 (main, Sep 11 2023, 14:09:26) [MSC v.1916 64 bit (AMD64)]",
+     "command_line_arguments": "C:\\Users\\pdsie\\anaconda3\\envs\\mlagents20\\Scripts\\mlagents-learn c:/users/pdsie/documents/hivex/src/hivex/training/baseline/ml_agents/configs/mlagents/tmp/train/OceanPlasticCollection_task_1_run_id_0_train.yaml --run-id=OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train --base-port 5007",
+     "mlagents_version": "0.30.0",
+     "mlagents_envs_version": "0.30.0",
+     "communication_protocol_version": "1.5.0",
+     "pytorch_version": "1.7.1+cu110",
+     "numpy_version": "1.21.2",
+     "end_time_seconds": "1716300263"
+   },
+   "total": 29063.168051,
+   "count": 1,
+   "self": 1.468607000002521,
+   "children": {
+     "run_training.setup": {
+       "total": 0.058088099999999976,
+       "count": 1,
+       "self": 0.058088099999999976
+     },
+     "TrainerController.start_learning": {
+       "total": 29061.6413559,
+       "count": 1,
+       "self": 24.14261530045769,
+       "children": {
+         "TrainerController._reset_env": {
+           "total": 4.243644199999999,
+           "count": 1,
+           "self": 4.243644199999999
+         },
+         "TrainerController.advance": {
+           "total": 29033.015204899544,
+           "count": 1007298,
+           "self": 22.936762998244376,
+           "children": {
+             "env_step": {
+               "total": 29010.0784419013,
+               "count": 1007298,
+               "self": 18977.19774700009,
+               "children": {
+                 "SubprocessEnvManager._take_step": {
+                   "total": 10019.075315700178,
+                   "count": 1007298,
+                   "self": 42.5171780018718,
+                   "children": {
+                     "TorchPolicy.evaluate": {
+                       "total": 9976.558137698306,
+                       "count": 1000098,
+                       "self": 9976.558137698306
+                     }
+                   }
+                 },
+                 "workers": {
+                   "total": 13.805379201032467,
+                   "count": 1007298,
+                   "self": 0.0,
+                   "children": {
+                     "worker_root": {
+                       "total": 29034.415797299407,
+                       "count": 1007298,
+                       "is_parallel": true,
+                       "self": 10949.139479400816,
+                       "children": {
+                         "steps_from_proto": {
+                           "total": 0.0004306000000000587,
+                           "count": 1,
+                           "is_parallel": true,
+                           "self": 0.00011079999999985546,
+                           "children": {
+                             "_process_maybe_compressed_observation": {
+                               "total": 0.00023300000000014975,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 3.339999999996124e-05,
+                               "children": {
+                                 "_observation_to_np_array": {
+                                   "total": 0.00019960000000018852,
+                                   "count": 3,
+                                   "is_parallel": true,
+                                   "self": 0.00019960000000018852
+                                 }
+                               }
+                             },
+                             "_process_rank_one_or_two_observation": {
+                               "total": 8.68000000000535e-05,
+                               "count": 2,
+                               "is_parallel": true,
+                               "self": 8.68000000000535e-05
+                             }
+                           }
+                         },
+                         "UnityEnvironment.step": {
+                           "total": 18085.27588729859,
+                           "count": 1007298,
+                           "is_parallel": true,
+                           "self": 134.34906380009124,
+                           "children": {
+                             "UnityEnvironment._generate_step_input": {
+                               "total": 65.43167780016917,
+                               "count": 1007298,
+                               "is_parallel": true,
+                               "self": 65.43167780016917
+                             },
+                             "communicator.exchange": {
+                               "total": 17397.16803759995,
+                               "count": 1007298,
+                               "is_parallel": true,
+                               "self": 17397.16803759995
+                             },
+                             "steps_from_proto": {
+                               "total": 488.3271080983802,
+                               "count": 1007298,
+                               "is_parallel": true,
+                               "self": 114.74149420003545,
+                               "children": {
+                                 "_process_maybe_compressed_observation": {
+                                   "total": 309.15640659773845,
+                                   "count": 2014596,
+                                   "is_parallel": true,
+                                   "self": 40.06789069569027,
+                                   "children": {
+                                     "_observation_to_np_array": {
+                                       "total": 269.0885159020482,
+                                       "count": 3027150,
+                                       "is_parallel": true,
+                                       "self": 269.0885159020482
+                                     }
+                                   }
+                                 },
+                                 "_process_rank_one_or_two_observation": {
+                                   "total": 64.42920730060631,
+                                   "count": 2014596,
+                                   "is_parallel": true,
+                                   "self": 64.42920730060631
+                                 }
+                               }
+                             }
+                           }
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "trainer_threads": {
+           "total": 2.8799997380701825e-05,
+           "count": 1,
+           "self": 2.8799997380701825e-05,
+           "children": {
+             "thread_root": {
+               "total": 0.0,
+               "count": 0,
+               "is_parallel": true,
+               "self": 0.0,
+               "children": {
+                 "trainer_advance": {
+                   "total": 29033.17883840124,
+                   "count": 1526568,
+                   "is_parallel": true,
+                   "self": 67.84328040284527,
+                   "children": {
+                     "process_trajectory": {
+                       "total": 24385.49959319836,
+                       "count": 1526568,
+                       "is_parallel": true,
+                       "self": 24383.658545898357,
+                       "children": {
+                         "RLTrainer._checkpoint": {
+                           "total": 1.8410473000030834,
+                           "count": 6,
+                           "is_parallel": true,
+                           "self": 1.8410473000030834
+                         }
+                       }
+                     },
+                     "_update_policy": {
+                       "total": 4579.835964800032,
+                       "count": 282,
+                       "is_parallel": true,
+                       "self": 3046.1118077001292,
+                       "children": {
+                         "TorchPPOOptimizer.update": {
+                           "total": 1533.724157099903,
+                           "count": 8532,
+                           "is_parallel": true,
+                           "self": 1533.724157099903
+                         }
+                       }
+                     }
+                   }
+                 }
+               }
+             }
+           }
+         },
+         "TrainerController._save_models": {
+           "total": 0.2398627000002307,
+           "count": 1,
+           "self": 0.019564499998523388,
+           "children": {
+             "RLTrainer._checkpoint": {
+               "total": 0.2202982000017073,
+               "count": 1,
+               "self": 0.2202982000017073
+             }
+           }
+         }
+       }
+     }
+   }
+ }
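timers.json is ML-Agents' hierarchical profiler dump: each node carries total (seconds, including children), self, count, and nested children. Most of the roughly 29,063 s run sits in communicator.exchange (about 17,397 s), i.e. waiting on the Unity environment to step. A sketch that walks the tree and prints each node's share of the run — the field names match the file above, the traversal itself is just illustrative:

import json

def walk(name, node, root_total, depth=0):
    # "total" includes time spent in this node's children.
    total = node.get("total", 0.0)
    print(f"{'  ' * depth}{name}: {total:.1f}s "
          f"({100 * total / root_total:.1f}% of run)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, root_total, depth + 1)

with open("run_logs/timers.json") as f:
    timers = json.load(f)

walk(timers["name"], timers, timers["total"])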
run_logs/training_status.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "task": {
+     "lesson_num": 0
+   },
+   "Agent": {
+     "checkpoints": [
+       {
+         "steps": 1499882,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-1499882.onnx",
+         "reward": 785.739506149292,
+         "creation_time": 1716285649.2064097,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-1499882.pt"
+         ]
+       },
+       {
+         "steps": 1999446,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-1999446.onnx",
+         "reward": 1539.5435720171247,
+         "creation_time": 1716290432.8668234,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-1999446.pt"
+         ]
+       },
+       {
+         "steps": 2499870,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-2499870.onnx",
+         "reward": 1005.5468650658926,
+         "creation_time": 1716295547.9635189,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-2499870.pt"
+         ]
+       },
+       {
+         "steps": 2999934,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-2999934.onnx",
+         "reward": 1042.9728899309712,
+         "creation_time": 1716300262.2323508,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-2999934.pt"
+         ]
+       },
+       {
+         "steps": 3000264,
+         "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-3000264.onnx",
+         "reward": 1048.6695104932028,
+         "creation_time": 1716300262.4908817,
+         "auxillary_file_paths": [
+           "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-3000264.pt"
+         ]
+       }
+     ],
+     "final_checkpoint": {
+       "steps": 3000264,
+       "file_path": "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent.onnx",
+       "reward": 1048.6695104932028,
+       "creation_time": 1716300262.4908817,
+       "auxillary_file_paths": [
+         "results\\OceanPlasticCollection/train/OceanPlasticCollection_task_1_run_id_0_train\\Agent\\Agent-3000264.pt"
+       ]
+     }
+   },
+   "metadata": {
+     "stats_format_version": "0.3.0",
+     "mlagents_version": "0.30.0",
+     "torch_version": "1.7.1+cu110"
+   }
+ }
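training_status.json records the checkpoint history. The highest-reward checkpoint is Agent-1999446 (reward 1539.54), while final_checkpoint is the step-3000264 export saved at the repo root as Agent.onnx. A sketch that finds the best checkpoint and inspects the exported graph — onnxruntime is an assumption here, the repo only ships the files and any ONNX-capable runtime would do:

import json
import onnxruntime as ort

with open("run_logs/training_status.json") as f:
    status = json.load(f)

# Pick the checkpoint with the highest recorded reward.
best = max(status["Agent"]["checkpoints"], key=lambda c: c["reward"])
print(best["steps"], best["reward"])   # 1999446 1539.5435720171247

# Inspect the final exported policy (fetched via git lfs pull).
session = ort.InferenceSession("Agent.onnx")
for inp in session.get_inputs():
    print(inp.name, inp.shape)         # the policy's observation tensors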