bsenst committed
Commit 5cd6399
1 Parent(s): 334810a

add ppo snowballtarget agent

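This commit swaps in a freshly trained PPO policy for the SnowballTarget environment: the exported ONNX graph, the PyTorch checkpoints, the TensorBoard event file, and the run logs all change, while the checkpoint step count stays at 1408. The training invocation recorded in run_logs/timers.json is `ml-agents/mlagents/trainers/learn.py ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force`. As a quick smoke test of the new artifact, a minimal sketch (assuming `onnxruntime` is installed and the LFS objects have been pulled) can load SnowballTarget.onnx and list its input and output signatures:

```python
# Minimal sketch, not part of this repo: inspect the exported policy with
# onnxruntime (assumed installed). Run from the repository root after
# `git lfs pull`, otherwise only the LFS pointer text is present on disk.
import onnxruntime as ort

session = ort.InferenceSession("SnowballTarget.onnx")

# The exact tensor names depend on the ML-Agents ONNX export and are not
# asserted here; we simply print whatever the graph declares.
for inp in session.get_inputs():
    print("input: ", inp.name, inp.shape, inp.type)
for out in session.get_outputs():
    print("output:", out.name, out.shape, out.type)
```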
SnowballTarget.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f72de47d3f9381123a8c41395a2a5f16c598ff4727ffaddbfa989d3e26aa418b
+ oid sha256:2664a2e0bcdb531977baa711c0df6ae87da487202412deb82efa02edc3eb1035
  size 646532
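SnowballTarget.onnx and the other binaries below are stored as Git LFS pointers (version / oid / size), so the diff only shows a new sha256 and, for the .pt checkpoints, a new byte size. A hedged sketch for checking that a downloaded file matches the oid recorded in its pointer, standard library only:

```python
# Sketch: recompute the sha256 of a downloaded artifact and compare it with
# the oid line of its Git LFS pointer. The expected value below is the new
# oid introduced by this commit.
import hashlib

def file_sha256(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "2664a2e0bcdb531977baa711c0df6ae87da487202412deb82efa02edc3eb1035"
print(file_sha256("SnowballTarget.onnx") == expected)
```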
SnowballTarget/SnowballTarget-1408.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f72de47d3f9381123a8c41395a2a5f16c598ff4727ffaddbfa989d3e26aa418b
+ oid sha256:2664a2e0bcdb531977baa711c0df6ae87da487202412deb82efa02edc3eb1035
  size 646532
SnowballTarget/SnowballTarget-1408.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:98669ac7905e423b2998e7b7f88b9538d85212a5df7101e2e1bd512565be9846
- size 1284832
+ oid sha256:ab04bf0dde56903d2a488a656a06464d5b3bbe4f9558102f5bf29e3b2fe79c71
+ size 1285152
SnowballTarget/checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:98669ac7905e423b2998e7b7f88b9538d85212a5df7101e2e1bd512565be9846
- size 1284832
+ oid sha256:ab04bf0dde56903d2a488a656a06464d5b3bbe4f9558102f5bf29e3b2fe79c71
+ size 1285152
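The two .pt files (the step-1408 checkpoint and checkpoint.pt) share the same new oid and grow slightly, from 1284832 to 1285152 bytes. With the PyTorch version recorded in run_logs/timers.json (1.11.0), a plain `torch.load` is enough to peek inside; a hedged sketch that only prints the top-level keys, since the exact contents of an ML-Agents checkpoint are not shown in this diff:

```python
# Sketch: list the top-level entries of the saved checkpoint (torch assumed
# installed, file pulled via Git LFS). Contents beyond the key names are not
# asserted here.
import torch

state = torch.load("SnowballTarget/checkpoint.pt", map_location="cpu")
if isinstance(state, dict):
    for key in state:
        print(key)
else:
    print(type(state))
```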
SnowballTarget/{events.out.tfevents.1680782860.ed24bde6b0fb.34290.0 → events.out.tfevents.1680784150.f5f04898c26b.966.0} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cb0859a55a16bdcdb763809bec46346dbfcb64322ded2fc6c7508ff9f0344891
+ oid sha256:acfabee5b543c6b037e5f94d2538922729cda60e3bc1c3006e37a132b69e102e
  size 1112
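The TensorBoard event file is renamed to match the new run's host and timestamp but keeps its 1112-byte size. A sketch for listing the scalar tags it contains, assuming the `tensorboard` package is available:

```python
# Sketch: open the renamed event file with TensorBoard's event accumulator
# (tensorboard assumed installed) and list whatever scalar tags it recorded.
from tensorboard.backend.event_processing import event_accumulator

path = "SnowballTarget/events.out.tfevents.1680784150.f5f04898c26b.966.0"
ea = event_accumulator.EventAccumulator(path)
ea.Reload()  # read the events from disk
print(ea.Tags().get("scalars", []))
```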
run_logs/Player-0.log CHANGED
@@ -2,6 +2,9 @@ Mono path[0] = '/kaggle/working/ml-agents/training-envs-executables/linux/Snowba
  Mono config path = '/kaggle/working/ml-agents/training-envs-executables/linux/SnowballTarget/SnowballTarget_Data/MonoBleedingEdge/etc'
  Preloaded 'lib_burst_generated.so'
  Preloaded 'libgrpc_csharp_ext.x64.so'
+ PlayerPrefs - Creating folder: /root/.config/unity3d/Hugging Face
+ PlayerPrefs - Creating folder: /root/.config/unity3d/Hugging Face/SnowballTarget
+ Unable to load player prefs
  Initialize engine version: 2021.3.14f1 (eee1884e7226)
  [Subsystems] Discovering subsystems at path /kaggle/working/ml-agents/training-envs-executables/linux/SnowballTarget/SnowballTarget_Data/UnitySubsystems
  Forcing GfxDevice: Null
@@ -31,7 +34,7 @@ ALSA lib pcm.c:2642:(snd_pcm_open_noupdate) Unknown PCM default
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
  FMOD initialized on nosound output
  Begin MonoManager ReloadAssembly
- - Completed reload, in 0.137 seconds
+ - Completed reload, in 0.089 seconds
  ERROR: Shader Sprites/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Sprites/Mask shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Legacy Shaders/VertexLit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
@@ -42,10 +45,10 @@ ERROR: Shader Standard shader is not supported on this GPU (none of subshaders/f
  WARNING: Shader Unsupported: 'Standard' - All subshaders removed
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
- UnloadTime: 1.295323 ms
+ UnloadTime: 0.598937 ms
  ERROR: Shader UI/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  requesting resize 84 x 84
- Setting up 2 worker threads for Enlighten.
+ Setting up 1 worker threads for Enlighten.
  Memory Statistics:
  [ALLOC_TEMP_TLS] TLS Allocator
  StackAllocators :
@@ -100,7 +103,7 @@ Memory Statistics:
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
  Overflow Count 0
- [ALLOC_TEMP_EnlightenWorker] x 2
+ [ALLOC_TEMP_EnlightenWorker]
  Initial Block Size 64.0 KB
  Current Block Size 64.0 KB
  Peak Allocated Bytes 0 B
@@ -125,7 +128,7 @@ Memory Statistics:
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
  Overflow Count 0
- [ALLOC_TEMP_AssetGarbageCollectorHelper] x 3
+ [ALLOC_TEMP_AssetGarbageCollectorHelper]
  Initial Block Size 64.0 KB
  Current Block Size 64.0 KB
  Peak Allocated Bytes 0 B
@@ -140,22 +143,12 @@ Memory Statistics:
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
  Overflow Count 0
- [ALLOC_TEMP_Job.Worker 1]
- Initial Block Size 256.0 KB
- Current Block Size 256.0 KB
- Peak Allocated Bytes 3.6 KB
- Overflow Count 0
- [ALLOC_TEMP_Job.Worker 2]
- Initial Block Size 256.0 KB
- Current Block Size 256.0 KB
- Peak Allocated Bytes 3.6 KB
- Overflow Count 0
- [ALLOC_TEMP_Background Job.Worker 3]
+ [ALLOC_TEMP_Background Job.Worker 11]
  Initial Block Size 32.0 KB
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
  Overflow Count 0
- [ALLOC_TEMP_Background Job.Worker 11]
+ [ALLOC_TEMP_Background Job.Worker 3]
  Initial Block Size 32.0 KB
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
@@ -176,7 +169,7 @@ Memory Statistics:
  Peak Allocated Bytes 0 B
  Overflow Count 0
  [ALLOC_DEFAULT] Dual Thread Allocator
- Peak main deferred allocation count 28
+ Peak main deferred allocation count 20
  [ALLOC_BUCKET]
  Large Block size 4.0 MB
  Used Block count 1
@@ -185,7 +178,7 @@ Memory Statistics:
  Peak usage frame count: [4.0 MB-8.0 MB]: 40 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 4.8 MB
+ Peak Allocated memory 4.7 MB
  Peak Large allocation bytes 0 B
  [ALLOC_DEFAULT_THREAD]
  Peak usage frame count: [16.0 MB-32.0 MB]: 40 frames
@@ -220,7 +213,7 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.0 MB
  [ALLOC_GFX_MAIN]
- Peak usage frame count: [32.0 KB-64.0 KB]: 36 frames, [64.0 KB-128.0 KB]: 4 frames
+ Peak usage frame count: [32.0 KB-64.0 KB]: 31 frames, [64.0 KB-128.0 KB]: 9 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 65.6 KB
run_logs/timers.json CHANGED
@@ -2,7 +2,7 @@
  "name": "root",
  "metadata": {
  "timer_format_version": "0.1.0",
- "start_time_seconds": "1680782859",
+ "start_time_seconds": "1680784149",
  "python_version": "3.10.5 | packaged by conda-forge | (main, Jun 14 2022, 07:04:59) [GCC 10.3.0]",
  "command_line_arguments": "ml-agents/mlagents/trainers/learn.py ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
  "mlagents_version": "0.31.0.dev0",
@@ -10,59 +10,59 @@
  "communication_protocol_version": "1.5.0",
  "pytorch_version": "1.11.0+cu102",
  "numpy_version": "1.21.2",
- "end_time_seconds": "1680782864"
+ "end_time_seconds": "1680784156"
  },
- "total": 4.8662428259999615,
+ "total": 7.037014824999915,
  "count": 1,
- "self": 0.38026291799997125,
+ "self": 0.5127422909997676,
  "children": {
  "run_training.setup": {
- "total": 0.024673117000020284,
+ "total": 0.0196718580000379,
  "count": 1,
- "self": 0.024673117000020284
+ "self": 0.0196718580000379
  },
  "TrainerController.start_learning": {
- "total": 4.46130679099997,
+ "total": 6.504600676000109,
  "count": 1,
- "self": 0.06062403700207142,
+ "self": 0.028221422998058188,
  "children": {
  "TrainerController._reset_env": {
- "total": 0.8937696269999833,
+ "total": 3.2124334480001835,
  "count": 1,
- "self": 0.8937696269999833
+ "self": 3.2124334480001835
  },
  "TrainerController.advance": {
- "total": 3.287795212997935,
+ "total": 2.952782744001752,
  "count": 131,
- "self": 0.0029381669960457657,
+ "self": 0.0019149590036704467,
  "children": {
  "env_step": {
- "total": 3.284857046001889,
+ "total": 2.9508677849980813,
  "count": 131,
- "self": 2.64100915600784,
+ "self": 2.3390354029995706,
  "children": {
  "SubprocessEnvManager._take_step": {
- "total": 0.6406369539977277,
+ "total": 0.6098573089998354,
  "count": 131,
- "self": 0.015728908997516555,
+ "self": 0.009847335000131352,
  "children": {
  "TorchPolicy.evaluate": {
- "total": 0.6249080450002111,
+ "total": 0.600009973999704,
  "count": 131,
- "self": 0.6249080450002111
+ "self": 0.600009973999704
  }
  }
  },
  "workers": {
- "total": 0.0032109359963214956,
+ "total": 0.0019750729986753868,
  "count": 131,
  "self": 0.0,
  "children": {
  "worker_root": {
- "total": 4.199120163999396,
+ "total": 6.177824511000836,
  "count": 131,
  "is_parallel": true,
- "self": 2.010407003997443,
+ "self": 4.207890857000621,
  "children": {
  "run_training.setup": {
  "total": 0.0,
@@ -71,48 +71,48 @@
  "self": 0.0,
  "children": {
  "steps_from_proto": {
- "total": 0.003694363000249723,
+ "total": 0.0020833740002217382,
  "count": 1,
  "is_parallel": true,
- "self": 0.001252010000825976,
+ "self": 0.0006434789995637402,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 0.002442352999423747,
+ "total": 0.001439895000657998,
  "count": 10,
  "is_parallel": true,
- "self": 0.002442352999423747
+ "self": 0.001439895000657998
  }
  }
  },
  "UnityEnvironment.step": {
- "total": 0.06070607199990263,
+ "total": 0.05380231999993157,
  "count": 1,
  "is_parallel": true,
- "self": 0.0008654529997329519,
+ "self": 0.00039890099992589967,
  "children": {
  "UnityEnvironment._generate_step_input": {
- "total": 0.0005708850003429689,
+ "total": 0.0003473599999779253,
  "count": 1,
  "is_parallel": true,
- "self": 0.0005708850003429689
+ "self": 0.0003473599999779253
  },
  "communicator.exchange": {
- "total": 0.05644129299980705,
+ "total": 0.05078761699996903,
  "count": 1,
  "is_parallel": true,
- "self": 0.05644129299980705
+ "self": 0.05078761699996903
  },
  "steps_from_proto": {
- "total": 0.002828441000019666,
+ "total": 0.0022684420000587124,
  "count": 1,
  "is_parallel": true,
- "self": 0.0006106840000938973,
+ "self": 0.00043957899993074534,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 0.0022177569999257685,
+ "total": 0.001828863000127967,
  "count": 10,
  "is_parallel": true,
- "self": 0.0022177569999257685
+ "self": 0.001828863000127967
  }
  }
  }
@@ -121,34 +121,34 @@
  }
  },
  "UnityEnvironment.step": {
- "total": 2.188713160001953,
+ "total": 1.969933654000215,
  "count": 130,
  "is_parallel": true,
- "self": 0.08645828599946981,
+ "self": 0.0699027690022831,
  "children": {
  "UnityEnvironment._generate_step_input": {
- "total": 0.0435731890020179,
+ "total": 0.048496131998490455,
  "count": 130,
  "is_parallel": true,
- "self": 0.0435731890020179
+ "self": 0.048496131998490455
  },
  "communicator.exchange": {
- "total": 1.7741295920018274,
+ "total": 1.6077250019980056,
  "count": 130,
  "is_parallel": true,
- "self": 1.7741295920018274
+ "self": 1.6077250019980056
  },
  "steps_from_proto": {
- "total": 0.2845520929986378,
+ "total": 0.24380975100143587,
  "count": 130,
  "is_parallel": true,
- "self": 0.058892392007237504,
+ "self": 0.04959121400065669,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 0.2256597009914003,
+ "total": 0.19421853700077918,
  "count": 1300,
  "is_parallel": true,
- "self": 0.2256597009914003
+ "self": 0.19421853700077918
  }
  }
  }
@@ -163,9 +163,9 @@
  }
  },
  "trainer_threads": {
- "total": 6.945099994482007e-05,
+ "total": 2.3585000008097268e-05,
  "count": 1,
- "self": 6.945099994482007e-05,
+ "self": 2.3585000008097268e-05,
  "children": {
  "thread_root": {
  "total": 0.0,
@@ -174,16 +174,16 @@
  "self": 0.0,
  "children": {
  "trainer_advance": {
- "total": 3.2903225010322785,
- "count": 4069,
+ "total": 2.9356368650082914,
+ "count": 4398,
  "is_parallel": true,
- "self": 0.1252297930250279,
+ "self": 0.09288661300911372,
  "children": {
  "process_trajectory": {
- "total": 3.1650927080072506,
- "count": 4069,
+ "total": 2.8427502519991776,
+ "count": 4398,
  "is_parallel": true,
- "self": 3.1650927080072506
+ "self": 2.8427502519991776
  }
  }
  }
@@ -192,14 +192,14 @@
  }
  },
  "TrainerController._save_models": {
- "total": 0.21904846300003555,
+ "total": 0.3111394760001076,
  "count": 1,
- "self": 0.0014783860001443827,
+ "self": 0.0013675610000518645,
  "children": {
  "RLTrainer._checkpoint": {
- "total": 0.21757007699989117,
+ "total": 0.30977191500005574,
  "count": 1,
- "self": 0.21757007699989117
+ "self": 0.30977191500005574
  }
  }
  }
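run_logs/timers.json is a nested profile: each node carries "total", "count", "self", and optional "children", with the node name used as the key one level up. In the new run the 7.0 s wall clock is still dominated by TrainerController.start_learning (6.5 s), with the environment reset noticeably slower than before (3.2 s vs 0.9 s). A sketch that flattens the file and prints the most expensive nodes, standard library only:

```python
# Sketch: flatten the hierarchical timing profile in run_logs/timers.json and
# print the nodes with the largest "total" (seconds). Standard library only.
import json

def walk(name, node, rows):
    rows.append((node.get("total", 0.0), node.get("count", 0), name))
    for child_name, child in node.get("children", {}).items():
        walk(f"{name}/{child_name}", child, rows)

with open("run_logs/timers.json") as f:
    root = json.load(f)

rows = []
walk(root.get("name", "root"), root, rows)
for total, count, name in sorted(rows, reverse=True)[:10]:
    print(f"{total:8.3f} s  count={count:<5d}  {name}")
```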
run_logs/training_status.json CHANGED
@@ -5,7 +5,7 @@
  "steps": 1408,
  "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-1408.onnx",
  "reward": null,
- "creation_time": 1680782864.3984804,
+ "creation_time": 1680784156.2640371,
  "auxillary_file_paths": [
  "results/SnowballTarget1/SnowballTarget/SnowballTarget-1408.pt"
  ]
@@ -15,7 +15,7 @@
  "steps": 1408,
  "file_path": "results/SnowballTarget1/SnowballTarget.onnx",
  "reward": null,
- "creation_time": 1680782864.3984804,
+ "creation_time": 1680784156.2640371,
  "auxillary_file_paths": [
  "results/SnowballTarget1/SnowballTarget/SnowballTarget-1408.pt"
  ]
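Both checkpoint records in run_logs/training_status.json still point at the step-1408 ONNX and .pt files; only the creation_time moves to the new run. A sketch that prints every checkpoint-like record (steps / file_path / creation_time) without assuming the surrounding nesting, since only the individual entries are visible in this diff:

```python
# Sketch: find and print checkpoint-like records anywhere inside
# run_logs/training_status.json, without assuming the exact nesting.
import json
from datetime import datetime, timezone

def find_checkpoints(node):
    if isinstance(node, dict):
        if {"steps", "file_path", "creation_time"} <= node.keys():
            yield node
        for value in node.values():
            yield from find_checkpoints(value)
    elif isinstance(node, list):
        for item in node:
            yield from find_checkpoints(item)

with open("run_logs/training_status.json") as f:
    status = json.load(f)

for ckpt in find_checkpoints(status):
    created = datetime.fromtimestamp(ckpt["creation_time"], tz=timezone.utc)
    print(ckpt["steps"], ckpt["file_path"], created.isoformat())
```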