pabloyesteb committed
Commit 98eb99a
1 Parent(s): 6138f93
Pyramids.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0772f6dde506d45b6b1d6a0a30f7f6aa3f72833e39d3878712b8288962571e29
+ oid sha256:42cd65f337736011e988a430ccf07abc5b37bcd4e78c4b464c4345d47bb7cb5f
  size 1418184
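Note: the block above is a Git LFS pointer, not the network itself; the binary is stored out-of-band and addressed by the sha256 oid. A minimal Python sketch for checking that a locally fetched Pyramids.onnx matches the new pointer (the local path is an assumption about where the repo was cloned with git lfs pull):

# Sketch: verify a downloaded LFS object against the oid recorded in its pointer file.
import hashlib

expected_oid = "42cd65f337736011e988a430ccf07abc5b37bcd4e78c4b464c4345d47bb7cb5f"

sha = hashlib.sha256()
with open("Pyramids.onnx", "rb") as f:            # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print("match:", sha.hexdigest() == expected_oid)

The same check applies to any of the .onnx and .pt pointers below, with the oid swapped accordingly.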
Pyramids/Pyramids-1000093.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42cd65f337736011e988a430ccf07abc5b37bcd4e78c4b464c4345d47bb7cb5f
+ size 1418184
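The .onnx files added in this commit are exported Pyramids policy snapshots from the ML-Agents run. A hedged sketch for inspecting one of them, assuming onnxruntime is installed; only the file name comes from this diff, the input and output names are whatever the export produced:

# Sketch: inspect an exported policy with onnxruntime (assumed to be installed).
import onnxruntime as ort

sess = ort.InferenceSession("Pyramids/Pyramids-1000093.onnx", providers=["CPUExecutionProvider"])
for inp in sess.get_inputs():
    print("input :", inp.name, inp.shape, inp.type)
for out in sess.get_outputs():
    print("output:", out.name, out.shape, out.type)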
Pyramids/Pyramids-1000093.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bb4e03a6f446ddd4c2f9b3ba82b01ff3f51b1c1ff02d33ddca58d53ecc64d54f
+ size 8650990
Pyramids/Pyramids-499963.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef2d195f4a11cff877bfe69c328a295ad0bca93fc2c0572b02f4c4896220b988
+ size 1418184
Pyramids/Pyramids-499963.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6163ab62a97fc78ddbbbafb2c21af8993c66982b892f175de3215044257a458
+ size 8650990
Pyramids/Pyramids-999970.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42cd65f337736011e988a430ccf07abc5b37bcd4e78c4b464c4345d47bb7cb5f
+ size 1418184
Pyramids/Pyramids-999970.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e63f49c710a930b8e29383277f5b12bf7d690076ab6f77b73bb219973e738a9
+ size 8650990
Pyramids/checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5375ddf2e5d1d2b91e3a355b7b2769132bdd0b65773be83b074434360bc85752
+ oid sha256:bb4e03a6f446ddd4c2f9b3ba82b01ff3f51b1c1ff02d33ddca58d53ecc64d54f
  size 8650990
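Pyramids/checkpoint.pt is the PyTorch training state refreshed by this run. A sketch for listing its top-level entries, assuming it was written with torch.save and loads as a dict-like object; the diff itself only guarantees the file size and hash:

# Sketch: list top-level entries of the ML-Agents checkpoint (structure assumed, not guaranteed by the diff).
import torch

state = torch.load("Pyramids/checkpoint.pt", map_location="cpu")
if isinstance(state, dict):
    for key in state:
        print(key)
else:
    print(type(state))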
Pyramids/events.out.tfevents.1692487454.02766acf09f7.2570.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff8600e943ce7f3dcd711179a99dcbf6fa471724b473c214dc201c141df383d5
+ size 314460
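The events.out.tfevents.* file holds the TensorBoard scalars logged during this run. A sketch for listing the logged scalar tags, assuming the tensorboard package is available:

# Sketch: list the scalar tags stored in the event file (tensorboard assumed installed).
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("Pyramids/events.out.tfevents.1692487454.02766acf09f7.2570.0")
acc.Reload()
print(acc.Tags()["scalars"])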
run_logs/Player-0.log CHANGED
@@ -1,7 +1,7 @@
  Mono path[0] = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/Managed'
  Mono config path = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/MonoBleedingEdge/etc'
  Found 1 interfaces on host : 0) 172.28.0.12
- Multi-casting "[IP] 172.28.0.12 [Port] 55033 [Flags] 2 [Guid] 1819434111 [EditorId] 764847374 [Version] 1048832 [Id] LinuxPlayer(13,172.28.0.12) [Debug] 0 [PackageName] LinuxPlayer [ProjectName] UnityEnvironment" to [225.0.0.222:54997]...
+ Multi-casting "[IP] 172.28.0.12 [Port] 55140 [Flags] 2 [Guid] 3084268831 [EditorId] 764847374 [Version] 1048832 [Id] LinuxPlayer(13,172.28.0.12) [Debug] 0 [PackageName] LinuxPlayer [ProjectName] UnityEnvironment" to [225.0.0.222:54997]...
  Preloaded 'lib_burst_generated.so'
  Preloaded 'libgrpc_csharp_ext.x64.so'
  PlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies
@@ -36,7 +36,7 @@ ALSA lib pcm.c:2664:(snd_pcm_open_noupdate) Unknown PCM default
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
  FMOD initialized on nosound output
  Begin MonoManager ReloadAssembly
- - Completed reload, in 0.095 seconds
+ - Completed reload, in 0.134 seconds
  ERROR: Shader Sprites/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Sprites/Mask shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Legacy Shaders/VertexLit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
@@ -68,7 +68,7 @@ ERROR: Shader Standard shader is not supported on this GPU (none of subshaders/f
  WARNING: Shader Unsupported: 'Standard' - All subshaders removed
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
- UnloadTime: 0.911359 ms
+ UnloadTime: 1.039552 ms
  ERROR: Shader UI/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  requesting resize 84 x 84
  Setting up 1 worker threads for Enlighten.
@@ -76,7 +76,7 @@ PlayerConnection::CleanupMemory Statistics:
  [ALLOC_TEMP_TLS] TLS Allocator
  StackAllocators :
  [ALLOC_TEMP_MAIN]
- Peak usage frame count: [16.0 KB-32.0 KB]: 2023 frames, [32.0 KB-64.0 KB]: 12995 frames, [64.0 KB-128.0 KB]: 3751 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [16.0 KB-32.0 KB]: 1645 frames, [32.0 KB-64.0 KB]: 13877 frames, [64.0 KB-128.0 KB]: 3247 frames, [2.0 MB-4.0 MB]: 1 frames
  Initial Block Size 4.0 MB
  Current Block Size 4.0 MB
  Peak Allocated Bytes 2.0 MB
@@ -126,7 +126,7 @@ PlayerConnection::CleanupMemory Statistics:
  Current Block Size 64.0 KB
  Peak Allocated Bytes 0 B
  Overflow Count 0
- [ALLOC_TEMP_Background Job.Worker 15]
+ [ALLOC_TEMP_Background Job.Worker 2]
  Initial Block Size 32.0 KB
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
@@ -136,7 +136,7 @@ PlayerConnection::CleanupMemory Statistics:
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
  Overflow Count 0
- [ALLOC_TEMP_Background Job.Worker 2]
+ [ALLOC_TEMP_Background Job.Worker 15]
  Initial Block Size 32.0 KB
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
@@ -149,7 +149,7 @@ PlayerConnection::CleanupMemory Statistics:
  [ALLOC_TEMP_Profiler.Dispatcher]
  Initial Block Size 64.0 KB
  Current Block Size 64.0 KB
- Peak Allocated Bytes 480 B
+ Peak Allocated Bytes 240 B
  Overflow Count 0
  [ALLOC_TEMP_Background Job.Worker 12]
  Initial Block Size 32.0 KB
@@ -186,12 +186,12 @@ PlayerConnection::CleanupMemory Statistics:
  Current Block Size 256.0 KB
  Peak Allocated Bytes 0.8 KB
  Overflow Count 0
- [ALLOC_TEMP_Background Job.Worker 9]
+ [ALLOC_TEMP_Background Job.Worker 14]
  Initial Block Size 32.0 KB
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
  Overflow Count 0
- [ALLOC_TEMP_Background Job.Worker 14]
+ [ALLOC_TEMP_Background Job.Worker 9]
  Initial Block Size 32.0 KB
  Current Block Size 32.0 KB
  Peak Allocated Bytes 0 B
@@ -213,22 +213,22 @@ PlayerConnection::CleanupMemory Statistics:
  Peak Allocated memory 1.6 MB
  Peak Large allocation bytes 0 B
  [ALLOC_DEFAULT] Dual Thread Allocator
- Peak main deferred allocation count 11919
+ Peak main deferred allocation count 13485
  [ALLOC_BUCKET]
  Large Block size 4.0 MB
  Used Block count 1
  Peak Allocated bytes 1.7 MB
  [ALLOC_DEFAULT_MAIN]
- Peak usage frame count: [8.0 MB-16.0 MB]: 1 frames, [16.0 MB-32.0 MB]: 18767 frames, [32.0 MB-64.0 MB]: 2 frames
+ Peak usage frame count: [8.0 MB-16.0 MB]: 1 frames, [16.0 MB-32.0 MB]: 18769 frames
  Requested Block Size 16.0 MB
  Peak Block count 3
- Peak Allocated memory 32.2 MB
+ Peak Allocated memory 30.0 MB
  Peak Large allocation bytes 0 B
  [ALLOC_DEFAULT_THREAD]
  Peak usage frame count: [16.0 MB-32.0 MB]: 18770 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 21.0 MB
+ Peak Allocated memory 23.7 MB
  Peak Large allocation bytes 16.0 MB
  [ALLOC_TEMP_JOB_1_FRAME]
  Initial Block Size 2.0 MB
@@ -284,7 +284,7 @@ PlayerConnection::CleanupMemory Statistics:
  Peak usage frame count: [1.0 MB-2.0 MB]: 18769 frames, [4.0 MB-8.0 MB]: 1 frames
  Requested Block Size 4.0 MB
  Peak Block count 2
- Peak Allocated memory 4.4 MB
+ Peak Allocated memory 4.7 MB
  Peak Large allocation bytes 0 B
  [ALLOC_TYPETREE] Dual Thread Allocator
  Peak main deferred allocation count 0
@@ -308,10 +308,10 @@ PlayerConnection::CleanupMemory Statistics:
  Peak usage frame count: [16.0 KB-32.0 KB]: 18770 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 29.0 KB
+ Peak Allocated memory 29.2 KB
  Peak Large allocation bytes 0 B
  [ALLOC_PROFILER_BUCKET]
  Large Block size 4.0 MB
  Used Block count 1
  Peak Allocated bytes 396 B
- ##utp:{"type":"MemoryLeaks","version":2,"phase":"Immediate","time":1692476421690,"processId":23139,"allocatedMemory":1970133,"memoryLabels":[{"Default":9033},{"Permanent":1264},{"NewDelete":12801},{"Thread":34460},{"Manager":10603},{"VertexData":12},{"Geometry":280},{"Texture":16},{"Shader":69173},{"Material":24},{"GfxDevice":35248},{"Animation":304},{"Audio":3976},{"Physics":288},{"Serialization":216},{"Input":9176},{"JobScheduler":200},{"Mono":40},{"ScriptingNativeRuntime":216},{"BaseObject":1609212},{"Resource":592},{"Renderer":1936},{"Transform":48},{"File":800},{"WebCam":24},{"Culling":40},{"Terrain":953},{"Wind":24},{"String":3447},{"DynamicArray":30868},{"HashMap":7680},{"Utility":1360},{"PoolAlloc":1160},{"TypeTree":1792},{"ScriptManager":80},{"RuntimeInitializeOnLoadManager":72},{"SpriteAtlas":112},{"GI":3272},{"Unet":16},{"Director":7760},{"WebRequest":720},{"VR":45473},{"SceneManager":424},{"Video":32},{"LazyScriptCache":32},{"NativeArray":384},{"Camera":25},{"Secure":1},{"SerializationCache":624},{"APIUpdating":5872},{"Subsystems":384},{"VirtualTexturing":57552},{"AssetReference":32}]}
+ ##utp:{"type":"MemoryLeaks","version":2,"phase":"Immediate","time":1692489868043,"processId":2598,"allocatedMemory":1970133,"memoryLabels":[{"Default":9033},{"Permanent":1264},{"NewDelete":12801},{"Thread":34460},{"Manager":10603},{"VertexData":12},{"Geometry":280},{"Texture":16},{"Shader":69173},{"Material":24},{"GfxDevice":35248},{"Animation":304},{"Audio":3976},{"Physics":288},{"Serialization":216},{"Input":9176},{"JobScheduler":200},{"Mono":40},{"ScriptingNativeRuntime":216},{"BaseObject":1609212},{"Resource":592},{"Renderer":1936},{"Transform":48},{"File":800},{"WebCam":24},{"Culling":40},{"Terrain":953},{"Wind":24},{"String":3447},{"DynamicArray":30868},{"HashMap":7680},{"Utility":1360},{"PoolAlloc":1160},{"TypeTree":1792},{"ScriptManager":80},{"RuntimeInitializeOnLoadManager":72},{"SpriteAtlas":112},{"GI":3272},{"Unet":16},{"Director":7760},{"WebRequest":720},{"VR":45473},{"SceneManager":424},{"Video":32},{"LazyScriptCache":32},{"NativeArray":384},{"Camera":25},{"Secure":1},{"SerializationCache":624},{"APIUpdating":5872},{"Subsystems":384},{"VirtualTexturing":57552},{"AssetReference":32}]}
run_logs/timers.json CHANGED
@@ -2,171 +2,171 @@
2
  "name": "root",
3
  "gauges": {
4
  "Pyramids.Policy.Entropy.mean": {
5
- "value": 0.3176240026950836,
6
- "min": 0.3176240026950836,
7
- "max": 1.4078160524368286,
8
  "count": 33
9
  },
10
  "Pyramids.Policy.Entropy.sum": {
11
- "value": 9625.27734375,
12
- "min": 9625.27734375,
13
- "max": 42707.5078125,
14
  "count": 33
15
  },
16
  "Pyramids.Step.mean": {
17
- "value": 989952.0,
18
- "min": 29934.0,
19
- "max": 989952.0,
20
  "count": 33
21
  },
22
  "Pyramids.Step.sum": {
23
- "value": 989952.0,
24
- "min": 29934.0,
25
- "max": 989952.0,
26
  "count": 33
27
  },
28
  "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
29
- "value": 0.6264662146568298,
30
- "min": -0.07948450744152069,
31
- "max": 0.6330125331878662,
32
  "count": 33
33
  },
34
  "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
35
- "value": 179.16934204101562,
36
- "min": -19.155765533447266,
37
- "max": 179.16934204101562,
38
  "count": 33
39
  },
40
  "Pyramids.Policy.RndValueEstimate.mean": {
41
- "value": -0.010167896747589111,
42
- "min": -0.03611525148153305,
43
- "max": 0.405227929353714,
44
  "count": 33
45
  },
46
  "Pyramids.Policy.RndValueEstimate.sum": {
47
- "value": -2.9080185890197754,
48
- "min": -10.148386001586914,
49
- "max": 96.03901672363281,
50
  "count": 33
51
  },
52
  "Pyramids.Losses.PolicyLoss.mean": {
53
- "value": 0.06750961403633329,
54
- "min": 0.06569960350819658,
55
- "max": 0.07435372414840578,
56
  "count": 33
57
  },
58
  "Pyramids.Losses.PolicyLoss.sum": {
59
- "value": 1.0126442105449993,
60
- "min": 0.47958796095064893,
61
- "max": 1.0785029756370932,
62
  "count": 33
63
  },
64
  "Pyramids.Losses.ValueLoss.mean": {
65
- "value": 0.018250664120260017,
66
- "min": 0.0008854382073355037,
67
- "max": 0.01881936385820643,
68
  "count": 33
69
  },
70
  "Pyramids.Losses.ValueLoss.sum": {
71
- "value": 0.2737599618039003,
72
- "min": 0.008854382073355037,
73
- "max": 0.2737599618039003,
74
  "count": 33
75
  },
76
  "Pyramids.Policy.LearningRate.mean": {
77
- "value": 7.5208174930933344e-06,
78
- "min": 7.5208174930933344e-06,
79
  "max": 0.00029515063018788575,
80
  "count": 33
81
  },
82
  "Pyramids.Policy.LearningRate.sum": {
83
- "value": 0.00011281226239640002,
84
- "min": 0.00011281226239640002,
85
- "max": 0.0037603354465548995,
86
  "count": 33
87
  },
88
  "Pyramids.Policy.Epsilon.mean": {
89
- "value": 0.10250690666666668,
90
- "min": 0.10250690666666668,
91
  "max": 0.19838354285714285,
92
  "count": 33
93
  },
94
  "Pyramids.Policy.Epsilon.sum": {
95
- "value": 1.5376036000000002,
96
- "min": 1.3886848,
97
- "max": 2.6534451,
98
  "count": 33
99
  },
100
  "Pyramids.Policy.Beta.mean": {
101
- "value": 0.00026043997600000006,
102
- "min": 0.00026043997600000006,
103
  "max": 0.00983851593142857,
104
  "count": 33
105
  },
106
  "Pyramids.Policy.Beta.sum": {
107
- "value": 0.003906599640000001,
108
- "min": 0.003906599640000001,
109
- "max": 0.12535916548999998,
110
  "count": 33
111
  },
112
  "Pyramids.Losses.RNDLoss.mean": {
113
- "value": 0.009131926111876965,
114
- "min": 0.009062698110938072,
115
- "max": 0.42660340666770935,
116
  "count": 33
117
  },
118
  "Pyramids.Losses.RNDLoss.sum": {
119
- "value": 0.1369788944721222,
120
- "min": 0.1268777698278427,
121
- "max": 2.9862239360809326,
122
  "count": 33
123
  },
124
  "Pyramids.Environment.EpisodeLength.mean": {
125
- "value": 274.2,
126
- "min": 274.2,
127
- "max": 991.8235294117648,
128
  "count": 33
129
  },
130
  "Pyramids.Environment.EpisodeLength.sum": {
131
- "value": 28791.0,
132
- "min": 16861.0,
133
- "max": 32631.0,
134
  "count": 33
135
  },
136
  "Pyramids.Environment.CumulativeReward.mean": {
137
- "value": 1.7257999879973276,
138
- "min": -0.9275563033297658,
139
- "max": 1.7257999879973276,
140
  "count": 33
141
  },
142
  "Pyramids.Environment.CumulativeReward.sum": {
143
- "value": 181.2089987397194,
144
- "min": -29.681801706552505,
145
- "max": 181.2089987397194,
146
  "count": 33
147
  },
148
  "Pyramids.Policy.ExtrinsicReward.mean": {
149
- "value": 1.7257999879973276,
150
- "min": -0.9275563033297658,
151
- "max": 1.7257999879973276,
152
  "count": 33
153
  },
154
  "Pyramids.Policy.ExtrinsicReward.sum": {
155
- "value": 181.2089987397194,
156
- "min": -29.681801706552505,
157
- "max": 181.2089987397194,
158
  "count": 33
159
  },
160
  "Pyramids.Policy.RndReward.mean": {
161
- "value": 0.02603166069444948,
162
- "min": 0.02603166069444948,
163
- "max": 9.20870790674406,
164
  "count": 33
165
  },
166
  "Pyramids.Policy.RndReward.sum": {
167
- "value": 2.7333243729171954,
168
- "min": 2.7333243729171954,
169
- "max": 156.548034414649,
170
  "count": 33
171
  },
172
  "Pyramids.IsTraining.mean": {
@@ -184,7 +184,7 @@
184
  },
185
  "metadata": {
186
  "timer_format_version": "0.1.0",
187
- "start_time_seconds": "1692474035",
188
  "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
189
  "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
190
  "mlagents_version": "0.31.0.dev0",
@@ -192,59 +192,59 @@
192
  "communication_protocol_version": "1.5.0",
193
  "pytorch_version": "1.11.0+cu102",
194
  "numpy_version": "1.21.2",
195
- "end_time_seconds": "1692476421"
196
  },
197
- "total": 2385.7635191300005,
198
  "count": 1,
199
- "self": 1.1398265700017873,
200
  "children": {
201
  "run_training.setup": {
202
- "total": 0.039800353999453364,
203
  "count": 1,
204
- "self": 0.039800353999453364
205
  },
206
  "TrainerController.start_learning": {
207
- "total": 2384.5838922059993,
208
  "count": 1,
209
- "self": 1.43175545304166,
210
  "children": {
211
  "TrainerController._reset_env": {
212
- "total": 4.028379483999743,
213
  "count": 1,
214
- "self": 4.028379483999743
215
  },
216
  "TrainerController.advance": {
217
- "total": 2378.9574415559573,
218
- "count": 64200,
219
- "self": 1.5593161039860206,
220
  "children": {
221
  "env_step": {
222
- "total": 1696.1974868390162,
223
- "count": 64200,
224
- "self": 1581.1558944628578,
225
  "children": {
226
  "SubprocessEnvManager._take_step": {
227
- "total": 114.15801947410091,
228
- "count": 64200,
229
- "self": 4.988137964079215,
230
  "children": {
231
  "TorchPolicy.evaluate": {
232
- "total": 109.1698815100217,
233
- "count": 62565,
234
- "self": 109.1698815100217
235
  }
236
  }
237
  },
238
  "workers": {
239
- "total": 0.8835729020574945,
240
- "count": 64200,
241
  "self": 0.0,
242
  "children": {
243
  "worker_root": {
244
- "total": 2378.8353249059937,
245
- "count": 64200,
246
  "is_parallel": true,
247
- "self": 920.0597535019606,
248
  "children": {
249
  "run_training.setup": {
250
  "total": 0.0,
@@ -253,48 +253,48 @@
253
  "self": 0.0,
254
  "children": {
255
  "steps_from_proto": {
256
- "total": 0.002013928999986092,
257
  "count": 1,
258
  "is_parallel": true,
259
- "self": 0.000578698000026634,
260
  "children": {
261
  "_process_rank_one_or_two_observation": {
262
- "total": 0.0014352309999594581,
263
  "count": 8,
264
  "is_parallel": true,
265
- "self": 0.0014352309999594581
266
  }
267
  }
268
  },
269
  "UnityEnvironment.step": {
270
- "total": 0.053157956000177364,
271
  "count": 1,
272
  "is_parallel": true,
273
- "self": 0.0006968959996811463,
274
  "children": {
275
  "UnityEnvironment._generate_step_input": {
276
- "total": 0.0006791390005673748,
277
  "count": 1,
278
  "is_parallel": true,
279
- "self": 0.0006791390005673748
280
  },
281
  "communicator.exchange": {
282
- "total": 0.049708182999893324,
283
  "count": 1,
284
  "is_parallel": true,
285
- "self": 0.049708182999893324
286
  },
287
  "steps_from_proto": {
288
- "total": 0.0020737380000355188,
289
  "count": 1,
290
  "is_parallel": true,
291
- "self": 0.0003893570001309854,
292
  "children": {
293
  "_process_rank_one_or_two_observation": {
294
- "total": 0.0016843809999045334,
295
  "count": 8,
296
  "is_parallel": true,
297
- "self": 0.0016843809999045334
298
  }
299
  }
300
  }
@@ -303,34 +303,34 @@
303
  }
304
  },
305
  "UnityEnvironment.step": {
306
- "total": 1458.7755714040331,
307
- "count": 64199,
308
  "is_parallel": true,
309
- "self": 35.262679826835665,
310
  "children": {
311
  "UnityEnvironment._generate_step_input": {
312
- "total": 24.55862793507731,
313
- "count": 64199,
314
  "is_parallel": true,
315
- "self": 24.55862793507731
316
  },
317
  "communicator.exchange": {
318
- "total": 1285.7376632601145,
319
- "count": 64199,
320
  "is_parallel": true,
321
- "self": 1285.7376632601145
322
  },
323
  "steps_from_proto": {
324
- "total": 113.21660038200571,
325
- "count": 64199,
326
  "is_parallel": true,
327
- "self": 22.551804732842356,
328
  "children": {
329
  "_process_rank_one_or_two_observation": {
330
- "total": 90.66479564916335,
331
- "count": 513592,
332
  "is_parallel": true,
333
- "self": 90.66479564916335
334
  }
335
  }
336
  }
@@ -343,31 +343,31 @@
343
  }
344
  },
345
  "trainer_advance": {
346
- "total": 681.2006386129551,
347
- "count": 64200,
348
- "self": 2.826157913771567,
349
  "children": {
350
  "process_trajectory": {
351
- "total": 118.92526791418186,
352
- "count": 64200,
353
- "self": 118.63489813118122,
354
  "children": {
355
  "RLTrainer._checkpoint": {
356
- "total": 0.2903697830006422,
357
  "count": 2,
358
- "self": 0.2903697830006422
359
  }
360
  }
361
  },
362
  "_update_policy": {
363
- "total": 559.4492127850017,
364
- "count": 459,
365
- "self": 364.9500068950574,
366
  "children": {
367
  "TorchPPOOptimizer.update": {
368
- "total": 194.49920588994428,
369
- "count": 22845,
370
- "self": 194.49920588994428
371
  }
372
  }
373
  }
@@ -376,19 +376,19 @@
376
  }
377
  },
378
  "trainer_threads": {
379
- "total": 1.4260003808885813e-06,
380
  "count": 1,
381
- "self": 1.4260003808885813e-06
382
  },
383
  "TrainerController._save_models": {
384
- "total": 0.16631428700020479,
385
  "count": 1,
386
- "self": 0.001997520999793778,
387
  "children": {
388
  "RLTrainer._checkpoint": {
389
- "total": 0.164316766000411,
390
  "count": 1,
391
- "self": 0.164316766000411
392
  }
393
  }
394
  }
 
2
  "name": "root",
3
  "gauges": {
4
  "Pyramids.Policy.Entropy.mean": {
5
+ "value": 0.32608816027641296,
6
+ "min": 0.3260343372821808,
7
+ "max": 1.4834710359573364,
8
  "count": 33
9
  },
10
  "Pyramids.Policy.Entropy.sum": {
11
+ "value": 9860.90625,
12
+ "min": 9629.75,
13
+ "max": 45002.578125,
14
  "count": 33
15
  },
16
  "Pyramids.Step.mean": {
17
+ "value": 989958.0,
18
+ "min": 29952.0,
19
+ "max": 989958.0,
20
  "count": 33
21
  },
22
  "Pyramids.Step.sum": {
23
+ "value": 989958.0,
24
+ "min": 29952.0,
25
+ "max": 989958.0,
26
  "count": 33
27
  },
28
  "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
29
+ "value": 0.4889722764492035,
30
+ "min": -0.0827411636710167,
31
+ "max": 0.5884976387023926,
32
  "count": 33
33
  },
34
  "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
35
+ "value": 133.00045776367188,
36
+ "min": -19.94062042236328,
37
+ "max": 160.07135009765625,
38
  "count": 33
39
  },
40
  "Pyramids.Policy.RndValueEstimate.mean": {
41
+ "value": -0.018497852608561516,
42
+ "min": -0.018497852608561516,
43
+ "max": 0.26822128891944885,
44
  "count": 33
45
  },
46
  "Pyramids.Policy.RndValueEstimate.sum": {
47
+ "value": -5.031415939331055,
48
+ "min": -5.031415939331055,
49
+ "max": 64.37310791015625,
50
  "count": 33
51
  },
52
  "Pyramids.Losses.PolicyLoss.mean": {
53
+ "value": 0.06846923154430837,
54
+ "min": 0.06331308550439078,
55
+ "max": 0.0734606170154934,
56
  "count": 33
57
  },
58
  "Pyramids.Losses.PolicyLoss.sum": {
59
+ "value": 0.9585692416203172,
60
+ "min": 0.4967418422241061,
61
+ "max": 1.0548762422857931,
62
  "count": 33
63
  },
64
  "Pyramids.Losses.ValueLoss.mean": {
65
+ "value": 0.01632231401597265,
66
+ "min": 0.0006867401268466135,
67
+ "max": 0.017558857621283484,
68
  "count": 33
69
  },
70
  "Pyramids.Losses.ValueLoss.sum": {
71
+ "value": 0.22851239622361713,
72
+ "min": 0.007554141395312749,
73
+ "max": 0.24582400669796875,
74
  "count": 33
75
  },
76
  "Pyramids.Policy.LearningRate.mean": {
77
+ "value": 7.700476004635716e-06,
78
+ "min": 7.700476004635716e-06,
79
  "max": 0.00029515063018788575,
80
  "count": 33
81
  },
82
  "Pyramids.Policy.LearningRate.sum": {
83
+ "value": 0.00010780666406490002,
84
+ "min": 0.00010780666406490002,
85
+ "max": 0.003256397314534299,
86
  "count": 33
87
  },
88
  "Pyramids.Policy.Epsilon.mean": {
89
+ "value": 0.10256679285714286,
90
+ "min": 0.10256679285714286,
91
  "max": 0.19838354285714285,
92
  "count": 33
93
  },
94
  "Pyramids.Policy.Epsilon.sum": {
95
+ "value": 1.4359351,
96
+ "min": 1.3691136000000002,
97
+ "max": 2.4840998,
98
  "count": 33
99
  },
100
  "Pyramids.Policy.Beta.mean": {
101
+ "value": 0.0002664226064285716,
102
+ "min": 0.0002664226064285716,
103
  "max": 0.00983851593142857,
104
  "count": 33
105
  },
106
  "Pyramids.Policy.Beta.sum": {
107
+ "value": 0.0037299164900000017,
108
+ "min": 0.0037299164900000017,
109
+ "max": 0.10856802343,
110
  "count": 33
111
  },
112
  "Pyramids.Losses.RNDLoss.mean": {
113
+ "value": 0.010939222760498524,
114
+ "min": 0.010939222760498524,
115
+ "max": 0.30432891845703125,
116
  "count": 33
117
  },
118
  "Pyramids.Losses.RNDLoss.sum": {
119
+ "value": 0.15314911305904388,
120
+ "min": 0.15314911305904388,
121
+ "max": 2.1303024291992188,
122
  "count": 33
123
  },
124
  "Pyramids.Environment.EpisodeLength.mean": {
125
+ "value": 365.175,
126
+ "min": 328.57471264367814,
127
+ "max": 999.0,
128
  "count": 33
129
  },
130
  "Pyramids.Environment.EpisodeLength.sum": {
131
+ "value": 29214.0,
132
+ "min": 15984.0,
133
+ "max": 32773.0,
134
  "count": 33
135
  },
136
  "Pyramids.Environment.CumulativeReward.mean": {
137
+ "value": 1.5848124746233225,
138
+ "min": -1.0000000521540642,
139
+ "max": 1.6680941045284272,
140
  "count": 33
141
  },
142
  "Pyramids.Environment.CumulativeReward.sum": {
143
+ "value": 126.7849979698658,
144
+ "min": -32.000001668930054,
145
+ "max": 142.0559982061386,
146
  "count": 33
147
  },
148
  "Pyramids.Policy.ExtrinsicReward.mean": {
149
+ "value": 1.5848124746233225,
150
+ "min": -1.0000000521540642,
151
+ "max": 1.6680941045284272,
152
  "count": 33
153
  },
154
  "Pyramids.Policy.ExtrinsicReward.sum": {
155
+ "value": 126.7849979698658,
156
+ "min": -32.000001668930054,
157
+ "max": 142.0559982061386,
158
  "count": 33
159
  },
160
  "Pyramids.Policy.RndReward.mean": {
161
+ "value": 0.04148480015937821,
162
+ "min": 0.040637715126858785,
163
+ "max": 5.9434097283519804,
164
  "count": 33
165
  },
166
  "Pyramids.Policy.RndReward.sum": {
167
+ "value": 3.318784012750257,
168
+ "min": 3.318784012750257,
169
+ "max": 95.09455565363169,
170
  "count": 33
171
  },
172
  "Pyramids.IsTraining.mean": {
 
184
  },
185
  "metadata": {
186
  "timer_format_version": "0.1.0",
187
+ "start_time_seconds": "1692487452",
188
  "python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
189
  "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
190
  "mlagents_version": "0.31.0.dev0",
 
192
  "communication_protocol_version": "1.5.0",
193
  "pytorch_version": "1.11.0+cu102",
194
  "numpy_version": "1.21.2",
195
+ "end_time_seconds": "1692489868"
196
  },
197
+ "total": 2415.498542838,
198
  "count": 1,
199
+ "self": 0.495236699000543,
200
  "children": {
201
  "run_training.setup": {
202
+ "total": 0.07260470699998223,
203
  "count": 1,
204
+ "self": 0.07260470699998223
205
  },
206
  "TrainerController.start_learning": {
207
+ "total": 2414.9307014319998,
208
  "count": 1,
209
+ "self": 1.4973012879968337,
210
  "children": {
211
  "TrainerController._reset_env": {
212
+ "total": 4.916503325000008,
213
  "count": 1,
214
+ "self": 4.916503325000008
215
  },
216
  "TrainerController.advance": {
217
+ "total": 2408.4185666210033,
218
+ "count": 63863,
219
+ "self": 1.5154665369973372,
220
  "children": {
221
  "env_step": {
222
+ "total": 1701.9834163420128,
223
+ "count": 63863,
224
+ "self": 1583.2717276829908,
225
  "children": {
226
  "SubprocessEnvManager._take_step": {
227
+ "total": 117.76041172798875,
228
+ "count": 63863,
229
+ "self": 4.990111474026207,
230
  "children": {
231
  "TorchPolicy.evaluate": {
232
+ "total": 112.77030025396255,
233
+ "count": 62566,
234
+ "self": 112.77030025396255
235
  }
236
  }
237
  },
238
  "workers": {
239
+ "total": 0.9512769310332487,
240
+ "count": 63863,
241
  "self": 0.0,
242
  "children": {
243
  "worker_root": {
244
+ "total": 2409.2957030169414,
245
+ "count": 63863,
246
  "is_parallel": true,
247
+ "self": 950.3409683858717,
248
  "children": {
249
  "run_training.setup": {
250
  "total": 0.0,
 
253
  "self": 0.0,
254
  "children": {
255
  "steps_from_proto": {
256
+ "total": 0.006422931000088283,
257
  "count": 1,
258
  "is_parallel": true,
259
+ "self": 0.004851466000332039,
260
  "children": {
261
  "_process_rank_one_or_two_observation": {
262
+ "total": 0.0015714649997562447,
263
  "count": 8,
264
  "is_parallel": true,
265
+ "self": 0.0015714649997562447
266
  }
267
  }
268
  },
269
  "UnityEnvironment.step": {
270
+ "total": 0.04976937899994027,
271
  "count": 1,
272
  "is_parallel": true,
273
+ "self": 0.0006456529999923077,
274
  "children": {
275
  "UnityEnvironment._generate_step_input": {
276
+ "total": 0.0005717809999623569,
277
  "count": 1,
278
  "is_parallel": true,
279
+ "self": 0.0005717809999623569
280
  },
281
  "communicator.exchange": {
282
+ "total": 0.04651998499991805,
283
  "count": 1,
284
  "is_parallel": true,
285
+ "self": 0.04651998499991805
286
  },
287
  "steps_from_proto": {
288
+ "total": 0.0020319600000675564,
289
  "count": 1,
290
  "is_parallel": true,
291
+ "self": 0.00039121600002545165,
292
  "children": {
293
  "_process_rank_one_or_two_observation": {
294
+ "total": 0.0016407440000421047,
295
  "count": 8,
296
  "is_parallel": true,
297
+ "self": 0.0016407440000421047
298
  }
299
  }
300
  }
 
303
  }
304
  },
305
  "UnityEnvironment.step": {
306
+ "total": 1458.9547346310696,
307
+ "count": 63862,
308
  "is_parallel": true,
309
+ "self": 36.96907865810954,
310
  "children": {
311
  "UnityEnvironment._generate_step_input": {
312
+ "total": 24.42175522797504,
313
+ "count": 63862,
314
  "is_parallel": true,
315
+ "self": 24.42175522797504
316
  },
317
  "communicator.exchange": {
318
+ "total": 1283.5673147770228,
319
+ "count": 63862,
320
  "is_parallel": true,
321
+ "self": 1283.5673147770228
322
  },
323
  "steps_from_proto": {
324
+ "total": 113.99658596796235,
325
+ "count": 63862,
326
  "is_parallel": true,
327
+ "self": 22.70101223880124,
328
  "children": {
329
  "_process_rank_one_or_two_observation": {
330
+ "total": 91.2955737291611,
331
+ "count": 510896,
332
  "is_parallel": true,
333
+ "self": 91.2955737291611
334
  }
335
  }
336
  }
 
343
  }
344
  },
345
  "trainer_advance": {
346
+ "total": 704.9196837419933,
347
+ "count": 63863,
348
+ "self": 2.9158751109690684,
349
  "children": {
350
  "process_trajectory": {
351
+ "total": 117.81247502202996,
352
+ "count": 63863,
353
+ "self": 117.58268309902951,
354
  "children": {
355
  "RLTrainer._checkpoint": {
356
+ "total": 0.22979192300044815,
357
  "count": 2,
358
+ "self": 0.22979192300044815
359
  }
360
  }
361
  },
362
  "_update_policy": {
363
+ "total": 584.1913336089942,
364
+ "count": 443,
365
+ "self": 383.9529126359703,
366
  "children": {
367
  "TorchPPOOptimizer.update": {
368
+ "total": 200.23842097302395,
369
+ "count": 22812,
370
+ "self": 200.23842097302395
371
  }
372
  }
373
  }
 
376
  }
377
  },
378
  "trainer_threads": {
379
+ "total": 1.1539996194187552e-06,
380
  "count": 1,
381
+ "self": 1.1539996194187552e-06
382
  },
383
  "TrainerController._save_models": {
384
+ "total": 0.09832904399991094,
385
  "count": 1,
386
+ "self": 0.0014845659998172778,
387
  "children": {
388
  "RLTrainer._checkpoint": {
389
+ "total": 0.09684447800009366,
390
  "count": 1,
391
+ "self": 0.09684447800009366
392
  }
393
  }
394
  }
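run_logs/timers.json stores both the profiling tree and the per-metric gauges shown above (value, min, max, count). A standard-library sketch for dumping the gauges; the top-level "name" and "gauges" keys are the ones visible in this diff:

# Sketch: print the gauge summaries recorded by the run.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

for name, gauge in timers["gauges"].items():
    print(name, gauge["value"], gauge["min"], gauge["max"], gauge["count"])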
run_logs/training_status.json CHANGED
@@ -2,40 +2,40 @@
  "Pyramids": {
  "checkpoints": [
  {
- "steps": 499896,
- "file_path": "results/Pyramids Training/Pyramids/Pyramids-499896.onnx",
- "reward": 1.6084999814629555,
- "creation_time": 1692475096.5741851,
+ "steps": 499963,
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-499963.onnx",
+ "reward": 1.2039999663829803,
+ "creation_time": 1692488572.7203572,
  "auxillary_file_paths": [
- "results/Pyramids Training/Pyramids/Pyramids-499896.pt"
+ "results/Pyramids Training/Pyramids/Pyramids-499963.pt"
  ]
  },
  {
- "steps": 999983,
- "file_path": "results/Pyramids Training/Pyramids/Pyramids-999983.onnx",
- "reward": null,
- "creation_time": 1692476420.4213278,
+ "steps": 999970,
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-999970.onnx",
+ "reward": 1.7237499617040157,
+ "creation_time": 1692489867.5082932,
  "auxillary_file_paths": [
- "results/Pyramids Training/Pyramids/Pyramids-999983.pt"
+ "results/Pyramids Training/Pyramids/Pyramids-999970.pt"
  ]
  },
  {
- "steps": 1000111,
- "file_path": "results/Pyramids Training/Pyramids/Pyramids-1000111.onnx",
- "reward": null,
- "creation_time": 1692476420.604002,
+ "steps": 1000093,
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-1000093.onnx",
+ "reward": 1.70339997112751,
+ "creation_time": 1692489867.6160803,
  "auxillary_file_paths": [
- "results/Pyramids Training/Pyramids/Pyramids-1000111.pt"
+ "results/Pyramids Training/Pyramids/Pyramids-1000093.pt"
  ]
  }
  ],
  "final_checkpoint": {
- "steps": 1000111,
+ "steps": 1000093,
  "file_path": "results/Pyramids Training/Pyramids.onnx",
- "reward": null,
- "creation_time": 1692476420.604002,
+ "reward": 1.70339997112751,
+ "creation_time": 1692489867.6160803,
  "auxillary_file_paths": [
- "results/Pyramids Training/Pyramids/Pyramids-1000111.pt"
+ "results/Pyramids Training/Pyramids/Pyramids-1000093.pt"
  ]
  }
  },
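run_logs/training_status.json tracks the checkpoint history for the Pyramids behavior and the final exported model. A standard-library sketch for reading it; the keys used below are the ones visible in this diff:

# Sketch: read the checkpoint bookkeeping and report the final exported policy.
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

for ckpt in status["Pyramids"]["checkpoints"]:
    print(ckpt["steps"], ckpt["file_path"], ckpt["reward"])
print("final:", status["Pyramids"]["final_checkpoint"]["file_path"])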