dbaibak committed on
Commit 0bc779f • 1 Parent(s): fb9107b

Adjusted parameters
Pyramids.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d9bb3a8af92f4b133d0472f540d1acfb2b47091353cea77aa845388a6350fc40
+oid sha256:2202ce134f5c363d9184eea66cede604f3ce94f889c109cf5fd6682776346c4d
 size 1417437
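Each model file in this commit is tracked with Git LFS, so the diffs above and below only show the pointer text (spec version, SHA-256 oid, and size in bytes), not the binaries themselves. As a minimal sketch, using the file name and hash from this diff, a downloaded Pyramids.onnx could be checked against its pointer like this (illustrative only, not part of the repository):

```python
# Sketch: verify a downloaded LFS object against the oid from its pointer file.
# The expected hash is copied from the Pyramids.onnx pointer in this commit.
import hashlib

EXPECTED_OID = "2202ce134f5c363d9184eea66cede604f3ce94f889c109cf5fd6682776346c4d"

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

print(sha256_of("Pyramids.onnx") == EXPECTED_OID)  # True if the download matches
```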
Pyramids/{Pyramids-1000064.onnx → Pyramids-1499928.onnx} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d9bb3a8af92f4b133d0472f540d1acfb2b47091353cea77aa845388a6350fc40
+oid sha256:41737e9c5bca314c9e1dd391758a68b6272ac4c2a015b4c0cb836856ba20b352
 size 1417437
Pyramids/{Pyramids-499927.pt → Pyramids-1499928.pt} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16d8552a5e7a8b68da10eb66394601983424046f43ea81671151cf5996464ee0
+oid sha256:5289d0513b8a9e09e238629b330663cca608e437729bad46f2e9d1f0dd422afb
 size 8652382
Pyramids/{Pyramids-999985.onnx → Pyramids-1999927.onnx} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d9bb3a8af92f4b133d0472f540d1acfb2b47091353cea77aa845388a6350fc40
+oid sha256:f811e4f96b315f4bd89a9bc3dd8204d85acae0e5535cb75f2969d5f22f28e6c6
 size 1417437
Pyramids/{Pyramids-1000064.pt → Pyramids-1999927.pt} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1510b7f4f6d2ae1d80d5cc71f566164e0f9217d455fd309b4a55a184b7829cc3
+oid sha256:0d8bfee5b34d846f32b77ab23eb9a0c9791abf13e74105fc62ea6a258331cb63
 size 8652382
Pyramids/{Pyramids-499927.onnx → Pyramids-2499964.onnx} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a2a4c6e40d2db09df89c511ea1922b92a458b94f05f907b422a010373d0fcf54
+oid sha256:4307a8ab5b640d5a30e0da5feb352ff6c526ddd93e4b52e4c0748aea95da8f25
 size 1417437
Pyramids/{Pyramids-999985.pt → Pyramids-2499964.pt} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:729fc69d87a27f81df37a7bc0fae032575223de65fe2439243fad4c7134b6c08
+oid sha256:4823423adabc6c3afc97771af33e36a38066633c73befaab9d8ce977a96ede9f
 size 8652382
Pyramids/{Pyramids-24192.pt → Pyramids-2999981.onnx} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9565db4d50e0dbef463f52b139b5a5974a8da3ef61c0c825f71958641df9c393
-size 241
+oid sha256:2202ce134f5c363d9184eea66cede604f3ce94f889c109cf5fd6682776346c4d
+size 1417437
Pyramids/Pyramids-2999981.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94b817aa1a046a3b3774bd1ebc56f7eaa15725cf78b89f623a8b62654fb3dd33
+size 8652382
Pyramids/Pyramids-3000109.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2202ce134f5c363d9184eea66cede604f3ce94f889c109cf5fd6682776346c4d
+size 1417437
Pyramids/Pyramids-3000109.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:065a50526f49c5eb5973cf6b260552c8948f11ce55199be31377c58b6155621c
+size 8652382
Pyramids/checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1510b7f4f6d2ae1d80d5cc71f566164e0f9217d455fd309b4a55a184b7829cc3
+oid sha256:065a50526f49c5eb5973cf6b260552c8948f11ce55199be31377c58b6155621c
 size 8652382
Pyramids/{events.out.tfevents.1673709222.cc33aa7b43a4.2690.0 → events.out.tfevents.1673783629.1621ca8d36da.20573.0} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:91ae147a256e4ebfa4bb99cf7e0caa4c1595f56d41528d306386ddeacdd43bce
-size 323677
+oid sha256:b7c3b91917158977ea5ee166bc813f677f3cf241556c89f185133a78119bc9a9
+size 998771
config.json CHANGED
@@ -1 +1 @@
- {"default_settings": null, "behaviors": {"Pyramids": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 128, "buffer_size": 2048, "learning_rate": 0.0003, "beta": 0.01, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 3, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "network_settings": {"normalize": false, "hidden_units": 512, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.99, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}, "rnd": {"gamma": 0.99, "strength": 0.01, "network_settings": {"normalize": false, "hidden_units": 64, "num_layers": 3, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "learning_rate": 0.0001, "encoding_size": null}}, "init_path": null, "keep_checkpoints": 5, "checkpoint_interval": 500000, "max_steps": 1000000, "time_horizon": 128, "summary_freq": 30000, "threaded": false, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./training-envs-executables/linux/Pyramids/Pyramids", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "Pyramids Training", "initialize_from": null, "load_model": false, "resume": false, "force": true, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
+ {"default_settings": null, "behaviors": {"Pyramids": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 256, "buffer_size": 2048, "learning_rate": 0.0001, "beta": 0.01, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 5, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "network_settings": {"normalize": false, "hidden_units": 512, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.99, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}, "rnd": {"gamma": 0.99, "strength": 0.01, "network_settings": {"normalize": false, "hidden_units": 64, "num_layers": 3, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "learning_rate": 0.0001, "encoding_size": null}}, "init_path": null, "keep_checkpoints": 5, "checkpoint_interval": 500000, "max_steps": 3000000, "time_horizon": 128, "summary_freq": 30000, "threaded": false, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./training-envs-executables/linux/Pyramids/Pyramids", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "Pyramids Training", "initialize_from": null, "load_model": false, "resume": false, "force": true, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
configuration.yaml CHANGED
@@ -3,13 +3,13 @@ behaviors:
   Pyramids:
     trainer_type: ppo
     hyperparameters:
-      batch_size: 128
+      batch_size: 256
       buffer_size: 2048
-      learning_rate: 0.0003
+      learning_rate: 0.0001
       beta: 0.01
       epsilon: 0.2
       lambd: 0.95
-      num_epoch: 3
+      num_epoch: 5
       learning_rate_schedule: linear
       beta_schedule: linear
       epsilon_schedule: linear
@@ -49,7 +49,7 @@ behaviors:
     init_path: null
     keep_checkpoints: 5
     checkpoint_interval: 500000
-    max_steps: 1000000
+    max_steps: 3000000
     time_horizon: 128
     summary_freq: 30000
     threaded: false
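Because learning_rate_schedule (and the beta/epsilon schedules) stay linear, lowering the initial rate to 0.0001 and tripling max_steps to 3,000,000 stretches the anneal over the whole longer run. A rough illustration below, assuming the rate decays approximately to zero at max_steps, which is how ML-Agents documents its linear schedule; this is a sketch, not the library's exact implementation:

```python
# Rough sketch of a linear learning-rate anneal over the new max_steps.
# Assumes decay from the initial rate toward ~0 at max_steps; illustrative only.
LEARNING_RATE = 1e-4     # learning_rate after this commit
MAX_STEPS = 3_000_000    # max_steps after this commit

def linear_lr(step: int) -> float:
    remaining = max(1.0 - step / MAX_STEPS, 0.0)
    return LEARNING_RATE * remaining

for step in (0, 1_500_000, 2_999_981):
    print(f"step {step:>9,d}: lr ~ {linear_lr(step):.2e}")
```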
run_logs/Player-0.log CHANGED
@@ -1,9 +1,12 @@
1
  Mono path[0] = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/Managed'
2
  Mono config path = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/MonoBleedingEdge/etc'
3
  Found 1 interfaces on host : 0) 172.28.0.2
4
- Multi-casting "[IP] 172.28.0.2 [Port] 55055 [Flags] 2 [Guid] 364859310 [EditorId] 764847374 [Version] 1048832 [Id] LinuxPlayer(13,172.28.0.2) [Debug] 0 [PackageName] LinuxPlayer [ProjectName] UnityEnvironment" to [225.0.0.222:54997]...
5
  Preloaded 'lib_burst_generated.so'
6
  Preloaded 'libgrpc_csharp_ext.x64.so'
7
  Initialize engine version: 2021.3.5f1 (40eb3a945986)
8
  [Subsystems] Discovering subsystems at path /content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/UnitySubsystems
9
  Forcing GfxDevice: Null
@@ -33,7 +36,7 @@ ALSA lib pcm.c:2495:(snd_pcm_open_noupdate) Unknown PCM default
33
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
34
  FMOD initialized on nosound output
35
  Begin MonoManager ReloadAssembly
36
- - Completed reload, in 0.081 seconds
37
  ERROR: Shader Sprites/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
38
  ERROR: Shader Sprites/Mask shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
39
  ERROR: Shader Legacy Shaders/VertexLit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
@@ -65,7 +68,7 @@ ERROR: Shader Standard shader is not supported on this GPU (none of subshaders/f
65
  WARNING: Shader Unsupported: 'Standard' - All subshaders removed
66
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
67
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
68
- UnloadTime: 0.967208 ms
69
  ERROR: Shader UI/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
70
  requesting resize 84 x 84
71
  Setting up 1 worker threads for Enlighten.
@@ -73,7 +76,7 @@ PlayerConnection::CleanupMemory Statistics:
73
  [ALLOC_TEMP_TLS] TLS Allocator
74
  StackAllocators :
75
  [ALLOC_TEMP_MAIN]
76
- Peak usage frame count: [16.0 KB-32.0 KB]: 2910 frames, [32.0 KB-64.0 KB]: 13103 frames, [64.0 KB-128.0 KB]: 2755 frames, [2.0 MB-4.0 MB]: 1 frames
77
  Initial Block Size 4.0 MB
78
  Current Block Size 4.0 MB
79
  Peak Allocated Bytes 2.0 MB
@@ -204,28 +207,28 @@ PlayerConnection::CleanupMemory Statistics:
204
  Peak Allocated Bytes 128 B
205
  Overflow Count 0
206
  [ALLOC_MEMORYPROFILER]
207
- Peak usage frame count: [0.5 MB-1.0 MB]: 299 frames, [1.0 MB-2.0 MB]: 18470 frames
208
  Requested Block Size 1.0 MB
209
  Peak Block count 2
210
  Peak Allocated memory 1.6 MB
211
  Peak Large allocation bytes 0 B
212
  [ALLOC_DEFAULT] Dual Thread Allocator
213
- Peak main deferred allocation count 12624
214
  [ALLOC_BUCKET]
215
  Large Block size 4.0 MB
216
  Used Block count 1
217
  Peak Allocated bytes 1.6 MB
218
  [ALLOC_DEFAULT_MAIN]
219
- Peak usage frame count: [8.0 MB-16.0 MB]: 1 frames, [16.0 MB-32.0 MB]: 18767 frames, [32.0 MB-64.0 MB]: 1 frames
220
  Requested Block Size 16.0 MB
221
  Peak Block count 3
222
- Peak Allocated memory 32.2 MB
223
  Peak Large allocation bytes 0 B
224
  [ALLOC_DEFAULT_THREAD]
225
- Peak usage frame count: [16.0 MB-32.0 MB]: 18769 frames
226
  Requested Block Size 16.0 MB
227
  Peak Block count 1
228
- Peak Allocated memory 21.3 MB
229
  Peak Large allocation bytes 16.0 MB
230
  [ALLOC_TEMP_JOB_1_FRAME]
231
  Initial Block Size 2.0 MB
@@ -254,13 +257,13 @@ PlayerConnection::CleanupMemory Statistics:
254
  Used Block count 1
255
  Peak Allocated bytes 1.6 MB
256
  [ALLOC_GFX_MAIN]
257
- Peak usage frame count: [32.0 KB-64.0 KB]: 18768 frames, [64.0 KB-128.0 KB]: 1 frames
258
  Requested Block Size 16.0 MB
259
  Peak Block count 1
260
  Peak Allocated memory 67.0 KB
261
  Peak Large allocation bytes 0 B
262
  [ALLOC_GFX_THREAD]
263
- Peak usage frame count: [64.0 KB-128.0 KB]: 18769 frames
264
  Requested Block Size 16.0 MB
265
  Peak Block count 1
266
  Peak Allocated memory 67.7 KB
@@ -272,13 +275,13 @@ PlayerConnection::CleanupMemory Statistics:
272
  Used Block count 1
273
  Peak Allocated bytes 1.6 MB
274
  [ALLOC_CACHEOBJECTS_MAIN]
275
- Peak usage frame count: [0.5 MB-1.0 MB]: 299 frames, [1.0 MB-2.0 MB]: 18470 frames
276
  Requested Block Size 4.0 MB
277
  Peak Block count 1
278
  Peak Allocated memory 1.4 MB
279
  Peak Large allocation bytes 0 B
280
  [ALLOC_CACHEOBJECTS_THREAD]
281
- Peak usage frame count: [1.0 MB-2.0 MB]: 18768 frames, [4.0 MB-8.0 MB]: 1 frames
282
  Requested Block Size 4.0 MB
283
  Peak Block count 2
284
  Peak Allocated memory 4.7 MB
@@ -290,19 +293,19 @@ PlayerConnection::CleanupMemory Statistics:
290
  Used Block count 1
291
  Peak Allocated bytes 1.6 MB
292
  [ALLOC_TYPETREE_MAIN]
293
- Peak usage frame count: [0-1.0 KB]: 18769 frames
294
  Requested Block Size 2.0 MB
295
  Peak Block count 1
296
  Peak Allocated memory 1.0 KB
297
  Peak Large allocation bytes 0 B
298
  [ALLOC_TYPETREE_THREAD]
299
- Peak usage frame count: [1.0 KB-2.0 KB]: 18769 frames
300
  Requested Block Size 2.0 MB
301
  Peak Block count 1
302
  Peak Allocated memory 1.9 KB
303
  Peak Large allocation bytes 0 B
304
  [ALLOC_PROFILER]
305
- Peak usage frame count: [16.0 KB-32.0 KB]: 18769 frames
306
  Requested Block Size 16.0 MB
307
  Peak Block count 1
308
  Peak Allocated memory 29.2 KB
@@ -311,4 +314,4 @@ PlayerConnection::CleanupMemory Statistics:
311
  Large Block size 4.0 MB
312
  Used Block count 1
313
  Peak Allocated bytes 396 B
314
- ##utp:{"type":"MemoryLeaks","version":2,"phase":"Immediate","time":1673711273004,"processId":2709,"allocatedMemory":1957332,"memoryLabels":[{"Default":9033},{"Permanent":1264},{"Thread":34460},{"Manager":10603},{"VertexData":12},{"Geometry":280},{"Texture":16},{"Shader":69173},{"Material":24},{"GfxDevice":35248},{"Animation":304},{"Audio":3976},{"Physics":288},{"Serialization":216},{"Input":9176},{"JobScheduler":200},{"Mono":40},{"ScriptingNativeRuntime":216},{"BaseObject":1609212},{"Resource":592},{"Renderer":1936},{"Transform":48},{"File":800},{"WebCam":24},{"Culling":40},{"Terrain":953},{"Wind":24},{"String":3447},{"DynamicArray":30868},{"HashMap":7680},{"Utility":1360},{"PoolAlloc":1160},{"TypeTree":1792},{"ScriptManager":80},{"RuntimeInitializeOnLoadManager":72},{"SpriteAtlas":112},{"GI":3272},{"Unet":16},{"Director":7760},{"WebRequest":720},{"VR":45473},{"SceneManager":424},{"Video":32},{"LazyScriptCache":32},{"NativeArray":384},{"Camera":25},{"Secure":1},{"SerializationCache":624},{"APIUpdating":5872},{"Subsystems":384},{"VirtualTexturing":57552},{"AssetReference":32}]}
1
  Mono path[0] = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/Managed'
2
  Mono config path = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/MonoBleedingEdge/etc'
3
  Found 1 interfaces on host : 0) 172.28.0.2
4
+ Multi-casting "[IP] 172.28.0.2 [Port] 55083 [Flags] 2 [Guid] 218355938 [EditorId] 764847374 [Version] 1048832 [Id] LinuxPlayer(13,172.28.0.2) [Debug] 0 [PackageName] LinuxPlayer [ProjectName] UnityEnvironment" to [225.0.0.222:54997]...
5
  Preloaded 'lib_burst_generated.so'
6
  Preloaded 'libgrpc_csharp_ext.x64.so'
7
+ PlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies
8
+ PlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies/UnityEnvironment
9
+ Unable to load player prefs
10
  Initialize engine version: 2021.3.5f1 (40eb3a945986)
11
  [Subsystems] Discovering subsystems at path /content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/UnitySubsystems
12
  Forcing GfxDevice: Null
36
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
37
  FMOD initialized on nosound output
38
  Begin MonoManager ReloadAssembly
39
+ - Completed reload, in 0.091 seconds
40
  ERROR: Shader Sprites/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
41
  ERROR: Shader Sprites/Mask shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
42
  ERROR: Shader Legacy Shaders/VertexLit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
68
  WARNING: Shader Unsupported: 'Standard' - All subshaders removed
69
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
70
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
71
+ UnloadTime: 0.794913 ms
72
  ERROR: Shader UI/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
73
  requesting resize 84 x 84
74
  Setting up 1 worker threads for Enlighten.
76
  [ALLOC_TEMP_TLS] TLS Allocator
77
  StackAllocators :
78
  [ALLOC_TEMP_MAIN]
79
+ Peak usage frame count: [16.0 KB-32.0 KB]: 3888 frames, [32.0 KB-64.0 KB]: 40923 frames, [64.0 KB-128.0 KB]: 11453 frames, [2.0 MB-4.0 MB]: 1 frames
80
  Initial Block Size 4.0 MB
81
  Current Block Size 4.0 MB
82
  Peak Allocated Bytes 2.0 MB
207
  Peak Allocated Bytes 128 B
208
  Overflow Count 0
209
  [ALLOC_MEMORYPROFILER]
210
+ Peak usage frame count: [0.5 MB-1.0 MB]: 299 frames, [1.0 MB-2.0 MB]: 55966 frames
211
  Requested Block Size 1.0 MB
212
  Peak Block count 2
213
  Peak Allocated memory 1.6 MB
214
  Peak Large allocation bytes 0 B
215
  [ALLOC_DEFAULT] Dual Thread Allocator
216
+ Peak main deferred allocation count 12047
217
  [ALLOC_BUCKET]
218
  Large Block size 4.0 MB
219
  Used Block count 1
220
  Peak Allocated bytes 1.6 MB
221
  [ALLOC_DEFAULT_MAIN]
222
+ Peak usage frame count: [8.0 MB-16.0 MB]: 1 frames, [16.0 MB-32.0 MB]: 56264 frames
223
  Requested Block Size 16.0 MB
224
  Peak Block count 3
225
+ Peak Allocated memory 31.8 MB
226
  Peak Large allocation bytes 0 B
227
  [ALLOC_DEFAULT_THREAD]
228
+ Peak usage frame count: [16.0 MB-32.0 MB]: 56265 frames
229
  Requested Block Size 16.0 MB
230
  Peak Block count 1
231
+ Peak Allocated memory 22.0 MB
232
  Peak Large allocation bytes 16.0 MB
233
  [ALLOC_TEMP_JOB_1_FRAME]
234
  Initial Block Size 2.0 MB
257
  Used Block count 1
258
  Peak Allocated bytes 1.6 MB
259
  [ALLOC_GFX_MAIN]
260
+ Peak usage frame count: [32.0 KB-64.0 KB]: 56264 frames, [64.0 KB-128.0 KB]: 1 frames
261
  Requested Block Size 16.0 MB
262
  Peak Block count 1
263
  Peak Allocated memory 67.0 KB
264
  Peak Large allocation bytes 0 B
265
  [ALLOC_GFX_THREAD]
266
+ Peak usage frame count: [64.0 KB-128.0 KB]: 56265 frames
267
  Requested Block Size 16.0 MB
268
  Peak Block count 1
269
  Peak Allocated memory 67.7 KB
275
  Used Block count 1
276
  Peak Allocated bytes 1.6 MB
277
  [ALLOC_CACHEOBJECTS_MAIN]
278
+ Peak usage frame count: [0.5 MB-1.0 MB]: 299 frames, [1.0 MB-2.0 MB]: 55966 frames
279
  Requested Block Size 4.0 MB
280
  Peak Block count 1
281
  Peak Allocated memory 1.4 MB
282
  Peak Large allocation bytes 0 B
283
  [ALLOC_CACHEOBJECTS_THREAD]
284
+ Peak usage frame count: [1.0 MB-2.0 MB]: 56264 frames, [4.0 MB-8.0 MB]: 1 frames
285
  Requested Block Size 4.0 MB
286
  Peak Block count 2
287
  Peak Allocated memory 4.7 MB
293
  Used Block count 1
294
  Peak Allocated bytes 1.6 MB
295
  [ALLOC_TYPETREE_MAIN]
296
+ Peak usage frame count: [0-1.0 KB]: 56265 frames
297
  Requested Block Size 2.0 MB
298
  Peak Block count 1
299
  Peak Allocated memory 1.0 KB
300
  Peak Large allocation bytes 0 B
301
  [ALLOC_TYPETREE_THREAD]
302
+ Peak usage frame count: [1.0 KB-2.0 KB]: 56265 frames
303
  Requested Block Size 2.0 MB
304
  Peak Block count 1
305
  Peak Allocated memory 1.9 KB
306
  Peak Large allocation bytes 0 B
307
  [ALLOC_PROFILER]
308
+ Peak usage frame count: [16.0 KB-32.0 KB]: 56265 frames
309
  Requested Block Size 16.0 MB
310
  Peak Block count 1
311
  Peak Allocated memory 29.2 KB
314
  Large Block size 4.0 MB
315
  Used Block count 1
316
  Peak Allocated bytes 396 B
317
+ ##utp:{"type":"MemoryLeaks","version":2,"phase":"Immediate","time":1673791034412,"processId":20594,"allocatedMemory":1957332,"memoryLabels":[{"Default":9033},{"Permanent":1264},{"Thread":34460},{"Manager":10603},{"VertexData":12},{"Geometry":280},{"Texture":16},{"Shader":69173},{"Material":24},{"GfxDevice":35248},{"Animation":304},{"Audio":3976},{"Physics":288},{"Serialization":216},{"Input":9176},{"JobScheduler":200},{"Mono":40},{"ScriptingNativeRuntime":216},{"BaseObject":1609212},{"Resource":592},{"Renderer":1936},{"Transform":48},{"File":800},{"WebCam":24},{"Culling":40},{"Terrain":953},{"Wind":24},{"String":3447},{"DynamicArray":30868},{"HashMap":7680},{"Utility":1360},{"PoolAlloc":1160},{"TypeTree":1792},{"ScriptManager":80},{"RuntimeInitializeOnLoadManager":72},{"SpriteAtlas":112},{"GI":3272},{"Unet":16},{"Director":7760},{"WebRequest":720},{"VR":45473},{"SceneManager":424},{"Video":32},{"LazyScriptCache":32},{"NativeArray":384},{"Camera":25},{"Secure":1},{"SerializationCache":624},{"APIUpdating":5872},{"Subsystems":384},{"VirtualTexturing":57552},{"AssetReference":32}]}
run_logs/timers.json CHANGED
@@ -2,189 +2,189 @@
2
  "name": "root",
3
  "gauges": {
4
  "Pyramids.Policy.Entropy.mean": {
5
- "value": 0.3963216245174408,
6
- "min": 0.37892216444015503,
7
- "max": 1.4495625495910645,
8
- "count": 33
9
  },
10
  "Pyramids.Policy.Entropy.sum": {
11
- "value": 12060.859375,
12
- "min": 11373.6796875,
13
- "max": 43973.9296875,
14
- "count": 33
15
  },
16
  "Pyramids.Step.mean": {
17
- "value": 989936.0,
18
  "min": 29952.0,
19
- "max": 989936.0,
20
- "count": 33
21
  },
22
  "Pyramids.Step.sum": {
23
- "value": 989936.0,
24
  "min": 29952.0,
25
- "max": 989936.0,
26
- "count": 33
27
  },
28
  "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
29
- "value": 0.43158861994743347,
30
- "min": -0.0929163247346878,
31
- "max": 0.5519277453422546,
32
- "count": 33
33
  },
34
  "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
35
- "value": 117.39210510253906,
36
- "min": -22.392833709716797,
37
- "max": 154.53976440429688,
38
- "count": 33
39
  },
40
  "Pyramids.Policy.RndValueEstimate.mean": {
41
- "value": 0.0027870223857462406,
42
- "min": -0.00042706672684289515,
43
- "max": 0.23599685728549957,
44
- "count": 33
45
  },
46
  "Pyramids.Policy.RndValueEstimate.sum": {
47
- "value": 0.7580701112747192,
48
- "min": -0.11402681469917297,
49
- "max": 55.93125534057617,
50
- "count": 33
51
  },
52
  "Pyramids.Losses.PolicyLoss.mean": {
53
- "value": 0.06696148093582856,
54
- "min": 0.062427873386958585,
55
- "max": 0.07537894976558164,
56
- "count": 33
57
  },
58
  "Pyramids.Losses.PolicyLoss.sum": {
59
- "value": 1.0044222140374284,
60
- "min": 0.49671674601435284,
61
- "max": 1.1306842464837246,
62
- "count": 33
63
  },
64
  "Pyramids.Losses.ValueLoss.mean": {
65
- "value": 0.01608618958335784,
66
- "min": 0.000992208526063701,
67
- "max": 0.01623464779271439,
68
- "count": 33
69
  },
70
  "Pyramids.Losses.ValueLoss.sum": {
71
- "value": 0.24129284375036758,
72
- "min": 0.0075454792245495655,
73
- "max": 0.24129284375036758,
74
- "count": 33
75
  },
76
  "Pyramids.Policy.LearningRate.mean": {
77
- "value": 7.573997475366666e-06,
78
- "min": 7.573997475366666e-06,
79
- "max": 0.00029515063018788575,
80
- "count": 33
81
  },
82
  "Pyramids.Policy.LearningRate.sum": {
83
- "value": 0.00011360996213049999,
84
- "min": 0.00011360996213049999,
85
- "max": 0.0035081249306250996,
86
- "count": 33
87
  },
88
  "Pyramids.Policy.Epsilon.mean": {
89
- "value": 0.10252463333333336,
90
- "min": 0.10252463333333336,
91
- "max": 0.19838354285714285,
92
- "count": 33
93
  },
94
  "Pyramids.Policy.Epsilon.sum": {
95
- "value": 1.5378695000000004,
96
- "min": 1.3691136000000002,
97
- "max": 2.5693748999999997,
98
- "count": 33
99
  },
100
  "Pyramids.Policy.Beta.mean": {
101
- "value": 0.00026221087,
102
- "min": 0.00026221087,
103
- "max": 0.00983851593142857,
104
- "count": 33
105
  },
106
  "Pyramids.Policy.Beta.sum": {
107
- "value": 0.003933163050000001,
108
- "min": 0.003933163050000001,
109
- "max": 0.11696055251,
110
- "count": 33
111
  },
112
  "Pyramids.Losses.RNDLoss.mean": {
113
- "value": 0.00945773720741272,
114
- "min": 0.009271085262298584,
115
- "max": 0.3552553951740265,
116
- "count": 33
117
  },
118
  "Pyramids.Losses.RNDLoss.sum": {
119
- "value": 0.1418660581111908,
120
- "min": 0.12979519367218018,
121
- "max": 2.486787796020508,
122
- "count": 33
123
  },
124
  "Pyramids.Environment.EpisodeLength.mean": {
125
- "value": 435.5844155844156,
126
- "min": 361.8152173913044,
127
  "max": 999.0,
128
- "count": 33
129
  },
130
  "Pyramids.Environment.EpisodeLength.sum": {
131
- "value": 33540.0,
132
  "min": 15984.0,
133
- "max": 33540.0,
134
- "count": 33
135
  },
136
  "Pyramids.Environment.CumulativeReward.mean": {
137
- "value": 1.3825324446736993,
138
  "min": -1.0000000521540642,
139
- "max": 1.5077173760079818,
140
- "count": 33
141
  },
142
  "Pyramids.Environment.CumulativeReward.sum": {
143
- "value": 106.45499823987484,
144
  "min": -32.000001668930054,
145
- "max": 138.70999859273434,
146
- "count": 33
147
  },
148
  "Pyramids.Policy.ExtrinsicReward.mean": {
149
- "value": 1.3825324446736993,
150
  "min": -1.0000000521540642,
151
- "max": 1.5077173760079818,
152
- "count": 33
153
  },
154
  "Pyramids.Policy.ExtrinsicReward.sum": {
155
- "value": 106.45499823987484,
156
  "min": -32.000001668930054,
157
- "max": 138.70999859273434,
158
- "count": 33
159
  },
160
  "Pyramids.Policy.RndReward.mean": {
161
- "value": 0.04263687019112411,
162
- "min": 0.03542026030481793,
163
- "max": 6.730314579792321,
164
- "count": 33
165
  },
166
  "Pyramids.Policy.RndReward.sum": {
167
- "value": 3.2830390047165565,
168
- "min": 2.6668454946629936,
169
- "max": 107.68503327667713,
170
- "count": 33
171
  },
172
  "Pyramids.IsTraining.mean": {
173
  "value": 1.0,
174
  "min": 1.0,
175
  "max": 1.0,
176
- "count": 33
177
  },
178
  "Pyramids.IsTraining.sum": {
179
  "value": 1.0,
180
  "min": 1.0,
181
  "max": 1.0,
182
- "count": 33
183
  }
184
  },
185
  "metadata": {
186
  "timer_format_version": "0.1.0",
187
- "start_time_seconds": "1673709219",
188
  "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
189
  "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
190
  "mlagents_version": "0.29.0.dev0",
@@ -192,66 +192,66 @@
192
  "communication_protocol_version": "1.5.0",
193
  "pytorch_version": "1.8.1+cu102",
194
  "numpy_version": "1.21.6",
195
- "end_time_seconds": "1673711273"
196
  },
197
- "total": 2053.37231426,
198
  "count": 1,
199
- "self": 0.487154233000183,
200
  "children": {
201
  "run_training.setup": {
202
- "total": 0.11296040399997764,
203
  "count": 1,
204
- "self": 0.11296040399997764
205
  },
206
  "TrainerController.start_learning": {
207
- "total": 2052.7721996230002,
208
  "count": 1,
209
- "self": 1.2402444569856925,
210
  "children": {
211
  "TrainerController._reset_env": {
212
- "total": 6.496398377999981,
213
  "count": 1,
214
- "self": 6.496398377999981
215
  },
216
  "TrainerController.advance": {
217
- "total": 2044.9392552420145,
218
- "count": 63764,
219
- "self": 1.307802944031664,
220
  "children": {
221
  "env_step": {
222
- "total": 1380.8280767199785,
223
- "count": 63764,
224
- "self": 1274.9483338659165,
225
  "children": {
226
  "SubprocessEnvManager._take_step": {
227
- "total": 105.10758953706988,
228
- "count": 63764,
229
- "self": 4.38078287908786,
230
  "children": {
231
  "TorchPolicy.evaluate": {
232
- "total": 100.72680665798202,
233
- "count": 62561,
234
- "self": 34.03783737599417,
235
  "children": {
236
  "TorchPolicy.sample_actions": {
237
- "total": 66.68896928198785,
238
- "count": 62561,
239
- "self": 66.68896928198785
240
  }
241
  }
242
  }
243
  }
244
  },
245
  "workers": {
246
- "total": 0.7721533169919894,
247
- "count": 63764,
248
  "self": 0.0,
249
  "children": {
250
  "worker_root": {
251
- "total": 2048.2412592330234,
252
- "count": 63764,
253
  "is_parallel": true,
254
- "self": 872.4286040499896,
255
  "children": {
256
  "run_training.setup": {
257
  "total": 0.0,
@@ -260,48 +260,48 @@
260
  "self": 0.0,
261
  "children": {
262
  "steps_from_proto": {
263
- "total": 0.001805626999953347,
264
  "count": 1,
265
  "is_parallel": true,
266
- "self": 0.000685975999999755,
267
  "children": {
268
  "_process_rank_one_or_two_observation": {
269
- "total": 0.001119650999953592,
270
  "count": 8,
271
  "is_parallel": true,
272
- "self": 0.001119650999953592
273
  }
274
  }
275
  },
276
  "UnityEnvironment.step": {
277
- "total": 0.05277994700009003,
278
  "count": 1,
279
  "is_parallel": true,
280
- "self": 0.0005411750000803295,
281
  "children": {
282
  "UnityEnvironment._generate_step_input": {
283
- "total": 0.0005054120000522744,
284
  "count": 1,
285
  "is_parallel": true,
286
- "self": 0.0005054120000522744
287
  },
288
  "communicator.exchange": {
289
- "total": 0.05002884099997118,
290
  "count": 1,
291
  "is_parallel": true,
292
- "self": 0.05002884099997118
293
  },
294
  "steps_from_proto": {
295
- "total": 0.0017045189999862487,
296
  "count": 1,
297
  "is_parallel": true,
298
- "self": 0.00046017399972697604,
299
  "children": {
300
  "_process_rank_one_or_two_observation": {
301
- "total": 0.0012443450002592726,
302
  "count": 8,
303
  "is_parallel": true,
304
- "self": 0.0012443450002592726
305
  }
306
  }
307
  }
@@ -310,34 +310,34 @@
310
  }
311
  },
312
  "UnityEnvironment.step": {
313
- "total": 1175.8126551830337,
314
- "count": 63763,
315
  "is_parallel": true,
316
- "self": 28.385981297061335,
317
  "children": {
318
  "UnityEnvironment._generate_step_input": {
319
- "total": 24.030136245979634,
320
- "count": 63763,
321
  "is_parallel": true,
322
- "self": 24.030136245979634
323
  },
324
  "communicator.exchange": {
325
- "total": 1017.9619269290041,
326
- "count": 63763,
327
  "is_parallel": true,
328
- "self": 1017.9619269290041
329
  },
330
  "steps_from_proto": {
331
- "total": 105.43461071098875,
332
- "count": 63763,
333
  "is_parallel": true,
334
- "self": 23.384566760032158,
335
  "children": {
336
  "_process_rank_one_or_two_observation": {
337
- "total": 82.05004395095659,
338
- "count": 510104,
339
  "is_parallel": true,
340
- "self": 82.05004395095659
341
  }
342
  }
343
  }
@@ -350,31 +350,31 @@
350
  }
351
  },
352
  "trainer_advance": {
353
- "total": 662.8033755780043,
354
- "count": 63764,
355
- "self": 2.202008867012296,
356
  "children": {
357
  "process_trajectory": {
358
- "total": 150.2878955409965,
359
- "count": 63764,
360
- "self": 150.0755036999965,
361
  "children": {
362
  "RLTrainer._checkpoint": {
363
- "total": 0.21239184099999875,
364
- "count": 2,
365
- "self": 0.21239184099999875
366
  }
367
  }
368
  },
369
  "_update_policy": {
370
- "total": 510.3134711699955,
371
- "count": 446,
372
- "self": 193.58307704400738,
373
  "children": {
374
  "TorchPPOOptimizer.update": {
375
- "total": 316.73039412598814,
376
- "count": 22773,
377
- "self": 316.73039412598814
378
  }
379
  }
380
  }
@@ -383,19 +383,19 @@
383
  }
384
  },
385
  "trainer_threads": {
386
- "total": 1.0690000635804608e-06,
387
  "count": 1,
388
- "self": 1.0690000635804608e-06
389
  },
390
  "TrainerController._save_models": {
391
- "total": 0.09630047700011346,
392
  "count": 1,
393
- "self": 0.0014672840002276644,
394
  "children": {
395
  "RLTrainer._checkpoint": {
396
- "total": 0.0948331929998858,
397
  "count": 1,
398
- "self": 0.0948331929998858
399
  }
400
  }
401
  }
2
  "name": "root",
3
  "gauges": {
4
  "Pyramids.Policy.Entropy.mean": {
5
+ "value": 0.172193706035614,
6
+ "min": 0.1647656112909317,
7
+ "max": 1.4083733558654785,
8
+ "count": 100
9
  },
10
  "Pyramids.Policy.Entropy.sum": {
11
+ "value": 5141.01513671875,
12
+ "min": 4979.8759765625,
13
+ "max": 42724.4140625,
14
+ "count": 100
15
  },
16
  "Pyramids.Step.mean": {
17
+ "value": 2999981.0,
18
  "min": 29952.0,
19
+ "max": 2999981.0,
20
+ "count": 100
21
  },
22
  "Pyramids.Step.sum": {
23
+ "value": 2999981.0,
24
  "min": 29952.0,
25
+ "max": 2999981.0,
26
+ "count": 100
27
  },
28
  "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
29
+ "value": 0.6161277294158936,
30
+ "min": -0.09271835535764694,
31
+ "max": 0.6173363924026489,
32
+ "count": 100
33
  },
34
  "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
35
+ "value": 174.36415100097656,
36
+ "min": -22.530559539794922,
37
+ "max": 174.36415100097656,
38
+ "count": 100
39
  },
40
  "Pyramids.Policy.RndValueEstimate.mean": {
41
+ "value": -0.014366395771503448,
42
+ "min": -1.7395180463790894,
43
+ "max": 1.7319082021713257,
44
+ "count": 100
45
  },
46
  "Pyramids.Policy.RndValueEstimate.sum": {
47
+ "value": -4.065690040588379,
48
+ "min": -455.75372314453125,
49
+ "max": 453.75994873046875,
50
+ "count": 100
51
  },
52
  "Pyramids.Losses.PolicyLoss.mean": {
53
+ "value": 0.04603456548027074,
54
+ "min": 0.04314542232702175,
55
+ "max": 0.0513265693273956,
56
+ "count": 100
57
  },
58
  "Pyramids.Losses.PolicyLoss.sum": {
59
+ "value": 0.6905184822040611,
60
+ "min": 0.33428702012669026,
61
+ "max": 0.7394997566007078,
62
+ "count": 100
63
  },
64
  "Pyramids.Losses.ValueLoss.mean": {
65
+ "value": 0.015738900086532035,
66
+ "min": 0.00041164355064317423,
67
+ "max": 0.40088433486276437,
68
+ "count": 100
69
  },
70
  "Pyramids.Losses.ValueLoss.sum": {
71
+ "value": 0.23608350129798053,
72
+ "min": 0.004528079057074917,
73
+ "max": 5.6123806880787015,
74
+ "count": 100
75
  },
76
  "Pyramids.Policy.LearningRate.mean": {
77
+ "value": 4.911817311399989e-07,
78
+ "min": 4.911817311399989e-07,
79
+ "max": 9.946118149120001e-05,
80
+ "count": 100
81
  },
82
  "Pyramids.Policy.LearningRate.sum": {
83
+ "value": 7.367725967099983e-06,
84
+ "min": 7.367725967099983e-06,
85
+ "max": 0.0013274454725547,
86
+ "count": 100
87
  },
88
  "Pyramids.Policy.Epsilon.mean": {
89
+ "value": 0.10049108222222224,
90
+ "min": 0.10049108222222224,
91
+ "max": 0.19946118095238097,
92
+ "count": 100
93
  },
94
  "Pyramids.Policy.Epsilon.sum": {
95
+ "value": 1.5073662333333335,
96
+ "min": 1.3897045333333333,
97
+ "max": 2.8274453000000004,
98
+ "count": 100
99
  },
100
  "Pyramids.Policy.Beta.mean": {
101
+ "value": 5.905911399999989e-05,
102
+ "min": 5.905911399999989e-05,
103
+ "max": 0.009946171977142856,
104
+ "count": 100
105
  },
106
  "Pyramids.Policy.Beta.sum": {
107
+ "value": 0.0008858867099999984,
108
+ "min": 0.0008858867099999984,
109
+ "max": 0.13276178547,
110
+ "count": 100
111
  },
112
  "Pyramids.Losses.RNDLoss.mean": {
113
+ "value": 0.026145173236727715,
114
+ "min": 0.026145173236727715,
115
+ "max": 0.5980517268180847,
116
+ "count": 100
117
  },
118
  "Pyramids.Losses.RNDLoss.sum": {
119
+ "value": 0.39217761158943176,
120
+ "min": 0.36809539794921875,
121
+ "max": 4.186362266540527,
122
+ "count": 100
123
  },
124
  "Pyramids.Environment.EpisodeLength.mean": {
125
+ "value": 310.4,
126
+ "min": 289.4848484848485,
127
  "max": 999.0,
128
+ "count": 100
129
  },
130
  "Pyramids.Environment.EpisodeLength.sum": {
131
+ "value": 29488.0,
132
  "min": 15984.0,
133
+ "max": 34277.0,
134
+ "count": 100
135
  },
136
  "Pyramids.Environment.CumulativeReward.mean": {
137
+ "value": 1.6895999808060496,
138
  "min": -1.0000000521540642,
139
+ "max": 1.6911179810762405,
140
+ "count": 100
141
  },
142
  "Pyramids.Environment.CumulativeReward.sum": {
143
+ "value": 160.5119981765747,
144
  "min": -32.000001668930054,
145
+ "max": 169.11179810762405,
146
+ "count": 100
147
  },
148
  "Pyramids.Policy.ExtrinsicReward.mean": {
149
+ "value": 1.6895999808060496,
150
  "min": -1.0000000521540642,
151
+ "max": 1.6911179810762405,
152
+ "count": 100
153
  },
154
  "Pyramids.Policy.ExtrinsicReward.sum": {
155
+ "value": 160.5119981765747,
156
  "min": -32.000001668930054,
157
+ "max": 169.11179810762405,
158
+ "count": 100
159
  },
160
  "Pyramids.Policy.RndReward.mean": {
161
+ "value": 0.08286011184835317,
162
+ "min": 0.07857821287703701,
163
+ "max": 11.01363092660904,
164
+ "count": 100
165
  },
166
  "Pyramids.Policy.RndReward.sum": {
167
+ "value": 7.8717106255935505,
168
+ "min": 7.174652462999802,
169
+ "max": 176.21809482574463,
170
+ "count": 100
171
  },
172
  "Pyramids.IsTraining.mean": {
173
  "value": 1.0,
174
  "min": 1.0,
175
  "max": 1.0,
176
+ "count": 100
177
  },
178
  "Pyramids.IsTraining.sum": {
179
  "value": 1.0,
180
  "min": 1.0,
181
  "max": 1.0,
182
+ "count": 100
183
  }
184
  },
185
  "metadata": {
186
  "timer_format_version": "0.1.0",
187
+ "start_time_seconds": "1673783626",
188
  "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
189
  "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
190
  "mlagents_version": "0.29.0.dev0",
192
  "communication_protocol_version": "1.5.0",
193
  "pytorch_version": "1.8.1+cu102",
194
  "numpy_version": "1.21.6",
195
+ "end_time_seconds": "1673791034"
196
  },
197
+ "total": 7408.060608971001,
198
  "count": 1,
199
+ "self": 0.4775461550007094,
200
  "children": {
201
  "run_training.setup": {
202
+ "total": 0.11163453599965578,
203
  "count": 1,
204
+ "self": 0.11163453599965578
205
  },
206
  "TrainerController.start_learning": {
207
+ "total": 7407.4714282800005,
208
  "count": 1,
209
+ "self": 5.1130147120175025,
210
  "children": {
211
  "TrainerController._reset_env": {
212
+ "total": 6.665063098000246,
213
  "count": 1,
214
+ "self": 6.665063098000246
215
  },
216
  "TrainerController.advance": {
217
+ "total": 7395.578573279985,
218
+ "count": 192507,
219
+ "self": 5.180274127194934,
220
  "children": {
221
  "env_step": {
222
+ "total": 4772.939104372194,
223
+ "count": 192507,
224
+ "self": 4390.172587254402,
225
  "children": {
226
  "SubprocessEnvManager._take_step": {
227
+ "total": 379.5830282929637,
228
+ "count": 192507,
229
+ "self": 15.347926996098977,
230
  "children": {
231
  "TorchPolicy.evaluate": {
232
+ "total": 364.23510129686474,
233
+ "count": 187549,
234
+ "self": 121.69350056174335,
235
  "children": {
236
  "TorchPolicy.sample_actions": {
237
+ "total": 242.5416007351214,
238
+ "count": 187549,
239
+ "self": 242.5416007351214
240
  }
241
  }
242
  }
243
  }
244
  },
245
  "workers": {
246
+ "total": 3.1834888248286006,
247
+ "count": 192507,
248
  "self": 0.0,
249
  "children": {
250
  "worker_root": {
251
+ "total": 7391.3437224265335,
252
+ "count": 192507,
253
  "is_parallel": true,
254
+ "self": 3360.819595076673,
255
  "children": {
256
  "run_training.setup": {
257
  "total": 0.0,
260
  "self": 0.0,
261
  "children": {
262
  "steps_from_proto": {
263
+ "total": 0.0019106769996142248,
264
  "count": 1,
265
  "is_parallel": true,
266
+ "self": 0.0006517159990835353,
267
  "children": {
268
  "_process_rank_one_or_two_observation": {
269
+ "total": 0.0012589610005306895,
270
  "count": 8,
271
  "is_parallel": true,
272
+ "self": 0.0012589610005306895
273
  }
274
  }
275
  },
276
  "UnityEnvironment.step": {
277
+ "total": 0.04981924700041418,
278
  "count": 1,
279
  "is_parallel": true,
280
+ "self": 0.0005316970000421861,
281
  "children": {
282
  "UnityEnvironment._generate_step_input": {
283
+ "total": 0.0004576800001814263,
284
  "count": 1,
285
  "is_parallel": true,
286
+ "self": 0.0004576800001814263
287
  },
288
  "communicator.exchange": {
289
+ "total": 0.04700930600029096,
290
  "count": 1,
291
  "is_parallel": true,
292
+ "self": 0.04700930600029096
293
  },
294
  "steps_from_proto": {
295
+ "total": 0.0018205639998996048,
296
  "count": 1,
297
  "is_parallel": true,
298
+ "self": 0.0004515860009632888,
299
  "children": {
300
  "_process_rank_one_or_two_observation": {
301
+ "total": 0.001368977998936316,
302
  "count": 8,
303
  "is_parallel": true,
304
+ "self": 0.001368977998936316
305
  }
306
  }
307
  }
310
  }
311
  },
312
  "UnityEnvironment.step": {
313
+ "total": 4030.5241273498605,
314
+ "count": 192506,
315
  "is_parallel": true,
316
+ "self": 93.57510296254623,
317
  "children": {
318
  "UnityEnvironment._generate_step_input": {
319
+ "total": 77.95089503732379,
320
+ "count": 192506,
321
  "is_parallel": true,
322
+ "self": 77.95089503732379
323
  },
324
  "communicator.exchange": {
325
+ "total": 3511.6009194647468,
326
+ "count": 192506,
327
  "is_parallel": true,
328
+ "self": 3511.6009194647468
329
  },
330
  "steps_from_proto": {
331
+ "total": 347.3972098852437,
332
+ "count": 192506,
333
  "is_parallel": true,
334
+ "self": 78.90584047933771,
335
  "children": {
336
  "_process_rank_one_or_two_observation": {
337
+ "total": 268.49136940590597,
338
+ "count": 1540048,
339
  "is_parallel": true,
340
+ "self": 268.49136940590597
341
  }
342
  }
343
  }
350
  }
351
  },
352
  "trainer_advance": {
353
+ "total": 2617.4591947805957,
354
+ "count": 192507,
355
+ "self": 9.635674524552996,
356
  "children": {
357
  "process_trajectory": {
358
+ "total": 479.09892203501477,
359
+ "count": 192507,
360
+ "self": 478.4984932260122,
361
  "children": {
362
  "RLTrainer._checkpoint": {
363
+ "total": 0.6004288090025511,
364
+ "count": 6,
365
+ "self": 0.6004288090025511
366
  }
367
  }
368
  },
369
  "_update_policy": {
370
+ "total": 2128.724598221028,
371
+ "count": 1391,
372
+ "self": 996.1768557010346,
373
  "children": {
374
  "TorchPPOOptimizer.update": {
375
+ "total": 1132.5477425199933,
376
+ "count": 56690,
377
+ "self": 1132.5477425199933
378
  }
379
  }
380
  }
383
  }
384
  },
385
  "trainer_threads": {
386
+ "total": 9.919986041495577e-07,
387
  "count": 1,
388
+ "self": 9.919986041495577e-07
389
  },
390
  "TrainerController._save_models": {
391
+ "total": 0.11477619799916283,
392
  "count": 1,
393
+ "self": 0.0014028049990884028,
394
  "children": {
395
  "RLTrainer._checkpoint": {
396
+ "total": 0.11337339300007443,
397
  "count": 1,
398
+ "self": 0.11337339300007443
399
  }
400
  }
401
  }
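The regenerated timers.json reflects the longer run: total training wall time grew from roughly 2053 s to roughly 7407 s while the recorded step count grew from ~1.0 M to ~3.0 M. A quick back-of-envelope throughput check, using only numbers that appear in this diff:

```python
# Back-of-envelope throughput from the values in timers.json above
# (numbers copied from this diff; purely illustrative).
total_seconds = 7407.47   # "TrainerController.start_learning" total
env_steps = 2_999_981     # final Pyramids.Step value
print(f"{env_steps / total_seconds:.0f} env steps / second")  # ~405
```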
run_logs/training_status.json CHANGED
@@ -2,40 +2,58 @@
2
  "Pyramids": {
3
  "checkpoints": [
4
  {
5
- "steps": 499927,
6
- "file_path": "results/Pyramids Training/Pyramids/Pyramids-499927.onnx",
7
- "reward": 1.6300000324845314,
8
- "creation_time": 1673710188.7621791,
9
  "auxillary_file_paths": [
10
- "results/Pyramids Training/Pyramids/Pyramids-499927.pt"
11
  ]
12
  },
13
  {
14
- "steps": 999985,
15
- "file_path": "results/Pyramids Training/Pyramids/Pyramids-999985.onnx",
16
- "reward": 1.156199972331524,
17
- "creation_time": 1673711272.438426,
18
  "auxillary_file_paths": [
19
- "results/Pyramids Training/Pyramids/Pyramids-999985.pt"
20
  ]
21
  },
22
  {
23
- "steps": 1000064,
24
- "file_path": "results/Pyramids Training/Pyramids/Pyramids-1000064.onnx",
25
- "reward": 1.1558332927525043,
26
- "creation_time": 1673711272.5450068,
27
  "auxillary_file_paths": [
28
- "results/Pyramids Training/Pyramids/Pyramids-1000064.pt"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  ]
30
  }
31
  ],
32
  "final_checkpoint": {
33
- "steps": 1000064,
34
  "file_path": "results/Pyramids Training/Pyramids.onnx",
35
- "reward": 1.1558332927525043,
36
- "creation_time": 1673711272.5450068,
37
  "auxillary_file_paths": [
38
- "results/Pyramids Training/Pyramids/Pyramids-1000064.pt"
39
  ]
40
  }
41
  },
2
  "Pyramids": {
3
  "checkpoints": [
4
  {
5
+ "steps": 1499928,
6
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-1499928.onnx",
7
+ "reward": 1.7164999544620514,
8
+ "creation_time": 1673787174.8848825,
9
  "auxillary_file_paths": [
10
+ "results/Pyramids Training/Pyramids/Pyramids-1499928.pt"
11
  ]
12
  },
13
  {
14
+ "steps": 1999927,
15
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-1999927.onnx",
16
+ "reward": 1.7209999859333038,
17
+ "creation_time": 1673788444.4714148,
18
  "auxillary_file_paths": [
19
+ "results/Pyramids Training/Pyramids/Pyramids-1999927.pt"
20
  ]
21
  },
22
  {
23
+ "steps": 2499964,
24
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-2499964.onnx",
25
+ "reward": 1.5172500237822533,
26
+ "creation_time": 1673789713.851386,
27
  "auxillary_file_paths": [
28
+ "results/Pyramids Training/Pyramids/Pyramids-2499964.pt"
29
+ ]
30
+ },
31
+ {
32
+ "steps": 2999981,
33
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-2999981.onnx",
34
+ "reward": null,
35
+ "creation_time": 1673791033.8515344,
36
+ "auxillary_file_paths": [
37
+ "results/Pyramids Training/Pyramids/Pyramids-2999981.pt"
38
+ ]
39
+ },
40
+ {
41
+ "steps": 3000109,
42
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-3000109.onnx",
43
+ "reward": null,
44
+ "creation_time": 1673791033.9810536,
45
+ "auxillary_file_paths": [
46
+ "results/Pyramids Training/Pyramids/Pyramids-3000109.pt"
47
  ]
48
  }
49
  ],
50
  "final_checkpoint": {
51
+ "steps": 3000109,
52
  "file_path": "results/Pyramids Training/Pyramids.onnx",
53
+ "reward": null,
54
+ "creation_time": 1673791033.9810536,
55
  "auxillary_file_paths": [
56
+ "results/Pyramids Training/Pyramids/Pyramids-3000109.pt"
57
  ]
58
  }
59
  },
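The updated training_status.json now records five checkpoints, ending at step 3,000,109. A sketch that lists them back out of the committed run logs (key names, including the "auxillary_file_paths" spelling, are taken from the file shown in this diff):

```python
# Sketch: list the checkpoints recorded in run_logs/training_status.json.
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

for ckpt in status["Pyramids"]["checkpoints"]:
    print(ckpt["steps"], ckpt["reward"], ckpt["file_path"])
print("final:", status["Pyramids"]["final_checkpoint"]["steps"])
```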