Chirag Malhotra committed
Commit fb86ab2
1 Parent(s): 18c41f3
Huggy.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e5df61e715c862e4288defa2d2987294fb8b4241630cd137782feff68b4c68a8
- size 2271327
+ oid sha256:4751265976c98c039b3caa29e644ee319e483d87c6c11092b2481114861be51c
+ size 3322382
Huggy/Huggy-2000006.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e5df61e715c862e4288defa2d2987294fb8b4241630cd137782feff68b4c68a8
- size 2271327
+ oid sha256:d80c459706605cf54ca91e091ffbf1cf9ba8ede13f9d74c2e16c3c613fe8ef77
+ size 3322382
Huggy/Huggy-2000006.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:627bc6dd36d555a16ff6174a43b0575e87edea67932a1e676bd4de5e13661372
- size 13503717
+ oid sha256:1e4a2ecf9a0b0a8e38a2ced4f5616d9b68af110147edefc3550f891ddca1012a
+ size 6610047
Huggy/Huggy-2199787.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:342480e05d5288bd7da3f840a33913ee303cb6bc6b09c163fccce99fa190f04e
+ size 3322382
Huggy/Huggy-2199787.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:887b8f988b0ae619a0b3df680bea5509144b77698c58a865b891bfd21ef9c44d
+ size 19811521
Huggy/Huggy-2399928.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ea5525e26224a54b2a3b8d2e88f1f54c61fcb13fd01b38630941f004b914c08
+ size 3322382
Huggy/Huggy-2399928.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ae4a12b75c42a0107c9668dce70fede8cae5c33a9642e300320ab09f5fd96da
+ size 19811521
Huggy/Huggy-2599910.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9c7cb33b071aa628c1f9eca7f255fd784e3523711a5fb838ee6dd9b9663d207
+ size 3322382
Huggy/Huggy-2599910.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4967c70d41776a19e7bb3d90eb5b22a5df1cdbdb8c988da0b855d49147bbad65
+ size 19811521
Huggy/Huggy-2799974.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c530031473583abcaf5f46723ed5c598a9d7fb2a7b1dca360287679192cd29
+ size 3322382
Huggy/Huggy-2799974.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b89920790d5412db95ad26b8e331d31b8044563d170fa3dfae1e7c4475f3a77e
+ size 19811521
Huggy/Huggy-2999906.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eaa4d9d27f9bdcd4d2984a90c455d56105cc36217c654c0d6283db2582b4a05b
+ size 3322382
Huggy/Huggy-2999906.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86580c96ef84cf351f9498b14e4408d209cf4fd29535bab8c2adca20fb8ddcdf
+ size 19811521
Huggy/Huggy-3000023.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4751265976c98c039b3caa29e644ee319e483d87c6c11092b2481114861be51c
+ size 3322382
Huggy/Huggy-3000023.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47073de84ea01ef76af9b1cda2c251e59bdf76ea106139676b29de607d34f553
+ size 19811521
Huggy/checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:627bc6dd36d555a16ff6174a43b0575e87edea67932a1e676bd4de5e13661372
- size 13503717
+ oid sha256:47073de84ea01ef76af9b1cda2c251e59bdf76ea106139676b29de607d34f553
+ size 19811521
Huggy/events.out.tfevents.1688070493.bd58b5858c4a.22581.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93e3f3a969151049f1d628f1086220c6c859a9a30e57faa468bd75efc5ce7257
+ size 1121
Huggy/events.out.tfevents.1688070624.bd58b5858c4a.23172.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:668605c78eaa906cb3b3b43d1546224b7fa9845ef76cf2091c2c06b5189864b0
+ size 1121
Huggy/events.out.tfevents.1688070655.bd58b5858c4a.23359.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d0835253ae2631615364a14d7c38322a3f0c59de13bda1f8bf6f4f0ce880c20
+ size 1121
Huggy/events.out.tfevents.1688070683.bd58b5858c4a.23534.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:169b3f45c59abb0d2ebc16f973f4b7532bf18dc564073c134ab2b7d706e8c2c6
+ size 1121
Huggy/events.out.tfevents.1688070708.bd58b5858c4a.23695.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cab1f182b77a376916ab49c411e7bc5e945865815a8e8fe0b8aa586cc6164b6
+ size 1121
Huggy/events.out.tfevents.1688070733.bd58b5858c4a.23856.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e693dc746a96a2d52c9679814fcb7f0258290a2de0b1e8a1c977e9f8801c87aa
+ size 1122
Huggy/events.out.tfevents.1688070765.bd58b5858c4a.24047.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05cf06a3e4d989705e0ccda4aea32ea1f3895c6c8108cd5041a29976059f63be
+ size 1123
Huggy/events.out.tfevents.1688070788.bd58b5858c4a.24200.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8c7f2390a634e6041dd73989539969dd3973d42d69067ae9f799eaa954b965e
+ size 352991
Huggy/events.out.tfevents.1688073208.bd58b5858c4a.34168.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3db1c878b29b480cedc70fed2d61bae5383ae945202ddebc521e0b9fbb2e23e7
+ size 195347
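All of the model and TensorBoard event files above are tracked with Git LFS, so the commit only touches their three-line pointer files (version, oid sha256, size); the binaries themselves live in LFS storage. Below is a minimal sketch, not part of the commit, of reading such a pointer; the path is illustrative, and in a checkout where LFS has already smudged the file you would see the real binary instead of the pointer.

```python
# Minimal sketch (not part of the commit): parse a Git LFS pointer file of the
# three-line form shown above.
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Return the version, oid and size fields of an LFS pointer file."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"],          # e.g. "sha256:4751265976c9..."
        "size": int(fields["size"]),   # size in bytes of the tracked binary
    }

if __name__ == "__main__":
    print(read_lfs_pointer("Huggy.onnx"))  # works on the pointer text, not the resolved binary
```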
config.json CHANGED
@@ -1 +1 @@
- {"default_settings": null, "behaviors": {"Huggy": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 2048, "buffer_size": 20480, "learning_rate": 0.0003, "beta": 0.005, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 3, "shared_critic": false, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "checkpoint_interval": 200000, "network_settings": {"normalize": true, "hidden_units": 512, "num_layers": 3, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.995, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}}, "init_path": null, "keep_checkpoints": 15, "even_checkpoints": false, "max_steps": 2000000, "time_horizon": 1000, "summary_freq": 50000, "threaded": false, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./trained-envs-executables/linux/Huggy/Huggy", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "Huggy", "initialize_from": null, "load_model": false, "resume": false, "force": false, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
+ {"default_settings": null, "behaviors": {"Huggy": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 2048, "buffer_size": 20480, "learning_rate": 0.0003, "beta": 0.005, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 4, "shared_critic": false, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "checkpoint_interval": 200000, "network_settings": {"normalize": true, "hidden_units": 512, "num_layers": 4, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.995, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}}, "init_path": null, "keep_checkpoints": 15, "even_checkpoints": false, "max_steps": 3000000, "time_horizon": 10000, "summary_freq": 50000, "threaded": false, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./trained-envs-executables/linux/Huggy/Huggy", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "Huggy", "initialize_from": null, "load_model": false, "resume": true, "force": false, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
configuration.yaml CHANGED
@@ -9,7 +9,7 @@ behaviors:
  beta: 0.005
  epsilon: 0.2
  lambd: 0.95
- num_epoch: 3
+ num_epoch: 4
  shared_critic: false
  learning_rate_schedule: linear
  beta_schedule: linear
@@ -18,7 +18,7 @@ behaviors:
  network_settings:
  normalize: true
  hidden_units: 512
- num_layers: 3
+ num_layers: 4
  vis_encode_type: simple
  memory: null
  goal_conditioning_type: hyper
@@ -38,8 +38,8 @@ behaviors:
  init_path: null
  keep_checkpoints: 15
  even_checkpoints: false
- max_steps: 2000000
- time_horizon: 1000
+ max_steps: 3000000
+ time_horizon: 10000
  summary_freq: 50000
  threaded: false
  self_play: null
@@ -67,7 +67,7 @@ checkpoint_settings:
  run_id: Huggy
  initialize_from: null
  load_model: false
- resume: false
+ resume: true
  force: false
  train_model: false
  inference: false
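Together, config.json and configuration.yaml record what this commit changes in the run configuration: num_epoch 3 -> 4, num_layers 3 -> 4, max_steps 2,000,000 -> 3,000,000, time_horizon 1,000 -> 10,000, and resume: true so training continues from the existing 2M-step Huggy checkpoints. A minimal sketch, not part of the commit, of reading those keys back out of the updated YAML; it assumes PyYAML is installed and that configuration.yaml is opened from the repository root (both are assumptions).

```python
# Minimal sketch, not part of the commit: print the values this commit changed
# in the ML-Agents run config. Key layout matches config.json in this repo.
import yaml

with open("configuration.yaml") as f:
    cfg = yaml.safe_load(f)

huggy = cfg["behaviors"]["Huggy"]
print("num_epoch:   ", huggy["hyperparameters"]["num_epoch"])    # 3 -> 4
print("num_layers:  ", huggy["network_settings"]["num_layers"])  # 3 -> 4
print("max_steps:   ", huggy["max_steps"])                       # 2000000 -> 3000000
print("time_horizon:", huggy["time_horizon"])                    # 1000 -> 10000
print("resume:      ", cfg["checkpoint_settings"]["resume"])     # false -> true
```

The resumed run itself was launched with the command recorded in run_logs/timers.json below: mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume.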
run_logs/Player-0.log CHANGED
@@ -2,9 +2,6 @@ Mono path[0] = '/content/ml-agents/trained-envs-executables/linux/Huggy/Huggy_Da
  Mono config path = '/content/ml-agents/trained-envs-executables/linux/Huggy/Huggy_Data/MonoBleedingEdge/etc'
  Preloaded 'lib_burst_generated.so'
  Preloaded 'libgrpc_csharp_ext.x64.so'
- PlayerPrefs - Creating folder: /root/.config/unity3d/Hugging Face
- PlayerPrefs - Creating folder: /root/.config/unity3d/Hugging Face/Huggy
- Unable to load player prefs
  Initialize engine version: 2021.3.14f1 (eee1884e7226)
  [Subsystems] Discovering subsystems at path /content/ml-agents/trained-envs-executables/linux/Huggy/Huggy_Data/UnitySubsystems
  Forcing GfxDevice: Null
@@ -34,7 +31,7 @@ ALSA lib pcm.c:2642:(snd_pcm_open_noupdate) Unknown PCM default
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
  FMOD initialized on nosound output
  Begin MonoManager ReloadAssembly
- - Completed reload, in 0.090 seconds
+ - Completed reload, in 0.159 seconds
  ERROR: Shader Hidden/Universal Render Pipeline/Blit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Hidden/Universal Render Pipeline/CopyDepth shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Hidden/Universal Render Pipeline/ScreenSpaceShadows shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
@@ -78,14 +75,14 @@ ERROR: Shader Universal Render Pipeline/Lit shader is not supported on this GPU
  WARNING: Shader Unsupported: 'Universal Render Pipeline/Lit' - All subshaders removed
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
- UnloadTime: 0.734120 ms
+ UnloadTime: 0.943528 ms
  requesting resize 84 x 84
  Setting up 1 worker threads for Enlighten.
  Memory Statistics:
  [ALLOC_TEMP_TLS] TLS Allocator
  StackAllocators :
  [ALLOC_TEMP_MAIN]
- Peak usage frame count: [4.0 KB-8.0 KB]: 26757 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [4.0 KB-8.0 KB]: 13375 frames, [2.0 MB-4.0 MB]: 1 frames
  Initial Block Size 4.0 MB
  Current Block Size 4.0 MB
  Peak Allocated Bytes 3.6 MB
@@ -93,7 +90,7 @@ Memory Statistics:
  [ALLOC_TEMP_Loading.AsyncRead]
  Initial Block Size 64.0 KB
  Current Block Size 64.0 KB
- Peak Allocated Bytes 192 B
+ Peak Allocated Bytes 128 B
  Overflow Count 0
  [ALLOC_TEMP_Loading.PreloadManager]
  Initial Block Size 256.0 KB
@@ -201,19 +198,19 @@ Memory Statistics:
  Peak Allocated Bytes 0 B
  Overflow Count 0
  [ALLOC_DEFAULT] Dual Thread Allocator
- Peak main deferred allocation count 230
+ Peak main deferred allocation count 288
  [ALLOC_BUCKET]
  Large Block size 4.0 MB
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_DEFAULT_MAIN]
- Peak usage frame count: [16.0 MB-32.0 MB]: 26758 frames
+ Peak usage frame count: [16.0 MB-32.0 MB]: 13376 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 23.5 MB
+ Peak Allocated memory 22.5 MB
  Peak Large allocation bytes 16.0 MB
  [ALLOC_DEFAULT_THREAD]
- Peak usage frame count: [2.0 MB-4.0 MB]: 26758 frames
+ Peak usage frame count: [2.0 MB-4.0 MB]: 13376 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 2.5 MB
@@ -245,13 +242,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_GFX_MAIN]
- Peak usage frame count: [32.0 KB-64.0 KB]: 26757 frames, [64.0 KB-128.0 KB]: 1 frames
+ Peak usage frame count: [32.0 KB-64.0 KB]: 13375 frames, [64.0 KB-128.0 KB]: 1 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 65.6 KB
  Peak Large allocation bytes 0 B
  [ALLOC_GFX_THREAD]
- Peak usage frame count: [64.0 KB-128.0 KB]: 26758 frames
+ Peak usage frame count: [64.0 KB-128.0 KB]: 13376 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 81.8 KB
@@ -263,13 +260,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_CACHEOBJECTS_MAIN]
- Peak usage frame count: [1.0 MB-2.0 MB]: 26757 frames, [16.0 MB-32.0 MB]: 1 frames
+ Peak usage frame count: [1.0 MB-2.0 MB]: 13375 frames, [16.0 MB-32.0 MB]: 1 frames
  Requested Block Size 4.0 MB
  Peak Block count 2
  Peak Allocated memory 30.6 MB
  Peak Large allocation bytes 24.9 MB
  [ALLOC_CACHEOBJECTS_THREAD]
- Peak usage frame count: [0.5 MB-1.0 MB]: 26757 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [0.5 MB-1.0 MB]: 13375 frames, [2.0 MB-4.0 MB]: 1 frames
  Requested Block Size 4.0 MB
  Peak Block count 1
  Peak Allocated memory 2.6 MB
@@ -281,13 +278,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 1.4 MB
  [ALLOC_TYPETREE_MAIN]
- Peak usage frame count: [0-1.0 KB]: 26758 frames
+ Peak usage frame count: [0-1.0 KB]: 13376 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 1.0 KB
  Peak Large allocation bytes 0 B
  [ALLOC_TYPETREE_THREAD]
- Peak usage frame count: [4.0 KB-8.0 KB]: 26758 frames
+ Peak usage frame count: [4.0 KB-8.0 KB]: 13376 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 7.3 KB
run_logs/timers.json CHANGED
@@ -2,213 +2,213 @@
  "name": "root",
  "gauges": {
  "Huggy.Policy.Entropy.mean": {
- "value": 1.400726318359375,
- "min": 1.400726318359375,
- "max": 1.4249807596206665,
- "count": 40
+ "value": 1.3961669206619263,
+ "min": 1.3961669206619263,
+ "max": 1.4021852016448975,
+ "count": 20
  },
  "Huggy.Policy.Entropy.sum": {
- "value": 71309.578125,
- "min": 68498.6171875,
- "max": 76380.78125,
- "count": 40
+ "value": 69712.0078125,
+ "min": 67643.140625,
+ "max": 76855.328125,
+ "count": 20
  },
  "Huggy.Environment.EpisodeLength.mean": {
- "value": 79.9448051948052,
- "min": 76.91419656786272,
- "max": 434.4869565217391,
- "count": 40
+ "value": 83.41722972972973,
+ "min": 82.49749582637729,
+ "max": 359.10714285714283,
+ "count": 20
  },
  "Huggy.Environment.EpisodeLength.sum": {
- "value": 49246.0,
- "min": 48813.0,
- "max": 50188.0,
- "count": 40
+ "value": 49383.0,
+ "min": 49329.0,
+ "max": 50275.0,
+ "count": 20
  },
  "Huggy.Step.mean": {
- "value": 1999922.0,
- "min": 49684.0,
- "max": 1999922.0,
- "count": 40
+ "value": 2999906.0,
+ "min": 2049996.0,
+ "max": 2999906.0,
+ "count": 20
  },
  "Huggy.Step.sum": {
- "value": 1999922.0,
- "min": 49684.0,
- "max": 1999922.0,
- "count": 40
+ "value": 2999906.0,
+ "min": 2049996.0,
+ "max": 2999906.0,
+ "count": 20
  },
  "Huggy.Policy.ExtrinsicValueEstimate.mean": {
- "value": 2.555288314819336,
- "min": 0.13948076963424683,
- "max": 2.572084903717041,
- "count": 40
+ "value": 2.3936758041381836,
+ "min": 0.18066732585430145,
+ "max": 2.3936758041381836,
+ "count": 20
  },
  "Huggy.Policy.ExtrinsicValueEstimate.sum": {
- "value": 1574.0576171875,
- "min": 15.90080738067627,
- "max": 1586.824951171875,
- "count": 40
+ "value": 1417.0560302734375,
+ "min": 25.11275863647461,
+ "max": 1417.0560302734375,
+ "count": 20
  },
  "Huggy.Environment.CumulativeReward.mean": {
- "value": 3.890321247860209,
- "min": 1.8745373069194324,
- "max": 3.958467331453532,
- "count": 40
+ "value": 3.8116866416625075,
+ "min": 2.1311165274475976,
+ "max": 3.8116866416625075,
+ "count": 20
  },
  "Huggy.Environment.CumulativeReward.sum": {
- "value": 2396.4378886818886,
- "min": 213.6972529888153,
- "max": 2438.3624428510666,
- "count": 40
+ "value": 2256.5184918642044,
+ "min": 296.22519731521606,
+ "max": 2256.5184918642044,
+ "count": 20
  },
  "Huggy.Policy.ExtrinsicReward.mean": {
- "value": 3.890321247860209,
- "min": 1.8745373069194324,
- "max": 3.958467331453532,
- "count": 40
+ "value": 3.8116866416625075,
+ "min": 2.1311165274475976,
+ "max": 3.8116866416625075,
+ "count": 20
  },
  "Huggy.Policy.ExtrinsicReward.sum": {
- "value": 2396.4378886818886,
- "min": 213.6972529888153,
- "max": 2438.3624428510666,
- "count": 40
+ "value": 2256.5184918642044,
+ "min": 296.22519731521606,
+ "max": 2256.5184918642044,
+ "count": 20
  },
  "Huggy.Losses.PolicyLoss.mean": {
- "value": 0.0176878971147946,
- "min": 0.012596390061177468,
- "max": 0.01987136222208695,
- "count": 40
+ "value": 0.015746447624405847,
+ "min": 0.013269851225595632,
+ "max": 0.019969996209450374,
+ "count": 20
  },
  "Huggy.Losses.PolicyLoss.sum": {
- "value": 0.053063691344383795,
- "min": 0.025192780122354937,
- "max": 0.05961408666626085,
- "count": 40
+ "value": 0.03149289524881169,
+ "min": 0.026539702451191263,
+ "max": 0.059909988628351125,
+ "count": 20
  },
  "Huggy.Losses.ValueLoss.mean": {
- "value": 0.06274337193204298,
- "min": 0.021939164493232965,
- "max": 0.06539257361243168,
- "count": 40
+ "value": 0.06341568497009575,
+ "min": 0.01594442441128194,
+ "max": 0.06341568497009575,
+ "count": 20
  },
  "Huggy.Losses.ValueLoss.sum": {
- "value": 0.18823011579612892,
- "min": 0.04387832898646593,
- "max": 0.19470821258922416,
- "count": 40
+ "value": 0.1268313699401915,
+ "min": 0.03242806715425104,
+ "max": 0.167779172770679,
+ "count": 20
  },
  "Huggy.Policy.LearningRate.mean": {
- "value": 3.8071487309833353e-06,
- "min": 3.8071487309833353e-06,
- "max": 0.00029530185156604997,
- "count": 40
+ "value": 2.188799270433343e-06,
+ "min": 2.188799270433343e-06,
+ "max": 9.688491770504999e-05,
+ "count": 20
  },
  "Huggy.Policy.LearningRate.sum": {
- "value": 1.1421446192950005e-05,
- "min": 1.1421446192950005e-05,
- "max": 0.00084410566863145,
- "count": 40
+ "value": 4.377598540866686e-06,
+ "min": 4.377598540866686e-06,
+ "max": 0.00026275811241403336,
+ "count": 20
  },
  "Huggy.Policy.Epsilon.mean": {
- "value": 0.1012690166666667,
- "min": 0.1012690166666667,
- "max": 0.19843395000000003,
- "count": 40
+ "value": 0.10072956666666667,
+ "min": 0.10072956666666667,
+ "max": 0.13229495000000002,
+ "count": 20
  },
  "Huggy.Policy.Epsilon.sum": {
- "value": 0.3038070500000001,
- "min": 0.20766764999999998,
- "max": 0.58136855,
- "count": 40
+ "value": 0.20145913333333335,
+ "min": 0.20145913333333335,
+ "max": 0.38758596666666667,
+ "count": 20
  },
  "Huggy.Policy.Beta.mean": {
- "value": 7.332393166666668e-05,
- "min": 7.332393166666668e-05,
- "max": 0.004921854105,
- "count": 40
+ "value": 4.640537666666684e-05,
+ "min": 4.640537666666684e-05,
+ "max": 0.0016215180050000002,
+ "count": 20
  },
  "Huggy.Policy.Beta.sum": {
- "value": 0.00021997179500000006,
- "min": 0.00021997179500000006,
- "max": 0.014070290644999997,
- "count": 40
+ "value": 9.281075333333368e-05,
+ "min": 9.281075333333368e-05,
+ "max": 0.004400539736666667,
+ "count": 20
  },
  "Huggy.IsTraining.mean": {
  "value": 1.0,
  "min": 1.0,
  "max": 1.0,
- "count": 40
+ "count": 20
  },
  "Huggy.IsTraining.sum": {
  "value": 1.0,
  "min": 1.0,
  "max": 1.0,
- "count": 40
+ "count": 20
  }
  },
  "metadata": {
  "timer_format_version": "0.1.0",
- "start_time_seconds": "1688067380",
+ "start_time_seconds": "1688073207",
  "python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
- "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
+ "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics --resume",
  "mlagents_version": "0.31.0.dev0",
  "mlagents_envs_version": "0.31.0.dev0",
  "communication_protocol_version": "1.5.0",
  "pytorch_version": "1.11.0+cu102",
  "numpy_version": "1.21.2",
- "end_time_seconds": "1688069838"
+ "end_time_seconds": "1688074551"
  },
- "total": 2458.0779196629996,
+ "total": 1344.2894796050005,
  "count": 1,
- "self": 0.43725267999889184,
+ "self": 0.48781199700169964,
  "children": {
  "run_training.setup": {
- "total": 0.043405263000295236,
+ "total": 0.058565417999489,
  "count": 1,
- "self": 0.043405263000295236
+ "self": 0.058565417999489
  },
  "TrainerController.start_learning": {
- "total": 2457.5972617200005,
+ "total": 1343.7431021899993,
  "count": 1,
- "self": 4.425367995973829,
+ "self": 2.4203594279033496,
  "children": {
  "TrainerController._reset_env": {
- "total": 4.326155080999797,
+ "total": 5.096684301000096,
  "count": 1,
- "self": 4.326155080999797
+ "self": 5.096684301000096
  },
  "TrainerController.advance": {
- "total": 2448.728760195026,
- "count": 233026,
- "self": 4.627487433881015,
+ "total": 1336.0642789030971,
+ "count": 115518,
+ "self": 2.3473333340643876,
  "children": {
  "env_step": {
- "total": 1906.5150680700845,
- "count": 233026,
- "self": 1607.9886222099549,
+ "total": 990.418231647116,
+ "count": 115518,
+ "self": 828.1844275509502,
  "children": {
  "SubprocessEnvManager._take_step": {
- "total": 295.59493119311946,
- "count": 233026,
- "self": 17.382588481978473,
+ "total": 160.72991279362304,
+ "count": 115518,
+ "self": 9.117237977531659,
  "children": {
  "TorchPolicy.evaluate": {
- "total": 278.212342711141,
- "count": 222980,
- "self": 278.212342711141
+ "total": 151.61267481609138,
+ "count": 111459,
+ "self": 151.61267481609138
  }
  }
  },
  "workers": {
- "total": 2.9315146670101058,
- "count": 233026,
+ "total": 1.5038913025427973,
+ "count": 115518,
  "self": 0.0,
  "children": {
  "worker_root": {
- "total": 2449.8591234389846,
- "count": 233026,
+ "total": 1339.5226123449602,
+ "count": 115518,
  "is_parallel": true,
- "self": 1138.5625666251317,
+ "self": 662.2078289916471,
  "children": {
  "run_training.setup": {
  "total": 0.0,
@@ -217,48 +217,48 @@
  "self": 0.0,
  "children": {
  "steps_from_proto": {
- "total": 0.001194677000057709,
+ "total": 0.0008973949998107855,
  "count": 1,
  "is_parallel": true,
- "self": 0.00039951600001586485,
+ "self": 0.00027297899941913784,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 0.0007951610000418441,
+ "total": 0.0006244160003916477,
  "count": 2,
  "is_parallel": true,
- "self": 0.0007951610000418441
+ "self": 0.0006244160003916477
  }
  }
  },
  "UnityEnvironment.step": {
- "total": 0.030308107999644562,
+ "total": 0.03314095600035216,
  "count": 1,
  "is_parallel": true,
- "self": 0.0003765889991882432,
+ "self": 0.00029605300005641766,
  "children": {
  "UnityEnvironment._generate_step_input": {
- "total": 0.000228140000217536,
+ "total": 0.00021390599977166858,
  "count": 1,
  "is_parallel": true,
- "self": 0.000228140000217536
+ "self": 0.00021390599977166858
  },
  "communicator.exchange": {
- "total": 0.02885427000001073,
+ "total": 0.03187321499990503,
  "count": 1,
  "is_parallel": true,
- "self": 0.02885427000001073
+ "self": 0.03187321499990503
  },
  "steps_from_proto": {
- "total": 0.0008491090002280544,
+ "total": 0.000757782000619045,
  "count": 1,
  "is_parallel": true,
- "self": 0.00024096500055748038,
+ "self": 0.00021587099945463706,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 0.000608143999670574,
+ "total": 0.0005419110011644079,
  "count": 2,
  "is_parallel": true,
- "self": 0.000608143999670574
+ "self": 0.0005419110011644079
  }
  }
  }
@@ -267,34 +267,34 @@
  }
  },
  "UnityEnvironment.step": {
- "total": 1311.296556813853,
- "count": 233025,
+ "total": 677.314783353313,
+ "count": 115517,
  "is_parallel": true,
- "self": 39.77586232188014,
+ "self": 20.05421794414451,
  "children": {
  "UnityEnvironment._generate_step_input": {
- "total": 78.92728995208336,
- "count": 233025,
+ "total": 40.19784080896261,
+ "count": 115517,
  "is_parallel": true,
- "self": 78.92728995208336
+ "self": 40.19784080896261
  },
  "communicator.exchange": {
- "total": 1095.8950552539727,
- "count": 233025,
+ "total": 567.7721280694213,
+ "count": 115517,
  "is_parallel": true,
- "self": 1095.8950552539727
+ "self": 567.7721280694213
  },
  "steps_from_proto": {
- "total": 96.69834928591672,
- "count": 233025,
+ "total": 49.29059653078457,
+ "count": 115517,
  "is_parallel": true,
- "self": 33.56607584891799,
+ "self": 17.211001795385528,
  "children": {
  "_process_rank_one_or_two_observation": {
- "total": 63.13227343699873,
- "count": 466050,
+ "total": 32.07959473539904,
+ "count": 231034,
  "is_parallel": true,
- "self": 63.13227343699873
+ "self": 32.07959473539904
  }
  }
  }
@@ -307,31 +307,31 @@
  }
  },
  "trainer_advance": {
- "total": 537.5862046910606,
- "count": 233026,
- "self": 6.7593281281710915,
+ "total": 343.2987139219167,
+ "count": 115518,
+ "self": 3.6773810712311388,
  "children": {
  "process_trajectory": {
- "total": 138.6723028718884,
- "count": 233026,
- "self": 137.4289053408893,
+ "total": 66.87622583869233,
+ "count": 115518,
+ "self": 65.9710374116894,
  "children": {
  "RLTrainer._checkpoint": {
- "total": 1.2433975309991183,
- "count": 10,
- "self": 1.2433975309991183
+ "total": 0.9051884270029404,
+ "count": 5,
+ "self": 0.9051884270029404
  }
  }
  },
  "_update_policy": {
- "total": 392.15457369100113,
- "count": 97,
- "self": 331.260746327001,
+ "total": 272.74510701199324,
+ "count": 48,
+ "self": 226.106096075996,
  "children": {
  "TorchPPOOptimizer.update": {
- "total": 60.89382736400012,
- "count": 2910,
- "self": 60.89382736400012
+ "total": 46.63901093599725,
+ "count": 1920,
+ "self": 46.63901093599725
  }
  }
  }
@@ -340,19 +340,19 @@
  }
  },
  "trainer_threads": {
- "total": 1.2099999366910197e-06,
+ "total": 9.999985195463523e-07,
  "count": 1,
- "self": 1.2099999366910197e-06
+ "self": 9.999985195463523e-07
  },
  "TrainerController._save_models": {
- "total": 0.11697723800079984,
+ "total": 0.16177855800015095,
  "count": 1,
- "self": 0.0018440250005369307,
+ "self": 0.0041508690010232385,
  "children": {
  "RLTrainer._checkpoint": {
- "total": 0.11513321300026291,
+ "total": 0.1576276889991277,
  "count": 1,
- "self": 0.11513321300026291
+ "self": 0.1576276889991277
  }
  }
  }
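Each gauge in run_logs/timers.json stores the latest value plus min/max over the summary periods of the run, alongside a hierarchical profiler tree of totals, counts, and self-times; the counts drop from 40 to 20 because the resumed run only adds roughly 1M steps at a summary_freq of 50,000. A minimal sketch, not part of the commit, of dumping the gauges, assuming the file is read from this repo's run_logs directory.

```python
# Minimal sketch, not part of the commit: summarize the stats gauges recorded in
# run_logs/timers.json (the path and key layout are taken from the diff above).
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

for name, gauge in timers["gauges"].items():
    # Each gauge keeps the latest value plus min/max across `count` summary periods.
    print(f"{name}: value={gauge['value']:.4f} min={gauge['min']:.4f} "
          f"max={gauge['max']:.4f} count={gauge['count']}")
```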
run_logs/training_status.json CHANGED
@@ -2,112 +2,148 @@
  "Huggy": {
  "checkpoints": [
  {
- "steps": 199918,
- "file_path": "results/Huggy/Huggy/Huggy-199918.onnx",
- "reward": 3.8245207306110496,
- "creation_time": 1688067632.590595,
+ "steps": 1999922,
+ "file_path": "results/Huggy/Huggy/Huggy-1999922.onnx",
+ "reward": 3.8654306222652566,
+ "creation_time": 1688069838.4345078,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-199918.pt"
+ "results/Huggy/Huggy/Huggy-1999922.pt"
  ]
  },
  {
- "steps": 399986,
- "file_path": "results/Huggy/Huggy/Huggy-399986.onnx",
- "reward": 4.158072991554554,
- "creation_time": 1688067882.4040315,
+ "steps": 2000006,
+ "file_path": "results/Huggy/Huggy/Huggy-2000006.onnx",
+ "reward": 3.866448467060671,
+ "creation_time": 1688069838.5553,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-399986.pt"
+ "results/Huggy/Huggy/Huggy-2000006.pt"
  ]
  },
  {
- "steps": 599412,
- "file_path": "results/Huggy/Huggy/Huggy-599412.onnx",
- "reward": 3.5288519805127923,
- "creation_time": 1688068122.6042614,
+ "steps": 2000006,
+ "file_path": "results/Huggy/Huggy/Huggy-2000006.onnx",
+ "reward": null,
+ "creation_time": 1688070497.2221453,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-599412.pt"
+ "results/Huggy/Huggy/Huggy-2000006.pt"
  ]
  },
  {
- "steps": 799859,
- "file_path": "results/Huggy/Huggy/Huggy-799859.onnx",
- "reward": 4.069710771341135,
- "creation_time": 1688068360.1809177,
+ "steps": 2000006,
+ "file_path": "results/Huggy/Huggy/Huggy-2000006.onnx",
+ "reward": null,
+ "creation_time": 1688070628.3207586,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-799859.pt"
+ "results/Huggy/Huggy/Huggy-2000006.pt"
  ]
  },
  {
- "steps": 999988,
- "file_path": "results/Huggy/Huggy/Huggy-999988.onnx",
- "reward": 3.603951033371598,
- "creation_time": 1688068610.0689116,
+ "steps": 2000006,
+ "file_path": "results/Huggy/Huggy/Huggy-2000006.onnx",
+ "reward": null,
+ "creation_time": 1688070658.377307,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-999988.pt"
+ "results/Huggy/Huggy/Huggy-2000006.pt"
  ]
  },
  {
- "steps": 1199905,
- "file_path": "results/Huggy/Huggy/Huggy-1199905.onnx",
- "reward": 3.661744071476495,
- "creation_time": 1688068858.5257802,
+ "steps": 2000006,
+ "file_path": "results/Huggy/Huggy/Huggy-2000006.onnx",
+ "reward": null,
+ "creation_time": 1688070686.7492425,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1199905.pt"
+ "results/Huggy/Huggy/Huggy-2000006.pt"
  ]
  },
  {
- "steps": 1399961,
- "file_path": "results/Huggy/Huggy/Huggy-1399961.onnx",
- "reward": 3.06927078217268,
- "creation_time": 1688069104.515438,
+ "steps": 2000006,
+ "file_path": "results/Huggy/Huggy/Huggy-2000006.onnx",
+ "reward": null,
+ "creation_time": 1688070711.7361941,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1399961.pt"
+ "results/Huggy/Huggy/Huggy-2000006.pt"
  ]
  },
  {
- "steps": 1599327,
- "file_path": "results/Huggy/Huggy/Huggy-1599327.onnx",
- "reward": 3.9818750013505793,
- "creation_time": 1688069344.1513155,
+ "steps": 2000006,
+ "file_path": "results/Huggy/Huggy/Huggy-2000006.onnx",
+ "reward": null,
+ "creation_time": 1688070737.78152,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1599327.pt"
+ "results/Huggy/Huggy/Huggy-2000006.pt"
  ]
  },
  {
- "steps": 1799830,
- "file_path": "results/Huggy/Huggy/Huggy-1799830.onnx",
- "reward": 3.9760619323043263,
- "creation_time": 1688069591.0157752,
+ "steps": 2000006,
+ "file_path": "results/Huggy/Huggy/Huggy-2000006.onnx",
+ "reward": null,
+ "creation_time": 1688070769.7891285,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1799830.pt"
+ "results/Huggy/Huggy/Huggy-2000006.pt"
  ]
  },
  {
- "steps": 1999922,
- "file_path": "results/Huggy/Huggy/Huggy-1999922.onnx",
- "reward": 3.8654306222652566,
- "creation_time": 1688069838.4345078,
+ "steps": 2199787,
+ "file_path": "results/Huggy/Huggy/Huggy-2199787.onnx",
+ "reward": 3.345773091081713,
+ "creation_time": 1688073473.460917,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-1999922.pt"
+ "results/Huggy/Huggy/Huggy-2199787.pt"
  ]
  },
  {
- "steps": 2000006,
- "file_path": "results/Huggy/Huggy/Huggy-2000006.onnx",
- "reward": 3.866448467060671,
- "creation_time": 1688069838.5553,
+ "steps": 2399928,
+ "file_path": "results/Huggy/Huggy/Huggy-2399928.onnx",
+ "reward": 3.478961085356199,
+ "creation_time": 1688073742.9096844,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-2000006.pt"
+ "results/Huggy/Huggy/Huggy-2399928.pt"
+ ]
+ },
+ {
+ "steps": 2599910,
+ "file_path": "results/Huggy/Huggy/Huggy-2599910.onnx",
+ "reward": 3.68402361869812,
+ "creation_time": 1688074013.6015458,
+ "auxillary_file_paths": [
+ "results/Huggy/Huggy/Huggy-2599910.pt"
+ ]
+ },
+ {
+ "steps": 2799974,
+ "file_path": "results/Huggy/Huggy/Huggy-2799974.onnx",
+ "reward": 3.8231999143880073,
+ "creation_time": 1688074275.5085018,
+ "auxillary_file_paths": [
+ "results/Huggy/Huggy/Huggy-2799974.pt"
+ ]
+ },
+ {
+ "steps": 2999906,
+ "file_path": "results/Huggy/Huggy/Huggy-2999906.onnx",
+ "reward": 3.703413576704182,
+ "creation_time": 1688074550.6508315,
+ "auxillary_file_paths": [
+ "results/Huggy/Huggy/Huggy-2999906.pt"
+ ]
+ },
+ {
+ "steps": 3000023,
+ "file_path": "results/Huggy/Huggy/Huggy-3000023.onnx",
+ "reward": 3.7201410008936513,
+ "creation_time": 1688074550.815447,
+ "auxillary_file_paths": [
+ "results/Huggy/Huggy/Huggy-3000023.pt"
  ]
  }
  ],
  "final_checkpoint": {
- "steps": 2000006,
+ "steps": 3000023,
  "file_path": "results/Huggy/Huggy.onnx",
- "reward": 3.866448467060671,
- "creation_time": 1688069838.5553,
+ "reward": 3.7201410008936513,
+ "creation_time": 1688074550.815447,
  "auxillary_file_paths": [
- "results/Huggy/Huggy/Huggy-3000023.pt"
+ "results/Huggy/Huggy/Huggy-3000023.pt"
  ]
  }
  },
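run_logs/training_status.json now tracks checkpoints up to step 3,000,023, and final_checkpoint points at the new Huggy-3000023 pair; the run of 2000006 entries with reward: null appears to correspond to the short relaunch attempts whose event files are added above. A minimal sketch, not part of the commit, of listing the recorded checkpoints; the path is assumed to match this repo's layout, and the keys match the diff above.

```python
# Minimal sketch, not part of the commit: list the Huggy checkpoints recorded in
# run_logs/training_status.json.
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

for ckpt in status["Huggy"]["checkpoints"]:
    reward = ckpt["reward"]
    reward_str = f"{reward:.3f}" if reward is not None else "n/a"
    print(f"steps={ckpt['steps']:>8}  reward={reward_str:>6}  {ckpt['file_path']}")

final = status["Huggy"]["final_checkpoint"]
print(f"final checkpoint: steps={final['steps']}, reward={final['reward']:.3f}, {final['file_path']}")
```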