jrauch4 committed
Commit e4a72ad • 1 Parent(s): b056ea8

SnowballTarget - second model

Files changed (31)
  1. SnowballTarget.onnx +1 -1
  2. SnowballTarget/SnowballTarget-1408.pt +0 -3
  3. SnowballTarget/SnowballTarget-149984.onnx +1 -1
  4. SnowballTarget/SnowballTarget-149984.pt +1 -1
  5. SnowballTarget/SnowballTarget-199984.onnx +1 -1
  6. SnowballTarget/SnowballTarget-199984.pt +1 -1
  7. SnowballTarget/{SnowballTarget-125592.onnx → SnowballTarget-249880.onnx} +1 -1
  8. SnowballTarget/{SnowballTarget-200368.pt → SnowballTarget-249880.pt} +1 -1
  9. SnowballTarget/{SnowballTarget-200368.onnx → SnowballTarget-299968.onnx} +1 -1
  10. SnowballTarget/{SnowballTarget-125592.pt → SnowballTarget-299968.pt} +1 -1
  11. SnowballTarget/{SnowballTarget-1408.onnx → SnowballTarget-349928.onnx} +1 -1
  12. SnowballTarget/{SnowballTarget-49936.pt → SnowballTarget-349928.pt} +1 -1
  13. SnowballTarget/{SnowballTarget-49936.onnx → SnowballTarget-399968.onnx} +1 -1
  14. SnowballTarget/{SnowballTarget-99960.pt → SnowballTarget-399968.pt} +1 -1
  15. SnowballTarget/SnowballTarget-449952.onnx +3 -0
  16. SnowballTarget/SnowballTarget-449952.pt +3 -0
  17. SnowballTarget/SnowballTarget-499912.onnx +3 -0
  18. SnowballTarget/SnowballTarget-499912.pt +3 -0
  19. SnowballTarget/SnowballTarget-500808.onnx +3 -0
  20. SnowballTarget/SnowballTarget-500808.pt +3 -0
  21. SnowballTarget/SnowballTarget-99896.onnx +3 -0
  22. SnowballTarget/SnowballTarget-99896.pt +3 -0
  23. SnowballTarget/SnowballTarget-99960.onnx +0 -3
  24. SnowballTarget/checkpoint.pt +1 -1
  25. SnowballTarget/events.out.tfevents.1673952569.8a42fd845b75.6872.0 +0 -3
  26. SnowballTarget/{events.out.tfevents.1673952243.8a42fd845b75.5303.0 → events.out.tfevents.1673953149.8a42fd845b75.9878.0} +2 -2
  27. config.json +1 -1
  28. configuration.yaml +8 -8
  29. run_logs/Player-0.log +14 -14
  30. run_logs/timers.json +166 -166
  31. run_logs/training_status.json +71 -35
SnowballTarget.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cb5d4c7d4a5e8c0f00c74cd60fa4c2508eb0659607e4948deb15832e1ccacac8
+ oid sha256:a8ec1fa0a8e813bce4ad86d2751761c9557a55a26b8fa32c85c9457bba5fdf59
  size 645119
SnowballTarget/SnowballTarget-1408.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:bac96f70de7d9f346bd05cfd0425a8509aed910bb20046d77b4d27f6654409b3
- size 1285728
SnowballTarget/SnowballTarget-149984.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:08c63aa2b5c0f419c3f54dc809b1e01c0125d35504601e68a8c24ad1e95b7497
+ oid sha256:e778d105fbf5a2f8b7c8a8cf082d06ff2f37d6e3ae14ec0d1088b044fc0b86ba
  size 645119
SnowballTarget/SnowballTarget-149984.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:046f4a0d0165a428ad06d0e2add83306a36b114fe2aaf01932ac011c5199da55
+ oid sha256:18da9fb5c684527f7e20947aafd21450c8eee0ed06b3fe3b2e044d729c50c5eb
  size 3845312
SnowballTarget/SnowballTarget-199984.onnx CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cb5d4c7d4a5e8c0f00c74cd60fa4c2508eb0659607e4948deb15832e1ccacac8
+ oid sha256:b6864993fbff634a20e4d3e184db3e3bc575b53db9e6578a07335229e84a00d6
  size 645119
SnowballTarget/SnowballTarget-199984.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:daa970b63d53b6592c74c7a462271950b80d7bf0a86cb2c6e3babfe0aee34984
+ oid sha256:d712f09d939ed3ce15b4288ce253c19230f169f44128575d38abd411dcf15818
  size 3845312
SnowballTarget/{SnowballTarget-125592.onnx β†’ SnowballTarget-249880.onnx} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:215bbdd5e692ee7ade626e82e19035a135ed39176e60b1646eff782d3776b5fe
+ oid sha256:a2a1b842815a817a25936e4db1802b209dbdfd42eb753ecd5abe569620f6a6b8
  size 645119
SnowballTarget/{SnowballTarget-200368.pt β†’ SnowballTarget-249880.pt} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:60d8e0cc10c9f41f330802cf4f8a2958a4678e6494c19f9da0ea611b3a981dd4
+ oid sha256:c4425703647d39fdd3dc5a5286b3a997db6058fd94c8ceaa4dc0835e90165e38
  size 3845312
SnowballTarget/{SnowballTarget-200368.onnx β†’ SnowballTarget-299968.onnx} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cb5d4c7d4a5e8c0f00c74cd60fa4c2508eb0659607e4948deb15832e1ccacac8
+ oid sha256:8b6b3a71b0cae734fe4720a12a6674d89e2ea4f93b7ca75478dde453c6ef0a65
  size 645119
SnowballTarget/{SnowballTarget-125592.pt β†’ SnowballTarget-299968.pt} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2dcbbb66c41f20d93074c389f74b55a5d9793c58108fc2e065736bba1c8a66d4
+ oid sha256:f46c549641165b229e2e610e107970a4e67c1b5eae185e4105f9386afdef84ca
  size 3845312
SnowballTarget/{SnowballTarget-1408.onnx β†’ SnowballTarget-349928.onnx} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:579bc1cc9e4a5129349a0d054364ea21bcd896a384f0af181b2dee8a9d89d73f
+ oid sha256:3602998f6c4001791276db01468b0c8d823008ecd744276e9f36ad64f83d247a
  size 645119
SnowballTarget/{SnowballTarget-49936.pt β†’ SnowballTarget-349928.pt} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a854d81907d5b5dc9263059b3db9bc4d34151fd470ebc1602f00fb45d95a3e88
+ oid sha256:053cfc94c9ba686fc0595b5753a04400841066946b0c4594ba9c4bf327770896
  size 3845312
SnowballTarget/{SnowballTarget-49936.onnx β†’ SnowballTarget-399968.onnx} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:06f89b74c62b4cef4c7c8ea8b0620a4d18df79e6582564dd0e45a748952c8f57
+ oid sha256:fc132014ce7489f75359585b48c8061d4ab5ea4a3ec37746071f11f27f3e9a2e
  size 645119
SnowballTarget/{SnowballTarget-99960.pt β†’ SnowballTarget-399968.pt} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4c4e5bc31668c596264d68a59ebf9fe0a286df4e18e75b5bab6a8fd145fee488
+ oid sha256:19bff0c2c82809c32c687e93487f80b83193599e7d60c60b4d5abf2411ba4c2a
  size 3845312
SnowballTarget/SnowballTarget-449952.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc3b30a4db62c55913998b67e010e16dd439a0524869b0c6fd9b2eda61fc7011
+ size 645119
SnowballTarget/SnowballTarget-449952.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:192499951d4b0cc404bb21b6cdab6df70edae4bba5c4022996ebd973c41dae89
+ size 3845312
SnowballTarget/SnowballTarget-499912.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8ec1fa0a8e813bce4ad86d2751761c9557a55a26b8fa32c85c9457bba5fdf59
+ size 645119
SnowballTarget/SnowballTarget-499912.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e695ce1b687ea703e015b5e8940374eba8abdd83608766e960011d3a10d892a5
+ size 3845312
SnowballTarget/SnowballTarget-500808.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8ec1fa0a8e813bce4ad86d2751761c9557a55a26b8fa32c85c9457bba5fdf59
+ size 645119
SnowballTarget/SnowballTarget-500808.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4440433e37a7f5465b3a1d55efb16ee5d6f48e320520089a6ba95fc3f3101bd3
+ size 3845312
SnowballTarget/SnowballTarget-99896.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d60d977635b86ea25fb72a0adb5c6dc3f42ed4c256ca948ddf6e0c9f7c21166
+ size 645119
SnowballTarget/SnowballTarget-99896.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a7fb36b5d9e832d960bfbb90f12165fc1289d4bc6744bf2fec355a399e258d3
+ size 3845312
SnowballTarget/SnowballTarget-99960.onnx DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c25cd4e017767ce894ddcbeab83b28091712adadcfd253932fa03ae33f1aa73b
- size 645119
SnowballTarget/checkpoint.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:60d8e0cc10c9f41f330802cf4f8a2958a4678e6494c19f9da0ea611b3a981dd4
+ oid sha256:4440433e37a7f5465b3a1d55efb16ee5d6f48e320520089a6ba95fc3f3101bd3
  size 3845312
SnowballTarget/events.out.tfevents.1673952569.8a42fd845b75.6872.0 DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:18bc4029a11f0ebbd87f73b7aed687fa9099fed276f9dfcba52bcebcfb968157
- size 7737
SnowballTarget/{events.out.tfevents.1673952243.8a42fd845b75.5303.0 β†’ events.out.tfevents.1673953149.8a42fd845b75.9878.0} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9a2d93e8ef2b487acb168530b260605856c427043786026d41e43b8e934e3fa9
- size 17301
+ oid sha256:20299a7a959232a5562571916cc50b7380b2175446b0b56e33a1302b39f4f992
+ size 53958
config.json CHANGED
@@ -1 +1 @@
- {"default_settings": null, "behaviors": {"SnowballTarget": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 128, "buffer_size": 4096, "learning_rate": 0.0005, "beta": 0.005, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 5, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "network_settings": {"normalize": false, "hidden_units": 256, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.99, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}}, "init_path": null, "keep_checkpoints": 10, "checkpoint_interval": 50000, "max_steps": 200000, "time_horizon": 64, "summary_freq": 10000, "threaded": true, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./training-envs-executables/linux/SnowballTarget/SnowballTarget", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "SnowballTarget1", "initialize_from": null, "load_model": false, "resume": true, "force": false, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
 
+ {"default_settings": null, "behaviors": {"SnowballTarget": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 256, "buffer_size": 8192, "learning_rate": 0.001, "beta": 0.005, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 5, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "network_settings": {"normalize": false, "hidden_units": 256, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.95, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}}, "init_path": null, "keep_checkpoints": 10, "checkpoint_interval": 50000, "max_steps": 500000, "time_horizon": 128, "summary_freq": 10000, "threaded": true, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./training-envs-executables/linux/SnowballTarget/SnowballTarget", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "SnowballTarget2", "initialize_from": null, "load_model": false, "resume": false, "force": false, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
configuration.yaml CHANGED
@@ -3,9 +3,9 @@ behaviors:
  SnowballTarget:
  trainer_type: ppo
  hyperparameters:
- batch_size: 128
- buffer_size: 4096
- learning_rate: 0.0005
+ batch_size: 256
+ buffer_size: 8192
+ learning_rate: 0.001
  beta: 0.005
  epsilon: 0.2
  lambd: 0.95
@@ -23,7 +23,7 @@ behaviors:
  deterministic: false
  reward_signals:
  extrinsic:
- gamma: 0.99
+ gamma: 0.95
  strength: 1.0
  network_settings:
  normalize: false
@@ -36,8 +36,8 @@ behaviors:
  init_path: null
  keep_checkpoints: 10
  checkpoint_interval: 50000
- max_steps: 200000
- time_horizon: 64
+ max_steps: 500000
+ time_horizon: 128
  summary_freq: 10000
  threaded: true
  self_play: null
@@ -62,10 +62,10 @@ engine_settings:
  no_graphics: true
  environment_parameters: null
  checkpoint_settings:
- run_id: SnowballTarget1
+ run_id: SnowballTarget2
  initialize_from: null
  load_model: false
- resume: true
+ resume: false
  force: false
  train_model: false
  inference: false
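
Taken together, the config.json and configuration.yaml hunks above define the new SnowballTarget2 run. As a reading aid only (not a file in this commit), the updated trainer settings reconstructed from this diff look as follows; the indentation is inferred from the standard ML-Agents configuration layout:

behaviors:
  SnowballTarget:
    trainer_type: ppo
    hyperparameters:
      batch_size: 256
      buffer_size: 8192
      learning_rate: 0.001
      beta: 0.005
      epsilon: 0.2
      lambd: 0.95
      num_epoch: 5
      learning_rate_schedule: linear
    network_settings:
      normalize: false
      hidden_units: 256
      num_layers: 2
    reward_signals:
      extrinsic:
        gamma: 0.95
        strength: 1.0
    max_steps: 500000
    time_horizon: 128
    summary_freq: 10000
checkpoint_settings:
  run_id: SnowballTarget2
  resume: false

Compared with SnowballTarget1, this doubles batch_size, buffer_size, learning_rate, and time_horizon, lowers gamma from 0.99 to 0.95, and extends training from 200000 to 500000 steps under a fresh run_id with resume disabled. Per the run_logs/timers.json metadata below, the run was launched with mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics.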
run_logs/Player-0.log CHANGED
@@ -31,7 +31,7 @@ ALSA lib pcm.c:2495:(snd_pcm_open_noupdate) Unknown PCM default
  FMOD failed to initialize the output device.: "Error initializing output device. " (60)
  FMOD initialized on nosound output
  Begin MonoManager ReloadAssembly
- - Completed reload, in 0.073 seconds
+ - Completed reload, in 0.081 seconds
  ERROR: Shader Sprites/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Sprites/Mask shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  ERROR: Shader Legacy Shaders/VertexLit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
@@ -42,7 +42,7 @@ ERROR: Shader Standard shader is not supported on this GPU (none of subshaders/f
  WARNING: Shader Unsupported: 'Standard' - All subshaders removed
  WARNING: Shader Did you use #pragma only_renderers and omit this platform?
  WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
- UnloadTime: 0.618724 ms
+ UnloadTime: 0.653365 ms
  ERROR: Shader UI/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
  requesting resize 84 x 84
  Setting up 1 worker threads for Enlighten.
@@ -50,7 +50,7 @@ Memory Statistics:
  [ALLOC_TEMP_TLS] TLS Allocator
  StackAllocators :
  [ALLOC_TEMP_MAIN]
- Peak usage frame count: [8.0 KB-16.0 KB]: 2006 frames, [16.0 KB-32.0 KB]: 34 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [8.0 KB-16.0 KB]: 13433 frames, [16.0 KB-32.0 KB]: 227 frames, [2.0 MB-4.0 MB]: 1 frames
  Initial Block Size 4.0 MB
  Current Block Size 4.0 MB
  Peak Allocated Bytes 2.0 MB
@@ -166,19 +166,19 @@ Memory Statistics:
  Peak Allocated Bytes 0 B
  Overflow Count 0
  [ALLOC_DEFAULT] Dual Thread Allocator
- Peak main deferred allocation count 33
+ Peak main deferred allocation count 40
  [ALLOC_BUCKET]
  Large Block size 4.0 MB
  Used Block count 1
  Peak Allocated bytes 0.9 MB
  [ALLOC_DEFAULT_MAIN]
- Peak usage frame count: [4.0 MB-8.0 MB]: 2041 frames
+ Peak usage frame count: [4.0 MB-8.0 MB]: 11923 frames, [8.0 MB-16.0 MB]: 1738 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 5.6 MB
+ Peak Allocated memory 9.6 MB
  Peak Large allocation bytes 0 B
  [ALLOC_DEFAULT_THREAD]
- Peak usage frame count: [16.0 MB-32.0 MB]: 2041 frames
+ Peak usage frame count: [16.0 MB-32.0 MB]: 13661 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 17.8 MB
@@ -210,13 +210,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 0.9 MB
  [ALLOC_GFX_MAIN]
- Peak usage frame count: [32.0 KB-64.0 KB]: 1993 frames, [64.0 KB-128.0 KB]: 48 frames
+ Peak usage frame count: [32.0 KB-64.0 KB]: 13084 frames, [64.0 KB-128.0 KB]: 577 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
- Peak Allocated memory 65.6 KB
+ Peak Allocated memory 66.3 KB
  Peak Large allocation bytes 0 B
  [ALLOC_GFX_THREAD]
- Peak usage frame count: [32.0 KB-64.0 KB]: 2041 frames
+ Peak usage frame count: [32.0 KB-64.0 KB]: 13661 frames
  Requested Block Size 16.0 MB
  Peak Block count 1
  Peak Allocated memory 39.6 KB
@@ -228,13 +228,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 0.9 MB
  [ALLOC_CACHEOBJECTS_MAIN]
- Peak usage frame count: [0.5 MB-1.0 MB]: 2041 frames
+ Peak usage frame count: [0.5 MB-1.0 MB]: 13661 frames
  Requested Block Size 4.0 MB
  Peak Block count 1
  Peak Allocated memory 0.6 MB
  Peak Large allocation bytes 0 B
  [ALLOC_CACHEOBJECTS_THREAD]
- Peak usage frame count: [0.5 MB-1.0 MB]: 2040 frames, [2.0 MB-4.0 MB]: 1 frames
+ Peak usage frame count: [0.5 MB-1.0 MB]: 13660 frames, [2.0 MB-4.0 MB]: 1 frames
  Requested Block Size 4.0 MB
  Peak Block count 1
  Peak Allocated memory 2.2 MB
@@ -246,13 +246,13 @@ Memory Statistics:
  Used Block count 1
  Peak Allocated bytes 0.9 MB
  [ALLOC_TYPETREE_MAIN]
- Peak usage frame count: [0-1.0 KB]: 2041 frames
+ Peak usage frame count: [0-1.0 KB]: 13661 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 1.0 KB
  Peak Large allocation bytes 0 B
  [ALLOC_TYPETREE_THREAD]
- Peak usage frame count: [1.0 KB-2.0 KB]: 2041 frames
+ Peak usage frame count: [1.0 KB-2.0 KB]: 13661 frames
  Requested Block Size 2.0 MB
  Peak Block count 1
  Peak Allocated memory 1.7 KB
run_logs/timers.json CHANGED
@@ -2,220 +2,220 @@
2
  "name": "root",
3
  "gauges": {
4
  "SnowballTarget.Policy.Entropy.mean": {
5
- "value": 1.328168511390686,
6
- "min": 1.328168511390686,
7
- "max": 1.6884623765945435,
8
- "count": 8
9
  },
10
  "SnowballTarget.Policy.Entropy.sum": {
11
- "value": 13543.333984375,
12
- "min": 8617.912109375,
13
- "max": 16077.0927734375,
14
- "count": 8
15
  },
16
  "SnowballTarget.Step.mean": {
17
- "value": 199984.0,
18
- "min": 129992.0,
19
- "max": 199984.0,
20
- "count": 8
21
  },
22
  "SnowballTarget.Step.sum": {
23
- "value": 199984.0,
24
- "min": 129992.0,
25
- "max": 199984.0,
26
- "count": 8
27
  },
28
  "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
29
- "value": 12.007621765136719,
30
- "min": 9.204183578491211,
31
- "max": 12.007621765136719,
32
- "count": 8
33
  },
34
  "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
35
- "value": 2341.486328125,
36
- "min": 736.334716796875,
37
- "max": 2408.5126953125,
38
- "count": 8
39
  },
40
  "SnowballTarget.Environment.EpisodeLength.mean": {
41
  "value": 199.0,
42
  "min": 199.0,
43
  "max": 199.0,
44
- "count": 8
45
  },
46
  "SnowballTarget.Environment.EpisodeLength.sum": {
47
  "value": 8756.0,
48
- "min": 4378.0,
49
  "max": 10945.0,
50
- "count": 8
51
  },
52
  "SnowballTarget.Environment.CumulativeReward.mean": {
53
- "value": 24.90909090909091,
54
- "min": 22.26923076923077,
55
- "max": 25.09090909090909,
56
- "count": 8
57
  },
58
  "SnowballTarget.Environment.CumulativeReward.sum": {
59
- "value": 1096.0,
60
- "min": 315.0,
61
- "max": 1380.0,
62
- "count": 8
63
  },
64
  "SnowballTarget.Policy.ExtrinsicReward.mean": {
65
- "value": 24.90909090909091,
66
- "min": 22.26923076923077,
67
- "max": 25.09090909090909,
68
- "count": 8
69
  },
70
  "SnowballTarget.Policy.ExtrinsicReward.sum": {
71
- "value": 1096.0,
72
- "min": 315.0,
73
- "max": 1380.0,
74
- "count": 8
75
  },
76
  "SnowballTarget.Losses.PolicyLoss.mean": {
77
- "value": 0.0660925427240299,
78
- "min": 0.06426675086982096,
79
- "max": 0.07081947505842917,
80
- "count": 8
81
  },
82
  "SnowballTarget.Losses.PolicyLoss.sum": {
83
- "value": 0.1321850854480598,
84
- "min": 0.06426675086982096,
85
- "max": 0.21118983861661572,
86
- "count": 8
87
  },
88
  "SnowballTarget.Losses.ValueLoss.mean": {
89
- "value": 0.20692843010320383,
90
- "min": 0.20692843010320383,
91
- "max": 0.24697394416187748,
92
- "count": 8
93
  },
94
  "SnowballTarget.Losses.ValueLoss.sum": {
95
- "value": 0.41385686020640766,
96
- "min": 0.24697394416187748,
97
- "max": 0.7215860576314084,
98
- "count": 8
99
  },
100
  "SnowballTarget.Policy.LearningRate.mean": {
101
- "value": 1.5580096883999985e-05,
102
- "min": 1.5580096883999985e-05,
103
- "max": 0.00017508006498399996,
104
- "count": 8
105
  },
106
  "SnowballTarget.Policy.LearningRate.sum": {
107
- "value": 3.116019376799997e-05,
108
- "min": 3.116019376799997e-05,
109
- "max": 0.00031716013656799994,
110
- "count": 8
111
  },
112
  "SnowballTarget.Policy.Epsilon.mean": {
113
- "value": 0.10311599999999999,
114
- "min": 0.10311599999999999,
115
- "max": 0.13501600000000002,
116
- "count": 8
117
  },
118
  "SnowballTarget.Policy.Epsilon.sum": {
119
- "value": 0.20623199999999997,
120
- "min": 0.13501600000000002,
121
- "max": 0.35224799999999995,
122
- "count": 8
123
  },
124
  "SnowballTarget.Policy.Beta.mean": {
125
- "value": 0.00016548839999999982,
126
- "min": 0.00016548839999999982,
127
- "max": 0.0017572984000000002,
128
- "count": 8
129
  },
130
  "SnowballTarget.Policy.Beta.sum": {
131
- "value": 0.00033097679999999965,
132
- "min": 0.00033097679999999965,
133
- "max": 0.0031852568000000003,
134
- "count": 8
135
  },
136
  "SnowballTarget.IsTraining.mean": {
137
  "value": 1.0,
138
  "min": 1.0,
139
  "max": 1.0,
140
- "count": 8
141
  },
142
  "SnowballTarget.IsTraining.sum": {
143
  "value": 1.0,
144
  "min": 1.0,
145
  "max": 1.0,
146
- "count": 8
147
  }
148
  },
149
  "metadata": {
150
  "timer_format_version": "0.1.0",
151
- "start_time_seconds": "1673952566",
152
  "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
153
- "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
154
  "mlagents_version": "0.29.0.dev0",
155
  "mlagents_envs_version": "0.29.0.dev0",
156
  "communication_protocol_version": "1.5.0",
157
  "pytorch_version": "1.8.1+cu102",
158
  "numpy_version": "1.21.6",
159
- "end_time_seconds": "1673952749"
160
  },
161
- "total": 183.44327956799998,
162
  "count": 1,
163
- "self": 0.38690096899995297,
164
  "children": {
165
  "run_training.setup": {
166
- "total": 0.11145493399999395,
167
  "count": 1,
168
- "self": 0.11145493399999395
169
  },
170
  "TrainerController.start_learning": {
171
- "total": 182.94492366500003,
172
  "count": 1,
173
- "self": 0.21657700199239116,
174
  "children": {
175
  "TrainerController._reset_env": {
176
- "total": 6.098904782999853,
177
  "count": 1,
178
- "self": 6.098904782999853
179
  },
180
  "TrainerController.advance": {
181
- "total": 176.50930990300776,
182
- "count": 6802,
183
- "self": 0.10504824200597795,
184
  "children": {
185
  "env_step": {
186
- "total": 176.40426166100178,
187
- "count": 6802,
188
- "self": 128.7467460399805,
189
  "children": {
190
  "SubprocessEnvManager._take_step": {
191
- "total": 47.54695558100752,
192
- "count": 6802,
193
- "self": 0.506306558009328,
194
  "children": {
195
  "TorchPolicy.evaluate": {
196
- "total": 47.040649022998196,
197
- "count": 6802,
198
- "self": 10.416061452002168,
199
  "children": {
200
  "TorchPolicy.sample_actions": {
201
- "total": 36.62458757099603,
202
- "count": 6802,
203
- "self": 36.62458757099603
204
  }
205
  }
206
  }
207
  }
208
  },
209
  "workers": {
210
- "total": 0.11056004001375186,
211
- "count": 6802,
212
  "self": 0.0,
213
  "children": {
214
  "worker_root": {
215
- "total": 182.45991812600755,
216
- "count": 6802,
217
  "is_parallel": true,
218
- "self": 99.34957993200283,
219
  "children": {
220
  "run_training.setup": {
221
  "total": 0.0,
@@ -224,48 +224,48 @@
224
  "self": 0.0,
225
  "children": {
226
  "steps_from_proto": {
227
- "total": 0.0017766769999525422,
228
  "count": 1,
229
  "is_parallel": true,
230
- "self": 0.0005972249998649204,
231
  "children": {
232
  "_process_rank_one_or_two_observation": {
233
- "total": 0.0011794520000876219,
234
  "count": 10,
235
  "is_parallel": true,
236
- "self": 0.0011794520000876219
237
  }
238
  }
239
  },
240
  "UnityEnvironment.step": {
241
- "total": 0.04119560999993155,
242
  "count": 1,
243
  "is_parallel": true,
244
- "self": 0.0005442609999590786,
245
  "children": {
246
  "UnityEnvironment._generate_step_input": {
247
- "total": 0.0003620250001858949,
248
  "count": 1,
249
  "is_parallel": true,
250
- "self": 0.0003620250001858949
251
  },
252
  "communicator.exchange": {
253
- "total": 0.03838651499995649,
254
  "count": 1,
255
  "is_parallel": true,
256
- "self": 0.03838651499995649
257
  },
258
  "steps_from_proto": {
259
- "total": 0.0019028089998300857,
260
  "count": 1,
261
  "is_parallel": true,
262
- "self": 0.0004959179996149032,
263
  "children": {
264
  "_process_rank_one_or_two_observation": {
265
- "total": 0.0014068910002151824,
266
  "count": 10,
267
  "is_parallel": true,
268
- "self": 0.0014068910002151824
269
  }
270
  }
271
  }
@@ -274,34 +274,34 @@
274
  }
275
  },
276
  "UnityEnvironment.step": {
277
- "total": 83.11033819400473,
278
- "count": 6801,
279
  "is_parallel": true,
280
- "self": 3.0566140609896593,
281
  "children": {
282
  "UnityEnvironment._generate_step_input": {
283
- "total": 1.8816506050081898,
284
- "count": 6801,
285
  "is_parallel": true,
286
- "self": 1.8816506050081898
287
  },
288
  "communicator.exchange": {
289
- "total": 67.01576045099796,
290
- "count": 6801,
291
  "is_parallel": true,
292
- "self": 67.01576045099796
293
  },
294
  "steps_from_proto": {
295
- "total": 11.156313077008917,
296
- "count": 6801,
297
  "is_parallel": true,
298
- "self": 2.3903602610273538,
299
  "children": {
300
  "_process_rank_one_or_two_observation": {
301
- "total": 8.765952815981564,
302
- "count": 68010,
303
  "is_parallel": true,
304
- "self": 8.765952815981564
305
  }
306
  }
307
  }
@@ -316,9 +316,9 @@
316
  }
317
  },
318
  "trainer_threads": {
319
- "total": 4.488800004764926e-05,
320
  "count": 1,
321
- "self": 4.488800004764926e-05,
322
  "children": {
323
  "thread_root": {
324
  "total": 0.0,
@@ -327,36 +327,36 @@
327
  "self": 0.0,
328
  "children": {
329
  "trainer_advance": {
330
- "total": 175.15293219802106,
331
- "count": 160925,
332
  "is_parallel": true,
333
- "self": 4.2363684649897095,
334
  "children": {
335
  "process_trajectory": {
336
- "total": 110.09657355303102,
337
- "count": 160925,
338
  "is_parallel": true,
339
- "self": 109.6392081670308,
340
  "children": {
341
  "RLTrainer._checkpoint": {
342
- "total": 0.4573653860002196,
343
- "count": 2,
344
  "is_parallel": true,
345
- "self": 0.4573653860002196
346
  }
347
  }
348
  },
349
  "_update_policy": {
350
- "total": 60.81999018000033,
351
- "count": 16,
352
  "is_parallel": true,
353
- "self": 24.29063101199381,
354
  "children": {
355
  "TorchPPOOptimizer.update": {
356
- "total": 36.52935916800652,
357
- "count": 2715,
358
  "is_parallel": true,
359
- "self": 36.52935916800652
360
  }
361
  }
362
  }
@@ -367,14 +367,14 @@
367
  }
368
  },
369
  "TrainerController._save_models": {
370
- "total": 0.12008708899998055,
371
  "count": 1,
372
- "self": 0.0010848530000657775,
373
  "children": {
374
  "RLTrainer._checkpoint": {
375
- "total": 0.11900223599991477,
376
  "count": 1,
377
- "self": 0.11900223599991477
378
  }
379
  }
380
  }
 
2
  "name": "root",
3
  "gauges": {
4
  "SnowballTarget.Policy.Entropy.mean": {
5
+ "value": 1.135367512702942,
6
+ "min": 1.135367512702942,
7
+ "max": 2.887923240661621,
8
+ "count": 50
9
  },
10
  "SnowballTarget.Policy.Entropy.sum": {
11
+ "value": 11589.83203125,
12
+ "min": 10198.53515625,
13
+ "max": 31862.45703125,
14
+ "count": 50
15
  },
16
  "SnowballTarget.Step.mean": {
17
+ "value": 499912.0,
18
+ "min": 9952.0,
19
+ "max": 499912.0,
20
+ "count": 50
21
  },
22
  "SnowballTarget.Step.sum": {
23
+ "value": 499912.0,
24
+ "min": 9952.0,
25
+ "max": 499912.0,
26
+ "count": 50
27
  },
28
  "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
29
+ "value": 2.7458341121673584,
30
+ "min": 0.106269471347332,
31
+ "max": 2.7458341121673584,
32
+ "count": 50
33
  },
34
  "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
35
+ "value": 277.3292541503906,
36
+ "min": 10.308138847351074,
37
+ "max": 278.6896057128906,
38
+ "count": 50
39
  },
40
  "SnowballTarget.Environment.EpisodeLength.mean": {
41
  "value": 199.0,
42
  "min": 199.0,
43
  "max": 199.0,
44
+ "count": 50
45
  },
46
  "SnowballTarget.Environment.EpisodeLength.sum": {
47
  "value": 8756.0,
48
+ "min": 8756.0,
49
  "max": 10945.0,
50
+ "count": 50
51
  },
52
  "SnowballTarget.Environment.CumulativeReward.mean": {
53
+ "value": 27.037735849056602,
54
+ "min": 3.0681818181818183,
55
+ "max": 27.434782608695652,
56
+ "count": 50
57
  },
58
  "SnowballTarget.Environment.CumulativeReward.sum": {
59
+ "value": 1433.0,
60
+ "min": 135.0,
61
+ "max": 1457.0,
62
+ "count": 50
63
  },
64
  "SnowballTarget.Policy.ExtrinsicReward.mean": {
65
+ "value": 27.037735849056602,
66
+ "min": 3.0681818181818183,
67
+ "max": 27.434782608695652,
68
+ "count": 50
69
  },
70
  "SnowballTarget.Policy.ExtrinsicReward.sum": {
71
+ "value": 1433.0,
72
+ "min": 135.0,
73
+ "max": 1457.0,
74
+ "count": 50
75
  },
76
  "SnowballTarget.Losses.PolicyLoss.mean": {
77
+ "value": 0.05115493647856912,
78
+ "min": 0.04354346607116681,
79
+ "max": 0.056254119427039236,
80
+ "count": 50
81
  },
82
  "SnowballTarget.Losses.PolicyLoss.sum": {
83
+ "value": 0.05115493647856912,
84
+ "min": 0.04354346607116681,
85
+ "max": 0.10711522025230806,
86
+ "count": 50
87
  },
88
  "SnowballTarget.Losses.ValueLoss.mean": {
89
+ "value": 0.1038912947563564,
90
+ "min": 0.06381678456331001,
91
+ "max": 0.16697393173680586,
92
+ "count": 50
93
  },
94
  "SnowballTarget.Losses.ValueLoss.sum": {
95
+ "value": 0.1038912947563564,
96
+ "min": 0.06381678456331001,
97
+ "max": 0.2940784593715387,
98
+ "count": 50
99
  },
100
  "SnowballTarget.Policy.LearningRate.mean": {
101
+ "value": 1.5984098401599997e-05,
102
+ "min": 1.5984098401599997e-05,
103
+ "max": 0.00098240000176,
104
+ "count": 50
105
  },
106
  "SnowballTarget.Policy.LearningRate.sum": {
107
+ "value": 1.5984098401599997e-05,
108
+ "min": 1.5984098401599997e-05,
109
+ "max": 0.0017391680260832,
110
+ "count": 50
111
  },
112
  "SnowballTarget.Policy.Epsilon.mean": {
113
+ "value": 0.10159840000000002,
114
+ "min": 0.10159840000000002,
115
+ "max": 0.19823999999999997,
116
+ "count": 50
117
  },
118
  "SnowballTarget.Policy.Epsilon.sum": {
119
+ "value": 0.10159840000000002,
120
+ "min": 0.10159840000000002,
121
+ "max": 0.3739168,
122
+ "count": 50
123
  },
124
  "SnowballTarget.Policy.Beta.mean": {
125
+ "value": 8.976015999999999e-05,
126
+ "min": 8.976015999999999e-05,
127
+ "max": 0.004912176000000002,
128
+ "count": 50
129
  },
130
  "SnowballTarget.Policy.Beta.sum": {
131
+ "value": 8.976015999999999e-05,
132
+ "min": 8.976015999999999e-05,
133
+ "max": 0.008698448319999998,
134
+ "count": 50
135
  },
136
  "SnowballTarget.IsTraining.mean": {
137
  "value": 1.0,
138
  "min": 1.0,
139
  "max": 1.0,
140
+ "count": 50
141
  },
142
  "SnowballTarget.IsTraining.sum": {
143
  "value": 1.0,
144
  "min": 1.0,
145
  "max": 1.0,
146
+ "count": 50
147
  }
148
  },
149
  "metadata": {
150
  "timer_format_version": "0.1.0",
151
+ "start_time_seconds": "1673953146",
152
  "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
153
+ "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
154
  "mlagents_version": "0.29.0.dev0",
155
  "mlagents_envs_version": "0.29.0.dev0",
156
  "communication_protocol_version": "1.5.0",
157
  "pytorch_version": "1.8.1+cu102",
158
  "numpy_version": "1.21.6",
159
+ "end_time_seconds": "1673954277"
160
  },
161
+ "total": 1130.721411611,
162
  "count": 1,
163
+ "self": 0.3841987599994354,
164
  "children": {
165
  "run_training.setup": {
166
+ "total": 0.11357385500014061,
167
  "count": 1,
168
+ "self": 0.11357385500014061
169
  },
170
  "TrainerController.start_learning": {
171
+ "total": 1130.2236389960003,
172
  "count": 1,
173
+ "self": 1.3224685849850175,
174
  "children": {
175
  "TrainerController._reset_env": {
176
+ "total": 6.175763149999966,
177
  "count": 1,
178
+ "self": 6.175763149999966
179
  },
180
  "TrainerController.advance": {
181
+ "total": 1122.606961164015,
182
+ "count": 45534,
183
+ "self": 0.6573774920002506,
184
  "children": {
185
  "env_step": {
186
+ "total": 1121.9495836720148,
187
+ "count": 45534,
188
+ "self": 753.3429917339977,
189
  "children": {
190
  "SubprocessEnvManager._take_step": {
191
+ "total": 367.9117610980047,
192
+ "count": 45534,
193
+ "self": 3.362679239029603,
194
  "children": {
195
  "TorchPolicy.evaluate": {
196
+ "total": 364.5490818589751,
197
+ "count": 45534,
198
+ "self": 79.10298805595357,
199
  "children": {
200
  "TorchPolicy.sample_actions": {
201
+ "total": 285.44609380302154,
202
+ "count": 45534,
203
+ "self": 285.44609380302154
204
  }
205
  }
206
  }
207
  }
208
  },
209
  "workers": {
210
+ "total": 0.6948308400124006,
211
+ "count": 45534,
212
  "self": 0.0,
213
  "children": {
214
  "worker_root": {
215
+ "total": 1126.9902016269893,
216
+ "count": 45534,
217
  "is_parallel": true,
218
+ "self": 565.6729538380469,
219
  "children": {
220
  "run_training.setup": {
221
  "total": 0.0,
 
224
  "self": 0.0,
225
  "children": {
226
  "steps_from_proto": {
227
+ "total": 0.0018085649999193265,
228
  "count": 1,
229
  "is_parallel": true,
230
+ "self": 0.0005987880003885948,
231
  "children": {
232
  "_process_rank_one_or_two_observation": {
233
+ "total": 0.0012097769995307317,
234
  "count": 10,
235
  "is_parallel": true,
236
+ "self": 0.0012097769995307317
237
  }
238
  }
239
  },
240
  "UnityEnvironment.step": {
241
+ "total": 0.03667948699967383,
242
  "count": 1,
243
  "is_parallel": true,
244
+ "self": 0.00035762999914368265,
245
  "children": {
246
  "UnityEnvironment._generate_step_input": {
247
+ "total": 0.000469673000225157,
248
  "count": 1,
249
  "is_parallel": true,
250
+ "self": 0.000469673000225157
251
  },
252
  "communicator.exchange": {
253
+ "total": 0.0343268070000704,
254
  "count": 1,
255
  "is_parallel": true,
256
+ "self": 0.0343268070000704
257
  },
258
  "steps_from_proto": {
259
+ "total": 0.0015253770002345846,
260
  "count": 1,
261
  "is_parallel": true,
262
+ "self": 0.00040654099984749337,
263
  "children": {
264
  "_process_rank_one_or_two_observation": {
265
+ "total": 0.0011188360003870912,
266
  "count": 10,
267
  "is_parallel": true,
268
+ "self": 0.0011188360003870912
269
  }
270
  }
271
  }
 
274
  }
275
  },
276
  "UnityEnvironment.step": {
277
+ "total": 561.3172477889425,
278
+ "count": 45533,
279
  "is_parallel": true,
280
+ "self": 20.925553688022774,
281
  "children": {
282
  "UnityEnvironment._generate_step_input": {
283
+ "total": 12.946825961988452,
284
+ "count": 45533,
285
  "is_parallel": true,
286
+ "self": 12.946825961988452
287
  },
288
  "communicator.exchange": {
289
+ "total": 445.0801878959833,
290
+ "count": 45533,
291
  "is_parallel": true,
292
+ "self": 445.0801878959833
293
  },
294
  "steps_from_proto": {
295
+ "total": 82.364680242948,
296
+ "count": 45533,
297
  "is_parallel": true,
298
+ "self": 16.67430063593565,
299
  "children": {
300
  "_process_rank_one_or_two_observation": {
301
+ "total": 65.69037960701235,
302
+ "count": 455330,
303
  "is_parallel": true,
304
+ "self": 65.69037960701235
305
  }
306
  }
307
  }
 
316
  }
317
  },
318
  "trainer_threads": {
319
+ "total": 4.785600003742729e-05,
320
  "count": 1,
321
+ "self": 4.785600003742729e-05,
322
  "children": {
323
  "thread_root": {
324
  "total": 0.0,
 
327
  "self": 0.0,
328
  "children": {
329
  "trainer_advance": {
330
+ "total": 1113.9459555088974,
331
+ "count": 1033906,
332
  "is_parallel": true,
333
+ "self": 26.339789770306652,
334
  "children": {
335
  "process_trajectory": {
336
+ "total": 670.5739063215933,
337
+ "count": 1033906,
338
  "is_parallel": true,
339
+ "self": 668.849687068594,
340
  "children": {
341
  "RLTrainer._checkpoint": {
342
+ "total": 1.724219252999319,
343
+ "count": 10,
344
  "is_parallel": true,
345
+ "self": 1.724219252999319
346
  }
347
  }
348
  },
349
  "_update_policy": {
350
+ "total": 417.0322594169975,
351
+ "count": 56,
352
  "is_parallel": true,
353
+ "self": 179.76885711297064,
354
  "children": {
355
  "TorchPPOOptimizer.update": {
356
+ "total": 237.26340230402684,
357
+ "count": 9505,
358
  "is_parallel": true,
359
+ "self": 237.26340230402684
360
  }
361
  }
362
  }
 
367
  }
368
  },
369
  "TrainerController._save_models": {
370
+ "total": 0.11839824100024998,
371
  "count": 1,
372
+ "self": 0.0008372170004804502,
373
  "children": {
374
  "RLTrainer._checkpoint": {
375
+ "total": 0.11756102399976953,
376
  "count": 1,
377
+ "self": 0.11756102399976953
378
  }
379
  }
380
  }
run_logs/training_status.json CHANGED
@@ -2,67 +2,103 @@
  "SnowballTarget": {
  "checkpoints": [
  {
- "steps": 49936,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-49936.onnx",
- "reward": 12.272727272727273,
- "creation_time": 1673952364.9932427,
+ "steps": 99896,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-99896.onnx",
+ "reward": 15.363636363636363,
+ "creation_time": 1673953375.902,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-49936.pt"
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-99896.pt"
  ]
  },
  {
- "steps": 99960,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-99960.onnx",
- "reward": 20.363636363636363,
- "creation_time": 1673952484.5346525,
+ "steps": 149984,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-149984.onnx",
+ "reward": 22.09090909090909,
+ "creation_time": 1673953491.7287333,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-99960.pt"
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-149984.pt"
  ]
  },
  {
- "steps": 126104,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-125592.onnx",
- "reward": 22.318181818181817,
- "creation_time": 1673952546.7718027,
+ "steps": 199984,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-199984.onnx",
+ "reward": 22.75609756097561,
+ "creation_time": 1673953599.200578,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-125592.pt"
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-199984.pt"
  ]
  },
  {
- "steps": 149984,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-149984.onnx",
- "reward": 24.136363636363637,
- "creation_time": 1673952631.1833293,
+ "steps": 249880,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-249880.onnx",
+ "reward": 24.818181818181817,
+ "creation_time": 1673953713.4827852,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-149984.pt"
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-249880.pt"
  ]
  },
  {
- "steps": 199984,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-199984.onnx",
- "reward": 24.636363636363637,
- "creation_time": 1673952749.3621132,
+ "steps": 299968,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-299968.onnx",
+ "reward": 26.818181818181817,
+ "creation_time": 1673953829.2000349,
+ "auxillary_file_paths": [
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-299968.pt"
+ ]
+ },
+ {
+ "steps": 349928,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-349928.onnx",
+ "reward": 26.204545454545453,
+ "creation_time": 1673953939.8303313,
+ "auxillary_file_paths": [
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-349928.pt"
+ ]
+ },
+ {
+ "steps": 399968,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-399968.onnx",
+ "reward": 25.77777777777778,
+ "creation_time": 1673954052.8616538,
+ "auxillary_file_paths": [
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-399968.pt"
+ ]
+ },
+ {
+ "steps": 449952,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-449952.onnx",
+ "reward": 27.454545454545453,
+ "creation_time": 1673954165.580728,
+ "auxillary_file_paths": [
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-449952.pt"
+ ]
+ },
+ {
+ "steps": 499912,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-499912.onnx",
+ "reward": 27.136363636363637,
+ "creation_time": 1673954276.701201,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-199984.pt"
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-499912.pt"
  ]
  },
  {
- "steps": 200368,
- "file_path": "results/SnowballTarget1/SnowballTarget/SnowballTarget-200368.onnx",
- "reward": 24.636363636363637,
- "creation_time": 1673952749.5381525,
+ "steps": 500808,
+ "file_path": "results/SnowballTarget2/SnowballTarget/SnowballTarget-500808.onnx",
+ "reward": 27.136363636363637,
+ "creation_time": 1673954276.9201777,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-200368.pt"
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-500808.pt"
  ]
  }
  ],
  "final_checkpoint": {
- "steps": 200368,
- "file_path": "results/SnowballTarget1/SnowballTarget.onnx",
- "reward": 24.636363636363637,
- "creation_time": 1673952749.5381525,
+ "steps": 500808,
+ "file_path": "results/SnowballTarget2/SnowballTarget.onnx",
+ "reward": 27.136363636363637,
+ "creation_time": 1673954276.9201777,
  "auxillary_file_paths": [
- "results/SnowballTarget1/SnowballTarget/SnowballTarget-200368.pt"
+ "results/SnowballTarget2/SnowballTarget/SnowballTarget-500808.pt"
  ]
  }
  },