atorre committed
Commit 63d5a65
1 Parent(s): 895390a

First training of a PPO agent on the Pyramids environment.

Pyramids.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7f6b5593ad7fb701e9e8d73427cce4d9071aabf2e3fd844bf6998a6e247c875
+ size 1417437
Pyramids/Pyramids-499948.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:152eadcd2f7f21e8a3039ca9e7e24b5d70aac974d4c86c1e79c6f9383344511e
+ size 1417437
Pyramids/Pyramids-499948.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:753f79df88aff5dd639be2d99cdab47a0c1c7e3e2367059c2cf12607a6c664c7
+ size 8651427
Pyramids/Pyramids-780922.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f7f6b5593ad7fb701e9e8d73427cce4d9071aabf2e3fd844bf6998a6e247c875
+ size 1417437
Pyramids/Pyramids-780922.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcdbb1beaacef82ccc7328edb200fd1ee6d2ea064c014d896c9d29280dbd3321
+ size 8651427
Pyramids/checkpoint.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcdbb1beaacef82ccc7328edb200fd1ee6d2ea064c014d896c9d29280dbd3321
+ size 8651427
Pyramids/events.out.tfevents.1673944422.2694b56eafa7.23246.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed92ba6bd0e027c5471a8c46112ef1ae9632a7ddb49f7749b77a4d6434f5e880
+ size 260691
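The event file above is the raw TensorBoard log for this run, stored through Git LFS like the other binaries. As a minimal sketch (not part of the original commit), assuming the repository has been fetched locally and the `tensorboard` package is installed, the logged scalar curves can be read back like this:

```
# Hedged sketch: read the committed TensorBoard event file with the standard
# EventAccumulator API; "Pyramids" is the directory holding events.out.tfevents.*.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("Pyramids")
acc.Reload()
scalar_tags = acc.Tags()["scalars"]
print(scalar_tags)  # whatever scalar tags ML-Agents wrote for this run
if scalar_tags:
    for event in acc.Scalars(scalar_tags[0]):
        print(event.step, event.value)
```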
README.md ADDED
@@ -0,0 +1,31 @@
+
+ ---
+ tags:
+ - unity-ml-agents
+ - ml-agents
+ - deep-reinforcement-learning
+ - reinforcement-learning
+ - ML-Agents-Pyramids
+ library_name: ml-agents
+ ---
+
+ # **ppo** Agent playing **Pyramids**
+ This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).
+
+ ## Usage (with ML-Agents)
+ Documentation: https://github.com/huggingface/ml-agents#get-started
+ We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub:
+
+
+ ### Resume the training
+ ```
+ mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
+ ```
+ ### Watch your Agent play
+ You can watch your agent **playing directly in your browser**:
+
+ 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
+ 2. Write your model_id: atorre/ppo-Pyramids
+ 3. Select your *.nn or *.onnx file
+ 4. Click on Watch the agent play 👀
+
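All files in this commit are Git LFS pointers, so the binaries need to be pulled from the Hub before local use. A minimal sketch (an illustration, not part of the original model card), assuming the repo id atorre/ppo-Pyramids referenced above and the huggingface_hub package:

```
# Hedged sketch: fetch this repository (ONNX checkpoints, configs, run logs) locally.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(repo_id="atorre/ppo-Pyramids")
print(local_dir)  # folder containing Pyramids.onnx, configuration.yaml, run_logs/, ...
```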
config.json ADDED
@@ -0,0 +1 @@
+ {"default_settings": null, "behaviors": {"Pyramids": {"trainer_type": "ppo", "hyperparameters": {"batch_size": 128, "buffer_size": 2048, "learning_rate": 0.0003, "beta": 0.01, "epsilon": 0.2, "lambd": 0.95, "num_epoch": 3, "learning_rate_schedule": "linear", "beta_schedule": "linear", "epsilon_schedule": "linear"}, "network_settings": {"normalize": false, "hidden_units": 512, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "reward_signals": {"extrinsic": {"gamma": 0.99, "strength": 1.0, "network_settings": {"normalize": false, "hidden_units": 128, "num_layers": 2, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}}, "rnd": {"gamma": 0.99, "strength": 0.01, "network_settings": {"normalize": false, "hidden_units": 64, "num_layers": 3, "vis_encode_type": "simple", "memory": null, "goal_conditioning_type": "hyper", "deterministic": false}, "learning_rate": 0.0001, "encoding_size": null}}, "init_path": null, "keep_checkpoints": 5, "checkpoint_interval": 500000, "max_steps": 1000000, "time_horizon": 128, "summary_freq": 30000, "threaded": false, "self_play": null, "behavioral_cloning": null}}, "env_settings": {"env_path": "./training-envs-executables/linux/Pyramids/Pyramids", "env_args": null, "base_port": 5005, "num_envs": 1, "num_areas": 1, "seed": -1, "max_lifetime_restarts": 10, "restarts_rate_limit_n": 1, "restarts_rate_limit_period_s": 60}, "engine_settings": {"width": 84, "height": 84, "quality_level": 5, "time_scale": 20, "target_frame_rate": -1, "capture_frame_rate": 60, "no_graphics": true}, "environment_parameters": null, "checkpoint_settings": {"run_id": "Pyramids Training", "initialize_from": null, "load_model": false, "resume": false, "force": false, "train_model": false, "inference": false, "results_dir": "results"}, "torch_settings": {"device": null}, "debug": false}
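config.json captures the full run settings as a single JSON object: a PPO trainer with batch_size 128, buffer_size 2048, a linear learning-rate schedule, an RND curiosity reward, and max_steps 1000000. A minimal sketch (an addition for illustration; key names come from the contents above) for inspecting it locally:

```
# Hedged sketch: inspect the committed run settings; keys match config.json above.
import json

with open("config.json") as f:
    cfg = json.load(f)

pyramids = cfg["behaviors"]["Pyramids"]
print(pyramids["trainer_type"])      # "ppo"
print(pyramids["hyperparameters"])   # batch_size=128, buffer_size=2048, lr=3e-4, ...
print(pyramids["max_steps"])         # 1000000
```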
configuration.yaml ADDED
@@ -0,0 +1,88 @@
+ default_settings: null
+ behaviors:
+   Pyramids:
+     trainer_type: ppo
+     hyperparameters:
+       batch_size: 128
+       buffer_size: 2048
+       learning_rate: 0.0003
+       beta: 0.01
+       epsilon: 0.2
+       lambd: 0.95
+       num_epoch: 3
+       learning_rate_schedule: linear
+       beta_schedule: linear
+       epsilon_schedule: linear
+     network_settings:
+       normalize: false
+       hidden_units: 512
+       num_layers: 2
+       vis_encode_type: simple
+       memory: null
+       goal_conditioning_type: hyper
+       deterministic: false
+     reward_signals:
+       extrinsic:
+         gamma: 0.99
+         strength: 1.0
+         network_settings:
+           normalize: false
+           hidden_units: 128
+           num_layers: 2
+           vis_encode_type: simple
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+       rnd:
+         gamma: 0.99
+         strength: 0.01
+         network_settings:
+           normalize: false
+           hidden_units: 64
+           num_layers: 3
+           vis_encode_type: simple
+           memory: null
+           goal_conditioning_type: hyper
+           deterministic: false
+         learning_rate: 0.0001
+         encoding_size: null
+     init_path: null
+     keep_checkpoints: 5
+     checkpoint_interval: 500000
+     max_steps: 1000000
+     time_horizon: 128
+     summary_freq: 30000
+     threaded: false
+     self_play: null
+     behavioral_cloning: null
+ env_settings:
+   env_path: ./training-envs-executables/linux/Pyramids/Pyramids
+   env_args: null
+   base_port: 5005
+   num_envs: 1
+   num_areas: 1
+   seed: -1
+   max_lifetime_restarts: 10
+   restarts_rate_limit_n: 1
+   restarts_rate_limit_period_s: 60
+ engine_settings:
+   width: 84
+   height: 84
+   quality_level: 5
+   time_scale: 20
+   target_frame_rate: -1
+   capture_frame_rate: 60
+   no_graphics: true
+ environment_parameters: null
+ checkpoint_settings:
+   run_id: Pyramids Training
+   initialize_from: null
+   load_model: false
+   resume: false
+   force: false
+   train_model: false
+   inference: false
+   results_dir: results
+ torch_settings:
+   device: null
+ debug: false
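configuration.yaml holds the same trainer settings in the YAML form that mlagents-learn consumes (the exact launch command for this run is recorded in run_logs/timers.json below). As an illustrative sketch rather than part of the commit, assuming PyYAML is available, the config can be loaded and adjusted before resuming with the command shown in the README:

```
# Hedged sketch: tweak the committed trainer config before a --resume run.
# Structure matches configuration.yaml above; PyYAML is an assumption.
import yaml

with open("configuration.yaml") as f:
    cfg = yaml.safe_load(f)

# Example change only: extend training beyond the original 1,000,000 steps.
cfg["behaviors"]["Pyramids"]["max_steps"] = 2_000_000

with open("configuration_resume.yaml", "w") as f:
    yaml.safe_dump(cfg, f, sort_keys=False)
# Then: mlagents-learn configuration_resume.yaml --run-id="Pyramids Training" --resume
```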
run_logs/Player-0.log ADDED
@@ -0,0 +1,76 @@
+ Mono path[0] = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/Managed'
+ Mono config path = '/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/MonoBleedingEdge/etc'
+ Found 1 interfaces on host : 0) 172.28.0.2
+ Multi-casting "[IP] 172.28.0.2 [Port] 55227 [Flags] 2 [Guid] 4117401756 [EditorId] 764847374 [Version] 1048832 [Id] LinuxPlayer(13,172.28.0.2) [Debug] 0 [PackageName] LinuxPlayer [ProjectName] UnityEnvironment" to [225.0.0.222:54997]...
+ Preloaded 'lib_burst_generated.so'
+ Preloaded 'libgrpc_csharp_ext.x64.so'
+ PlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies
+ PlayerPrefs - Creating folder: /root/.config/unity3d/Unity Technologies/UnityEnvironment
+ Unable to load player prefs
+ Initialize engine version: 2021.3.5f1 (40eb3a945986)
+ [Subsystems] Discovering subsystems at path /content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids_Data/UnitySubsystems
+ Forcing GfxDevice: Null
+ GfxDevice: creating device client; threaded=0; jobified=0
+ NullGfxDevice:
+ Version: NULL 1.0 [1.0]
+ Renderer: Null Device
+ Vendor: Unity Technologies
+ ALSA lib confmisc.c:767:(parse_card) cannot find card '0'
+ ALSA lib conf.c:4528:(_snd_config_evaluate) function snd_func_card_driver returned error: No such file or directory
+ ALSA lib confmisc.c:392:(snd_func_concat) error evaluating strings
+ ALSA lib conf.c:4528:(_snd_config_evaluate) function snd_func_concat returned error: No such file or directory
+ ALSA lib confmisc.c:1246:(snd_func_refer) error evaluating name
+ ALSA lib conf.c:4528:(_snd_config_evaluate) function snd_func_refer returned error: No such file or directory
+ ALSA lib conf.c:5007:(snd_config_expand) Evaluate error: No such file or directory
+ ALSA lib pcm.c:2495:(snd_pcm_open_noupdate) Unknown PCM default
+ FMOD failed to initialize the output device.: "Error initializing output device. " (60)
+ Forced to initialize FMOD to to the device driver's system output rate 48000, this may impact performance and/or give inconsistent experiences compared to selected sample rate 48000
+ ALSA lib confmisc.c:767:(parse_card) cannot find card '0'
+ ALSA lib conf.c:4528:(_snd_config_evaluate) function snd_func_card_driver returned error: No such file or directory
+ ALSA lib confmisc.c:392:(snd_func_concat) error evaluating strings
+ ALSA lib conf.c:4528:(_snd_config_evaluate) function snd_func_concat returned error: No such file or directory
+ ALSA lib confmisc.c:1246:(snd_func_refer) error evaluating name
+ ALSA lib conf.c:4528:(_snd_config_evaluate) function snd_func_refer returned error: No such file or directory
+ ALSA lib conf.c:5007:(snd_config_expand) Evaluate error: No such file or directory
+ ALSA lib pcm.c:2495:(snd_pcm_open_noupdate) Unknown PCM default
+ FMOD failed to initialize the output device.: "Error initializing output device. " (60)
+ FMOD initialized on nosound output
+ Begin MonoManager ReloadAssembly
+ - Completed reload, in 0.127 seconds
+ ERROR: Shader Sprites/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
+ ERROR: Shader Sprites/Mask shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
+ ERROR: Shader Legacy Shaders/VertexLit shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
+ WARNING: Shader Unsupported: 'Autodesk Interactive' - All subshaders removed
+ WARNING: Shader Did you use #pragma only_renderers and omit this platform?
+ WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
+ ERROR: Shader Autodesk Interactive shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
+ WARNING: Shader Unsupported: 'Autodesk Interactive' - All subshaders removed
+ WARNING: Shader Did you use #pragma only_renderers and omit this platform?
+ WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
+ WARNING: Shader Unsupported: 'Legacy Shaders/Diffuse' - All subshaders removed
+ WARNING: Shader Did you use #pragma only_renderers and omit this platform?
+ WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
+ ERROR: Shader Legacy Shaders/Diffuse shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
+ WARNING: Shader Unsupported: 'Legacy Shaders/Diffuse' - All subshaders removed
+ WARNING: Shader Did you use #pragma only_renderers and omit this platform?
+ WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
+ WARNING: Shader Unsupported: 'ML-Agents/GridPattern' - All subshaders removed
+ WARNING: Shader Did you use #pragma only_renderers and omit this platform?
+ WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
+ ERROR: Shader ML-Agents/GridPattern shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
+ WARNING: Shader Unsupported: 'ML-Agents/GridPattern' - All subshaders removed
+ WARNING: Shader Did you use #pragma only_renderers and omit this platform?
+ WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
+ WARNING: Shader Unsupported: 'Standard' - All subshaders removed
+ WARNING: Shader Did you use #pragma only_renderers and omit this platform?
+ WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
+ ERROR: Shader Standard shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
+ WARNING: Shader Unsupported: 'Standard' - All subshaders removed
+ WARNING: Shader Did you use #pragma only_renderers and omit this platform?
+ WARNING: Shader If subshaders removal was intentional, you may have forgotten turning Fallback off?
+ UnloadTime: 1.312864 ms
+ ERROR: Shader UI/Default shader is not supported on this GPU (none of subshaders/fallbacks are suitable)
+ requesting resize 84 x 84
+ Setting up 1 worker threads for Enlighten.
+ PlayerConnection::CleanupXIO: fatal IO error 11 (Resource temporarily unavailable) on X server ":0"
+ after 234 requests (234 known processed) with 0 events remaining.
run_logs/timers.json ADDED
@@ -0,0 +1,405 @@
+ {
+ "name": "root",
+ "gauges": {
+ "Pyramids.Policy.Entropy.mean": {
+ "value": 0.4624595046043396,
+ "min": 0.4624595046043396,
+ "max": 1.3868093490600586,
+ "count": 26
+ },
+ "Pyramids.Policy.Entropy.sum": {
+ "value": 13947.7783203125,
+ "min": 13947.7783203125,
+ "max": 42070.25,
+ "count": 26
+ },
+ "Pyramids.Step.mean": {
+ "value": 779972.0,
+ "min": 29952.0,
+ "max": 779972.0,
+ "count": 26
+ },
+ "Pyramids.Step.sum": {
+ "value": 779972.0,
+ "min": 29952.0,
+ "max": 779972.0,
+ "count": 26
+ },
+ "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
+ "value": 0.568676233291626,
+ "min": -0.14658166468143463,
+ "max": 0.568676233291626,
+ "count": 26
+ },
+ "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
+ "value": 155.81729125976562,
+ "min": -34.73985290527344,
+ "max": 155.81729125976562,
+ "count": 26
+ },
+ "Pyramids.Policy.RndValueEstimate.mean": {
+ "value": 0.015208727680146694,
+ "min": 0.0010243135038763285,
+ "max": 0.271362841129303,
+ "count": 26
+ },
+ "Pyramids.Policy.RndValueEstimate.sum": {
+ "value": 4.167191505432129,
+ "min": 0.27144306898117065,
+ "max": 65.39844512939453,
+ "count": 26
+ },
+ "Pyramids.Losses.PolicyLoss.mean": {
+ "value": 0.06873180995139086,
+ "min": 0.06573921082911381,
+ "max": 0.07225488690293141,
+ "count": 26
+ },
+ "Pyramids.Losses.PolicyLoss.sum": {
+ "value": 1.0309771492708628,
+ "min": 0.4892529376180998,
+ "max": 1.060590602826172,
+ "count": 26
+ },
+ "Pyramids.Losses.ValueLoss.mean": {
+ "value": 0.014059386240680595,
+ "min": 0.0009458740503079729,
+ "max": 0.015242839636768965,
+ "count": 26
+ },
+ "Pyramids.Losses.ValueLoss.sum": {
+ "value": 0.21089079361020893,
+ "min": 0.010317290585335015,
+ "max": 0.2133997549147655,
+ "count": 26
+ },
+ "Pyramids.Policy.LearningRate.mean": {
+ "value": 7.055377648209996e-05,
+ "min": 7.055377648209996e-05,
+ "max": 0.00029515063018788575,
+ "count": 26
+ },
+ "Pyramids.Policy.LearningRate.sum": {
+ "value": 0.0010583066472314995,
+ "min": 0.0010583066472314995,
+ "max": 0.0033750247749918,
+ "count": 26
+ },
+ "Pyramids.Policy.Epsilon.mean": {
+ "value": 0.12351789999999999,
+ "min": 0.12351789999999999,
+ "max": 0.19838354285714285,
+ "count": 26
+ },
+ "Pyramids.Policy.Epsilon.sum": {
+ "value": 1.8527684999999998,
+ "min": 1.3886848,
+ "max": 2.4432375,
+ "count": 26
+ },
+ "Pyramids.Policy.Beta.mean": {
+ "value": 0.00235943821,
+ "min": 0.00235943821,
+ "max": 0.00983851593142857,
+ "count": 26
+ },
+ "Pyramids.Policy.Beta.sum": {
+ "value": 0.03539157315,
+ "min": 0.03539157315,
+ "max": 0.11251831918,
+ "count": 26
+ },
+ "Pyramids.Losses.RNDLoss.mean": {
+ "value": 0.011437034234404564,
+ "min": 0.011437034234404564,
+ "max": 0.39684292674064636,
+ "count": 26
+ },
+ "Pyramids.Losses.RNDLoss.sum": {
+ "value": 0.1715555191040039,
+ "min": 0.1683458536863327,
+ "max": 2.777900457382202,
+ "count": 26
+ },
+ "Pyramids.Environment.EpisodeLength.mean": {
+ "value": 349.3448275862069,
+ "min": 332.43956043956047,
+ "max": 999.0,
+ "count": 26
+ },
+ "Pyramids.Environment.EpisodeLength.sum": {
+ "value": 30393.0,
+ "min": 15984.0,
+ "max": 34041.0,
+ "count": 26
+ },
+ "Pyramids.Environment.CumulativeReward.mean": {
+ "value": 1.5613454385575922,
+ "min": -1.0000000521540642,
+ "max": 1.6216955381962987,
+ "count": 26
+ },
+ "Pyramids.Environment.CumulativeReward.sum": {
+ "value": 137.39839859306812,
+ "min": -30.676601640880108,
+ "max": 145.9525984376669,
+ "count": 26
+ },
+ "Pyramids.Policy.ExtrinsicReward.mean": {
+ "value": 1.5613454385575922,
+ "min": -1.0000000521540642,
+ "max": 1.6216955381962987,
+ "count": 26
+ },
+ "Pyramids.Policy.ExtrinsicReward.sum": {
+ "value": 137.39839859306812,
+ "min": -30.676601640880108,
+ "max": 145.9525984376669,
+ "count": 26
+ },
+ "Pyramids.Policy.RndReward.mean": {
+ "value": 0.04159902990431874,
+ "min": 0.04159902990431874,
+ "max": 7.765137803740799,
+ "count": 26
+ },
+ "Pyramids.Policy.RndReward.sum": {
+ "value": 3.660714631580049,
+ "min": 3.660714631580049,
+ "max": 124.24220485985279,
+ "count": 26
+ },
+ "Pyramids.IsTraining.mean": {
+ "value": 1.0,
+ "min": 1.0,
+ "max": 1.0,
+ "count": 26
+ },
+ "Pyramids.IsTraining.sum": {
+ "value": 1.0,
+ "min": 1.0,
+ "max": 1.0,
+ "count": 26
+ }
+ },
+ "metadata": {
+ "timer_format_version": "0.1.0",
+ "start_time_seconds": "1673944417",
+ "python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
+ "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
+ "mlagents_version": "0.29.0.dev0",
+ "mlagents_envs_version": "0.29.0.dev0",
+ "communication_protocol_version": "1.5.0",
+ "pytorch_version": "1.8.1+cu102",
+ "numpy_version": "1.21.6",
+ "end_time_seconds": "1673946819"
+ },
+ "total": 2401.6863892289994,
+ "count": 1,
+ "self": 0.39779570499922556,
+ "children": {
+ "run_training.setup": {
+ "total": 0.14710872799969366,
+ "count": 1,
+ "self": 0.14710872799969366
+ },
+ "TrainerController.start_learning": {
+ "total": 2401.1414847960004,
+ "count": 1,
+ "self": 1.5874755200247819,
+ "children": {
+ "TrainerController._reset_env": {
+ "total": 4.51566898100009,
+ "count": 1,
+ "self": 4.51566898100009
+ },
+ "TrainerController.advance": {
+ "total": 2394.855636035976,
+ "count": 49824,
+ "self": 1.853121960093631,
+ "children": {
+ "env_step": {
+ "total": 1494.0950928778411,
+ "count": 49824,
+ "self": 1391.7245817680168,
+ "children": {
+ "SubprocessEnvManager._take_step": {
+ "total": 101.23936362989662,
+ "count": 49824,
+ "self": 4.87605545005772,
+ "children": {
+ "TorchPolicy.evaluate": {
+ "total": 96.3633081798389,
+ "count": 48876,
+ "self": 21.483079906790408,
+ "children": {
+ "TorchPolicy.sample_actions": {
+ "total": 74.88022827304849,
+ "count": 48876,
+ "self": 74.88022827304849
+ }
+ }
+ }
+ }
+ },
+ "workers": {
+ "total": 1.1311474799276766,
+ "count": 49823,
+ "self": 0.0,
+ "children": {
+ "worker_root": {
+ "total": 2396.88050856301,
+ "count": 49823,
+ "is_parallel": true,
+ "self": 1122.0686625541803,
+ "children": {
+ "run_training.setup": {
+ "total": 0.0,
+ "count": 0,
+ "is_parallel": true,
+ "self": 0.0,
+ "children": {
+ "steps_from_proto": {
+ "total": 0.002752772999883746,
+ "count": 1,
+ "is_parallel": true,
+ "self": 0.001033865998579131,
+ "children": {
+ "_process_rank_one_or_two_observation": {
+ "total": 0.0017189070013046148,
+ "count": 8,
+ "is_parallel": true,
+ "self": 0.0017189070013046148
+ }
+ }
+ },
+ "UnityEnvironment.step": {
+ "total": 0.0615624010006286,
+ "count": 1,
+ "is_parallel": true,
+ "self": 0.0005867099998795311,
+ "children": {
+ "UnityEnvironment._generate_step_input": {
+ "total": 0.0005159009997441899,
+ "count": 1,
+ "is_parallel": true,
+ "self": 0.0005159009997441899
+ },
+ "communicator.exchange": {
+ "total": 0.0584847690006427,
+ "count": 1,
+ "is_parallel": true,
+ "self": 0.0584847690006427
+ },
+ "steps_from_proto": {
+ "total": 0.001975021000362176,
+ "count": 1,
+ "is_parallel": true,
+ "self": 0.0004616490004991647,
+ "children": {
+ "_process_rank_one_or_two_observation": {
+ "total": 0.0015133719998630113,
+ "count": 8,
+ "is_parallel": true,
+ "self": 0.0015133719998630113
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "UnityEnvironment.step": {
+ "total": 1274.81184600883,
+ "count": 49822,
+ "is_parallel": true,
+ "self": 31.720769021942942,
+ "children": {
+ "UnityEnvironment._generate_step_input": {
+ "total": 18.896137503044883,
+ "count": 49822,
+ "is_parallel": true,
+ "self": 18.896137503044883
+ },
+ "communicator.exchange": {
+ "total": 1117.4258751767757,
+ "count": 49822,
+ "is_parallel": true,
+ "self": 1117.4258751767757
+ },
+ "steps_from_proto": {
+ "total": 106.7690643070664,
+ "count": 49822,
+ "is_parallel": true,
+ "self": 24.16646897301598,
+ "children": {
+ "_process_rank_one_or_two_observation": {
+ "total": 82.60259533405042,
+ "count": 398576,
+ "is_parallel": true,
+ "self": 82.60259533405042
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "trainer_advance": {
+ "total": 898.9074211980414,
+ "count": 49823,
+ "self": 3.0712260500840785,
+ "children": {
+ "process_trajectory": {
+ "total": 154.13215791196671,
+ "count": 49823,
+ "self": 154.0119976509659,
+ "children": {
+ "RLTrainer._checkpoint": {
+ "total": 0.12016026100081945,
+ "count": 1,
+ "self": 0.12016026100081945
+ }
+ }
+ },
+ "_update_policy": {
+ "total": 741.7040372359907,
+ "count": 345,
+ "self": 194.08276745405965,
+ "children": {
+ "TorchPPOOptimizer.update": {
+ "total": 547.621269781931,
+ "count": 17811,
+ "self": 547.621269781931
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "trainer_threads": {
+ "total": 1.81299947143998e-06,
+ "count": 1,
+ "self": 1.81299947143998e-06
+ },
+ "TrainerController._save_models": {
+ "total": 0.18270244599989383,
+ "count": 1,
+ "self": 0.002395254999100871,
+ "children": {
+ "RLTrainer._checkpoint": {
+ "total": 0.18030719100079295,
+ "count": 1,
+ "self": 0.18030719100079295
+ }
+ }
+ }
+ }
+ }
+ }
+ }
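run_logs/timers.json combines the final values of the training gauges with a hierarchical profiling tree for the run (about 2400 s of wall clock). A minimal sketch (an addition for illustration; key names are taken from the file above) for pulling out the headline reward:

```
# Hedged sketch: summarize the committed gauges; keys match run_logs/timers.json above.
import json

with open("run_logs/timers.json") as f:
    timers = json.load(f)

reward = timers["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
print(f"mean cumulative reward: {reward['value']:.3f} "
      f"(min {reward['min']:.3f}, max {reward['max']:.3f}, over {reward['count']} summaries)")
```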
run_logs/training_status.json ADDED
@@ -0,0 +1,38 @@
+ {
+ "Pyramids": {
+ "checkpoints": [
+ {
+ "steps": 499948,
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-499948.onnx",
+ "reward": 1.6249999776482582,
+ "creation_time": 1673945884.659464,
+ "auxillary_file_paths": [
+ "results/Pyramids Training/Pyramids/Pyramids-499948.pt"
+ ]
+ },
+ {
+ "steps": 780922,
+ "file_path": "results/Pyramids Training/Pyramids/Pyramids-780922.onnx",
+ "reward": 1.6547499932348728,
+ "creation_time": 1673946819.2399633,
+ "auxillary_file_paths": [
+ "results/Pyramids Training/Pyramids/Pyramids-780922.pt"
+ ]
+ }
+ ],
+ "final_checkpoint": {
+ "steps": 780922,
+ "file_path": "results/Pyramids Training/Pyramids.onnx",
+ "reward": 1.6547499932348728,
+ "creation_time": 1673946819.2399633,
+ "auxillary_file_paths": [
+ "results/Pyramids Training/Pyramids/Pyramids-780922.pt"
+ ]
+ }
+ },
+ "metadata": {
+ "stats_format_version": "0.3.0",
+ "mlagents_version": "0.29.0.dev0",
+ "torch_version": "1.8.1+cu102"
+ }
+ }
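training_status.json lists the checkpoints saved during the run together with their mean rewards; the final checkpoint at 780922 steps scores about 1.65. A minimal sketch (an addition; the structure is taken from the file above) for picking the best-scoring checkpoint programmatically:

```
# Hedged sketch: choose the highest-reward checkpoint recorded in training_status.json.
import json

with open("run_logs/training_status.json") as f:
    status = json.load(f)

best = max(status["Pyramids"]["checkpoints"], key=lambda c: c["reward"])
print(best["file_path"], best["reward"])
# -> results/Pyramids Training/Pyramids/Pyramids-780922.onnx 1.6547499932348728
```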