afonsosamarques committed
Commit • 6289527
1 Parent(s): 15fc0ba

Training in progress, step 2000
Browse files
- .gitignore +1 -0
- config.json +83 -0
- pytorch_model.bin +3 -0
- training_args.bin +3 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+checkpoint-*/
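Note: the checkpoint-*/ pattern presumably keeps the intermediate Trainer checkpoint directories (checkpoint-1000, checkpoint-2000, ...) out of the pushed repository, so only the top-level config and weights are uploaded.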
config.json
ADDED
@@ -0,0 +1,83 @@
+{
+  "act_dim": 6,
+  "action_tanh": true,
+  "activation_function": "relu",
+  "adv_act_dim": 6,
+  "architectures": [
+    "AdversarialDT"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "context_size": 20,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "flag": false,
+  "hidden_size": 128,
+  "initializer_range": 0.02,
+  "lambda1": 1.0,
+  "lambda2": 1.0,
+  "layer_norm_epsilon": 1e-05,
+  "log_interval_steps": 100,
+  "max_ep_len": 1001,
+  "max_ep_return": 5389,
+  "max_obs_len": 1001,
+  "max_obs_return": 5389,
+  "min_ep_return": -36.38545789499999,
+  "min_obs_return": -36.38545789499999,
+  "model_type": "decision_transformer",
+  "n_head": 1,
+  "n_inner": null,
+  "n_layer": 3,
+  "n_positions": 1024,
+  "pr_act_dim": 6,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "returns_scale": 1000,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "state_dim": 17,
+  "state_mean": [
+    -0.12463613730660446,
+    0.0380851573126452,
+    0.15752106892811954,
+    0.09558710425767158,
+    0.07873622143234135,
+    -0.024131525166433174,
+    -0.12270041477603931,
+    -0.2951165516992248,
+    3.0499440021372095,
+    -0.025617095212702615,
+    -0.03129052119123046,
+    -0.008744465966597231,
+    -0.08879571539228269,
+    0.126699288294097,
+    -0.08706499813665808,
+    -0.2625399151472837,
+    -0.20975868968367148
+  ],
+  "state_std": [
+    0.08693300335893665,
+    0.25115436008843744,
+    0.3412246248710908,
+    0.37199314748513224,
+    0.4248907200422569,
+    0.4394592313301521,
+    0.30854268429829035,
+    0.2503472104454028,
+    1.9948001599895642,
+    0.8866457906518544,
+    1.4982192253144506,
+    7.182257934162716,
+    7.8703290304262445,
+    9.443924738229082,
+    9.063740430833478,
+    6.143162458510403,
+    5.662356308267215
+  ],
+  "torch_dtype": "float32",
+  "total_train_steps": 10000,
+  "transformers_version": "4.29.2",
+  "use_cache": true,
+  "vocab_size": 1,
+  "warmup_steps": 1000
+}
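The config above stores the environment statistics (state_mean, state_std) and scaling constants alongside the architecture hyperparameters, so downstream code can normalize observations without re-reading the training data. Below is a minimal sketch of how that might be consumed, assuming only numpy and the standard library; the file path and dummy observation are placeholders, and the normalization convention (subtract state_mean, divide by state_std, divide returns by returns_scale) follows the usual Decision Transformer setup rather than anything stated in this commit. Since model_type is "decision_transformer", the file should in principle also load through transformers.DecisionTransformerConfig.from_pretrained, with the custom keys (adv_act_dim, pr_act_dim, lambda1, lambda2, ...) carried along as extra attributes for the custom AdversarialDT class, but that is an assumption, not something verified here.

```python
import json
import numpy as np

# Load the configuration committed above (local path is a placeholder).
with open("config.json") as f:
    cfg = json.load(f)

state_mean = np.array(cfg["state_mean"])  # length state_dim == 17
state_std = np.array(cfg["state_std"])    # length state_dim == 17

def normalize_state(raw_state: np.ndarray) -> np.ndarray:
    """Standardize a raw environment observation with the stored statistics."""
    return (raw_state - state_mean) / state_std

def scale_return(target_return: float) -> float:
    """Scale a target return by returns_scale (1000 in this config)."""
    return target_return / cfg["returns_scale"]

# Example usage with a dummy observation of the right dimensionality.
dummy_obs = np.zeros(cfg["state_dim"])
print(normalize_state(dummy_obs)[:3], scale_return(cfg["max_ep_return"]))
```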
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbb4826839655ac12204e941927bd330ec259a0ea6d427726d400951cf52270d
+size 6625853
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e96462505b4a3c52a435bafbf31e31021c4022f4ec850155654af68f558bfb2
+size 4664
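Both .bin files are committed as Git LFS pointers rather than raw tensors: each pointer records the sha256 and byte size of the real blob, which LFS (or the Hub download client) fetches separately. A minimal sketch of checking a downloaded blob against its pointer, using only the standard library; the ".pointer" path is hypothetical and only illustrates where the pointer text might have been saved:

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    """Parse the key/value lines of a Git LFS pointer file (version, oid, size)."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify(blob_path: str, pointer_text: str) -> bool:
    """Check a downloaded blob against the sha256 and byte size in its pointer."""
    expected = parse_lfs_pointer(pointer_text)
    data = Path(blob_path).read_bytes()
    return (
        hashlib.sha256(data).hexdigest() == expected["oid"]
        and len(data) == expected["size"]
    )

# Example: verify the model weights once the real binary has been pulled.
pointer = Path("pytorch_model.bin.pointer").read_text()  # hypothetical saved pointer
print(verify("pytorch_model.bin", pointer))
```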