{
"act_dim": 6,
"action_tanh": true,
"activation_function": "relu",
"adv_act_dim": 6,
"architectures": [
"AdversarialDT"
],
"attn_pdrop": 0.1,
"bos_token_id": 50256,
"context_size": 20,
"embd_pdrop": 0.1,
"eos_token_id": 50256,
"flag": false,
"hidden_size": 128,
"initializer_range": 0.02,
"lambda1": 1.0,
"lambda2": 1.0,
"layer_norm_epsilon": 1e-05,
"log_interval_steps": 100,
"max_ep_len": 1001,
"max_ep_return": 5389,
"max_obs_len": 1001,
"max_obs_return": 5389,
"min_ep_return": -36.38545789499999,
"min_obs_return": -36.38545789499999,
"model_type": "decision_transformer",
"n_head": 1,
"n_inner": null,
"n_layer": 3,
"n_positions": 1024,
"pr_act_dim": 6,
"reorder_and_upcast_attn": false,
"resid_pdrop": 0.1,
"returns_scale": 1000,
"scale_attn_by_inverse_layer_idx": false,
"scale_attn_weights": true,
"state_dim": 17,
"state_mean": [
-0.12463613730660446,
0.0380851573126452,
0.15752106892811954,
0.09558710425767158,
0.07873622143234135,
-0.024131525166433174,
-0.12270041477603931,
-0.2951165516992248,
3.0499440021372095,
-0.025617095212702615,
-0.03129052119123046,
-0.008744465966597231,
-0.08879571539228269,
0.126699288294097,
-0.08706499813665808,
-0.2625399151472837,
-0.20975868968367148
],
"state_std": [
0.08693300335893665,
0.25115436008843744,
0.3412246248710908,
0.37199314748513224,
0.4248907200422569,
0.4394592313301521,
0.30854268429829035,
0.2503472104454028,
1.9948001599895642,
0.8866457906518544,
1.4982192253144506,
7.182257934162716,
7.8703290304262445,
9.443924738229082,
9.063740430833478,
6.143162458510403,
5.662356308267215
],
"torch_dtype": "float32",
"total_train_steps": 10000,
"transformers_version": "4.29.2",
"use_cache": true,
"vocab_size": 1,
"warmup_steps": 1000
}
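
For reference, a minimal sketch of reading this file with the transformers library, assuming it is saved locally as config.json and loaded through the stock DecisionTransformerConfig (model_type is "decision_transformer"). The "AdversarialDT" class named under "architectures", and fields such as "lambda1", "adv_act_dim", "pr_act_dim" and "max_ep_return", are custom to the model author's codebase and are not part of transformers; they are simply carried along as extra attributes.

```python
# Minimal sketch, assuming this JSON is saved as config.json next to the script
# and that the standard transformers DecisionTransformerConfig is sufficient to
# hold it. The AdversarialDT model class itself is not shipped with transformers.
from transformers import DecisionTransformerConfig

config = DecisionTransformerConfig.from_json_file("config.json")

# Standard Decision Transformer fields: 17-dimensional states, 6-dimensional
# actions, 3 transformer layers of width 128, context length 20.
print(config.state_dim, config.act_dim, config.n_layer, config.hidden_size)

# Custom fields from this config are kept as plain extra attributes.
print(config.lambda1, config.adv_act_dim, config.returns_scale)
```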