levmckinney committed
Commit fedd82b
1 Parent(s): 46ff026

Upload with huggingface_hub
gpt2-large/config.json ADDED
@@ -0,0 +1 @@
+ {"dropout": 0.0, "identity_init": true, "include_input": true, "layer_norm": false, "mlp_hidden_sizes": [], "rank": null, "shared_mlp_hidden_sizes": [], "share_weights": false, "sublayers": false, "num_layers": 36, "vocab_size": 50257, "bias": true, "d_model": 1280}
gpt2-large/params.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:900bde39d9ad903985824852171f40ae406969dd7e92d1b5003f4208e4651b51
+ size 493459767
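params.pt is stored with Git LFS, so the three lines above are the pointer file checked into the repo, not the weights themselves: version names the pointer spec, oid is the SHA-256 of the real blob, and size is its byte length (about 493 MB here). A sketch of fetching the resolved file through huggingface_hub (the repo_id is a placeholder, not taken from this page):

```python
import torch
from huggingface_hub import hf_hub_download

# hf_hub_download follows the LFS pointer and returns a local path to the
# actual blob. Substitute the real repository id for the placeholder.
path = hf_hub_download(repo_id="<user>/<repo>", filename="gpt2-large/params.pt")
state = torch.load(path, map_location="cpu")  # plain torch checkpoint
```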
gpt2-xl/config.json ADDED
@@ -0,0 +1 @@
+ {"bias": true, "identity_init": true, "include_input": true, "include_final": false, "orthogonal": false, "rank": null, "sublayers": false, "d_model": 1600, "num_layers": 48, "vocab_size": 50257}
gpt2-xl/params.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96ea1cf6b4fa0c22710e7dd64499fecff2f4f6b0f1fb5f9d277d43e46cf88947
+ size 813510935
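Because the pointer records the blob's SHA-256, a download can be verified locally. A small check of gpt2-xl/params.pt against the oid and size above (repo_id is again a placeholder):

```python
import hashlib
import os

from huggingface_hub import hf_hub_download

path = hf_hub_download(repo_id="<user>/<repo>", filename="gpt2-xl/params.pt")

def sha256_of(p, chunk=1 << 20):
    # Stream the file so the ~800 MB checkpoint never sits in memory at once.
    h = hashlib.sha256()
    with open(p, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# Both expected values come straight from the LFS pointer above.
assert os.path.getsize(path) == 813510935
assert sha256_of(path) == (
    "96ea1cf6b4fa0c22710e7dd64499fecff2f4f6b0f1fb5f9d277d43e46cf88947"
)
```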
gpt2/config.json ADDED
@@ -0,0 +1 @@
+ {"bias": true, "identity_init": true, "include_input": true, "include_final": false, "orthogonal": false, "rank": null, "sublayers": false, "d_model": 768, "num_layers": 12, "vocab_size": 50257}
gpt2/params.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9310b4ce09a053b38aa79dab8616309dc37db0d74a58ed683c1347859f9d9343
+ size 182751031