mkardas committed
Commit fe3dba0
1 parent: 0e48af2

Convert checkpoint files to float16 (#6)


- Convert checkpoint files to float16 (f5fda7abbb69f0ea039d1602b79a4bf495a7e577)
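
The LFS pointer change below halves the checkpoint from 500183289 to 250124253 bytes, which is consistent with casting every float32 tensor to float16. A minimal sketch of such a conversion, assuming the checkpoint is a plain PyTorch state dict (the file names here are hypothetical, not taken from this commit):

import torch

# Load the full-precision checkpoint onto CPU.
state_dict = torch.load("pytorch_model_fp32.bin", map_location="cpu")

# Cast only floating-point tensors to float16; any integer buffers
# must keep their original dtype.
state_dict = {
    name: t.half() if t.is_floating_point() else t
    for name, t in state_dict.items()
}

torch.save(state_dict, "pytorch_model.bin")

Equivalently, the model could be loaded with transformers and re-saved after calling model.half() followed by model.save_pretrained(...).
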

Files changed (2):
  1. config.json +3 -3
  2. pytorch_model.bin +2 -2
config.json CHANGED
@@ -7,7 +7,7 @@
     "OPTForCausalLM"
   ],
   "attention_dropout": 0.1,
-  "bias": false,
+  "enable_bias": true,
   "bos_token_id": 0,
   "do_layer_norm_before": true,
   "dropout": 0.1,
@@ -15,7 +15,7 @@
   "ffn_dim": 3072,
   "hidden_size": 768,
   "init_std": 0.02,
-  "layer_norm_elementwise_affine": false,
+  "layer_norm_elementwise_affine": true,
   "layerdrop": 0.0,
   "learned_embeddings": true,
   "max_position_embeddings": 2048,
@@ -25,7 +25,7 @@
   "pad_token_id": 1,
   "scale_embeddings": false,
   "torch_dtype": "float32",
-  "transformers_version": "4.25.0.dev0",
+  "transformers_version": "4.21.0.dev0",
   "use_cache": true,
   "vocab_size": 50000,
   "word_embed_proj_dim": 768
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c1d77d9ee8b607215b7f17838a6ae16466297765e668b3bb5a31088c90df15d0
-size 500183289
+oid sha256:6743a279edce768ab7e010c822ee564f997055ec333ac8f9e4220640fe50ae81
+size 250124253
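
A quick sanity check on the sizes in the pointer above: the new file is almost exactly half the old one, with the small remainder presumably serialization metadata and any tensors left in their original dtype.

# Ratio of new to old checkpoint size, using the values from the diff.
old, new = 500_183_289, 250_124_253
print(new / old)  # prints ~0.50007, i.e. roughly half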