SFconvertbot committed on
Commit 281b641 · 1 Parent(s): 42701e8

Adding `safetensors` variant of this model


This is an automated PR created with https://huggingface.co/spaces/safetensors/convert

These new files are equivalent to `pytorch_model.bin` but are safe in the sense that
no arbitrary code can be embedded in them.

These files also happen to load much faster than their PyTorch counterpart:
https://colab.research.google.com/github/huggingface/notebooks/blob/main/safetensors_doc/en/speed.ipynb
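For reference, here is a minimal sketch of loading one of the converted shards with the `safetensors` library (assuming the shard from this PR has been downloaded to the working directory and that `safetensors` and `torch` are installed); unlike a pickle-based `.bin` checkpoint, parsing it cannot trigger arbitrary code execution:

```python
# Minimal sketch: load one converted shard directly with the safetensors library.
# Assumes model-00001-of-00003.safetensors has been downloaded locally.
from safetensors.torch import load_file

state_dict = load_file("model-00001-of-00003.safetensors")
for name, tensor in state_dict.items():
    print(name, tuple(tensor.shape), tensor.dtype)
```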

The widgets on your model page will run using this model even if this PR is not merged,
making sure the files actually work.

If you find any issues, please report them here: https://huggingface.co/spaces/safetensors/convert/discussions

Feel free to ignore this PR.

model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7b80791d012d0b3245d430020e1ef65c95c083878700aabf633b4ee2ad4de78
+ size 757728
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc04f269308b1ed230980fd16b87ea2ad6b6523a5ea299922aa39968cf1e1a51
+ size 939192
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9522b93c9d3dcb120bf1d427eea7562723f8dabce4f5f24b13c810eaf7f831a1
+ size 135360
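Each `ADDED` entry above is a Git LFS pointer that records only the sha256 `oid` and byte `size` of the actual shard. A small sketch for checking a downloaded shard against its pointer (the expected values are copied from the first pointer above; the local path is an assumption):

```python
# Sketch: verify a downloaded shard against the sha256 oid and size
# recorded in its Git LFS pointer.
import hashlib
import os

path = "model-00001-of-00003.safetensors"   # assumed local download path
expected_oid = "c7b80791d012d0b3245d430020e1ef65c95c083878700aabf633b4ee2ad4de78"
expected_size = 757728

assert os.path.getsize(path) == expected_size, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("shard matches its LFS pointer")
```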
model.safetensors.index.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "metadata": {
+     "total_size": 1827456
+   },
+   "weight_map": {
+     "lm_head.bias": "model-00003-of-00003.safetensors",
+     "lm_head.weight": "model-00003-of-00003.safetensors",
+     "transformer.h.0.attn.causal_mask": "model-00001-of-00003.safetensors",
+     "transformer.h.0.attn.out_proj.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.0.attn.qkv_proj.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.0.ln_1.bias": "model-00001-of-00003.safetensors",
+     "transformer.h.0.ln_1.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.0.mlp.fc_in.bias": "model-00001-of-00003.safetensors",
+     "transformer.h.0.mlp.fc_in.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.0.mlp.fc_out.bias": "model-00001-of-00003.safetensors",
+     "transformer.h.0.mlp.fc_out.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.1.attn.causal_mask": "model-00001-of-00003.safetensors",
+     "transformer.h.1.attn.out_proj.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.1.attn.qkv_proj.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.1.ln_1.bias": "model-00001-of-00003.safetensors",
+     "transformer.h.1.ln_1.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.1.mlp.fc_in.bias": "model-00001-of-00003.safetensors",
+     "transformer.h.1.mlp.fc_in.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.1.mlp.fc_out.bias": "model-00001-of-00003.safetensors",
+     "transformer.h.1.mlp.fc_out.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.2.attn.causal_mask": "model-00002-of-00003.safetensors",
+     "transformer.h.2.attn.out_proj.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.2.attn.qkv_proj.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.2.ln_1.bias": "model-00001-of-00003.safetensors",
+     "transformer.h.2.ln_1.weight": "model-00001-of-00003.safetensors",
+     "transformer.h.2.mlp.fc_in.bias": "model-00002-of-00003.safetensors",
+     "transformer.h.2.mlp.fc_in.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.2.mlp.fc_out.bias": "model-00002-of-00003.safetensors",
+     "transformer.h.2.mlp.fc_out.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.3.attn.causal_mask": "model-00002-of-00003.safetensors",
+     "transformer.h.3.attn.out_proj.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.3.attn.qkv_proj.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.3.ln_1.bias": "model-00002-of-00003.safetensors",
+     "transformer.h.3.ln_1.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.3.mlp.fc_in.bias": "model-00002-of-00003.safetensors",
+     "transformer.h.3.mlp.fc_in.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.3.mlp.fc_out.bias": "model-00002-of-00003.safetensors",
+     "transformer.h.3.mlp.fc_out.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.4.attn.causal_mask": "model-00002-of-00003.safetensors",
+     "transformer.h.4.attn.out_proj.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.4.attn.qkv_proj.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.4.ln_1.bias": "model-00002-of-00003.safetensors",
+     "transformer.h.4.ln_1.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.4.mlp.fc_in.bias": "model-00002-of-00003.safetensors",
+     "transformer.h.4.mlp.fc_in.weight": "model-00002-of-00003.safetensors",
+     "transformer.h.4.mlp.fc_out.bias": "model-00002-of-00003.safetensors",
+     "transformer.h.4.mlp.fc_out.weight": "model-00002-of-00003.safetensors",
+     "transformer.ln_f.bias": "model-00002-of-00003.safetensors",
+     "transformer.ln_f.weight": "model-00002-of-00003.safetensors",
+     "transformer.wte.weight": "model-00001-of-00003.safetensors"
+   }
+ }
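The index file above maps every parameter name to the shard that stores it, so a loader can fetch a single tensor without touching the other shards. A minimal sketch using `safetensors.safe_open`, assuming the index and the three shards sit in the current directory:

```python
# Sketch: resolve a single tensor through model.safetensors.index.json.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "transformer.h.2.mlp.fc_in.weight"   # any key from weight_map
shard = index["weight_map"][name]           # -> model-00002-of-00003.safetensors

# Only the shard holding this tensor is opened; the others are never read.
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape))
```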