NeMo
okuchaiev committed
Commit 307f415
1 Parent(s): ebe4859

Add files using large-upload tool

Files changed (26)
  1. .gitattributes +13 -0
  2. model_weights/model.decoder.layers.mlp.linear_fc2.weight/12.0.3 +3 -0
  3. model_weights/model.decoder.layers.mlp.linear_fc2.weight/15.0.4 +3 -0
  4. model_weights/model.decoder.layers.mlp.linear_fc2.weight/23.0.6 +3 -0
  5. model_weights/model.decoder.layers.mlp.linear_fc2.weight/25.0.1 +3 -0
  6. model_weights/model.decoder.layers.mlp.linear_fc2.weight/27.0.7 +3 -0
  7. model_weights/model.decoder.layers.mlp.linear_fc2.weight/45.0.4 +3 -0
  8. model_weights/model.decoder.layers.mlp.linear_fc2.weight/49.0.3 +3 -0
  9. model_weights/model.decoder.layers.mlp.linear_fc2.weight/50.0.3 +3 -0
  10. model_weights/model.decoder.layers.mlp.linear_fc2.weight/54.0.6 +3 -0
  11. model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.5 +3 -0
  12. model_weights/model.decoder.layers.mlp.linear_fc2.weight/77.0.3 +3 -0
  13. model_weights/model.decoder.layers.mlp.linear_fc2.weight/78.0.4 +3 -0
  14. model_weights/model.decoder.layers.mlp.linear_fc2.weight/89.0.2 +3 -0
  15. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/.zarray +16 -0
  16. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/12.0 +0 -0
  17. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/25.0 +0 -0
  18. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/27.0 +0 -0
  19. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/37.0 +0 -0
  20. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/44.0 +0 -0
  21. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/47.0 +0 -0
  22. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/74.0 +0 -0
  23. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/79.0 +0 -0
  24. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/89.0 +0 -0
  25. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/93.0 +0 -0
  26. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/95.0 +0 -0
.gitattributes CHANGED
@@ -122,3 +122,16 @@ model_weights/model.decoder.layers.mlp.linear_fc2.weight/80.0.6 filter=lfs diff=lfs merge=lfs -text
  model_weights/model.decoder.layers.mlp.linear_fc2.weight/41.0.3 filter=lfs diff=lfs merge=lfs -text
  model_weights/model.decoder.layers.mlp.linear_fc2.weight/61.0.4 filter=lfs diff=lfs merge=lfs -text
  model_weights/model.decoder.layers.mlp.linear_fc2.weight/27.0.3 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/50.0.3 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/89.0.2 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/15.0.4 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/12.0.3 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/45.0.4 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/49.0.3 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.5 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/27.0.7 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/23.0.6 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/25.0.1 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/77.0.3 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/54.0.6 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/78.0.4 filter=lfs diff=lfs merge=lfs -text
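These .gitattributes entries are literal per-file rules that route each newly added weight shard through Git LFS (filter=lfs diff=lfs merge=lfs -text), so only a small pointer stub is stored in git. A minimal sketch, assuming a local clone of this repo, of confirming a shard is covered by the new rules:

```python
# Illustrative only: parse the repo's .gitattributes and check that one of the
# shards added in this commit is routed through Git LFS. Assumes a local clone.
from pathlib import Path

attrs = Path(".gitattributes").read_text().splitlines()
# These rules are exact paths rather than glob patterns, so a set lookup is enough.
lfs_paths = {line.split()[0] for line in attrs
             if "filter=lfs" in line and not line.startswith("#")}

shard = "model_weights/model.decoder.layers.mlp.linear_fc2.weight/50.0.3"
print(shard in lfs_paths)  # True once this commit's rules are present
```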
model_weights/model.decoder.layers.mlp.linear_fc2.weight/12.0.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c4a4bad9aaa5987ec613026c184969237aeb633eff8680d8d1110af05816f61
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/15.0.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:375e5fbe2f7cbccd9579a4d128e9ff5cd1dd76f9e3e24f83132bf9e7e2a437f9
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/23.0.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7dddff43d90906dae5e75d81b0dd40ec66f92562720fc84707dd3801fd172b6
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/25.0.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d50a66ebd75a3f9db71032afff9a06f78c74437e1ac22a0c24c6fa315795204a
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/27.0.7 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3097a0f6299a30de5a8ef09ed2f2b0429746fa2fd9653ac343a26af94c6777de
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/45.0.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abb1c580fa74051a6b54dec304cc42fd95761b97bc24cf270c1fd185541ccbb0
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/49.0.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38a814c6df2da2ee1aad0be65a369f7ff2702c0e7b54d550f948e9ef78ebf208
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/50.0.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6dc0d489ea644fc5d3ec5c5a5b2bdc02d437715c6b00cc279d21009fcac0f8e3
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/54.0.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a9cb6bc3465da0d790eb3fc9172443db47847fee076fbf35e1cd12c3066c80c
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fdc2b55f6029a30488663d2ac6d81512933b3cd5bd5d9e0cd0cfe37e87a3908
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/77.0.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94da049a5af4e31e6883337613785145f4e1ba1199d788315ca4dfc8fcab83b6
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/78.0.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e29c75e2562b7bd1f7c00768da6ae762373809bb54d28253bb54483d3ead0490
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/89.0.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58f765648bb84ca40de6b4fe85819b9aeaec517efdf560c4059ffcde13453d4b
+ size 339738624
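Each of the thirteen shard files above is added as a Git LFS pointer: a three-line text stub carrying the spec version, the sha256 oid of the real blob, and its size (339,738,624 bytes, roughly 340 MB per shard). A hedged sketch of parsing one pointer and checking a separately obtained copy of the shard against it; the pointer path comes from this commit, while the downloaded path is hypothetical:

```python
# Illustrative only: read an LFS pointer (e.g. from a clone made with
# GIT_LFS_SKIP_SMUDGE=1, so the stub is still in place) and verify a downloaded
# copy of the shard against its oid and size.
import hashlib
from pathlib import Path

pointer_text = Path(
    "model_weights/model.decoder.layers.mlp.linear_fc2.weight/12.0.3"
).read_text()
fields = dict(line.split(" ", 1) for line in pointer_text.splitlines())
oid = fields["oid"].removeprefix("sha256:")   # 5c4a4bad...
size = int(fields["size"])                    # 339738624 bytes

downloaded = Path("/tmp/12.0.3")              # hypothetical local copy of the shard
blob = downloaded.read_bytes()
print(len(blob) == size and hashlib.sha256(blob).hexdigest() == oid)
```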
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/.zarray ADDED
@@ -0,0 +1,16 @@
+ {
+     "chunks": [
+         1,
+         18432
+     ],
+     "compressor": null,
+     "dtype": "bfloat16",
+     "fill_value": null,
+     "filters": null,
+     "order": "C",
+     "shape": [
+         96,
+         18432
+     ],
+     "zarr_format": 2
+ }
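The new .zarray file is zarr v2 metadata for the layer_norm_bias tensor: shape [96, 18432] in bfloat16, chunked one row at a time with no compressor, so each chunk file such as 12.0 is 18432 * 2 = 36,864 raw bytes, matching the 36.9 kB binaries listed below. A minimal inspection sketch, not NeMo's own checkpoint-loading path; the numpy usage and little-endian byte order are assumptions:

```python
# Illustrative only: read the zarr metadata and one raw bfloat16 chunk by hand.
import json
import numpy as np

base = "model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias"

with open(f"{base}/.zarray") as f:
    meta = json.load(f)          # shape [96, 18432], chunks [1, 18432], compressor null
rows, cols = meta["chunks"]      # each chunk file holds a single 18432-wide row

# Chunk "12.0" is the grid cell at row index 12, column block 0; with no
# compressor it is just raw bfloat16 bytes.
raw = np.fromfile(f"{base}/12.0", dtype=np.uint16)

# numpy has no bfloat16, so widen to float32 by placing the stored 16 bits in
# the high half of each 32-bit float (assumes little-endian storage).
values = (raw.astype(np.uint32) << 16).view(np.float32).reshape(rows, cols)
print(values.shape, values.dtype)  # (1, 18432) float32
```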
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/12.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/25.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/27.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/37.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/44.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/47.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/74.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/79.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/89.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/93.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/95.0 ADDED
Binary file (36.9 kB)