NeMo
nvidia
jiaqiz committed on
Commit a184166
1 Parent(s): 510e9ca

Add files using large-upload tool
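The commit message refers to a "large-upload tool"; the exact tool is not identified on this page, but a comparable bulk upload of a multi-gigabyte checkpoint folder can be done with huggingface_hub's `upload_large_folder`. A minimal sketch, assuming a recent huggingface_hub release; the repo ID and local path below are placeholders, not taken from this commit:

```python
# Minimal sketch of pushing a large checkpoint folder to the Hub.
# Assumes a recent huggingface_hub release; repo_id and folder_path are placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="nvidia/<model-repo>",   # placeholder, not the actual repo ID
    repo_type="model",
    folder_path="./model_weights",   # local directory containing the zarr shards
)
```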

Files changed (25)
  1. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/.zarray +16 -0
  2. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/0.0 +0 -0
  3. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/15.0 +0 -0
  4. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/2.0 +0 -0
  5. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/20.0 +0 -0
  6. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/28.0 +0 -0
  7. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/29.0 +0 -0
  8. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/33.0 +0 -0
  9. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/35.0 +0 -0
  10. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/38.0 +0 -0
  11. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/39.0 +0 -0
  12. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/4.0 +0 -0
  13. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/41.0 +0 -0
  14. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/44.0 +0 -0
  15. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/46.0 +0 -0
  16. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/53.0 +0 -0
  17. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/57.0 +0 -0
  18. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/58.0 +0 -0
  19. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/63.0 +0 -0
  20. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/69.0 +0 -0
  21. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/70.0 +0 -0
  22. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/79.0 +0 -0
  23. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/84.0 +0 -0
  24. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/87.0 +0 -0
  25. model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/89.0 +0 -0
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/.zarray ADDED
@@ -0,0 +1,16 @@
+{
+    "chunks": [
+        1,
+        18432
+    ],
+    "compressor": null,
+    "dtype": "bfloat16",
+    "fill_value": null,
+    "filters": null,
+    "order": "C",
+    "shape": [
+        96,
+        18432
+    ],
+    "zarr_format": 2
+}
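For reference, this .zarray metadata describes a bfloat16 array of shape [96, 18432] stored row by row: chunks of [1, 18432] mean each numbered file (0.0, 15.0, ...) holds one 18432-element row, and with compressor and filters set to null each chunk is just the raw bfloat16 bytes (18432 × 2 = 36,864 bytes, matching the ~36.9 kB file sizes below). A minimal sketch of decoding a single chunk with NumPy plus the ml_dtypes package (an assumption on my part; stock zarr readers may not handle the bfloat16 dtype out of the box):

```python
# Minimal sketch: decode one zarr chunk of the layer_norm_bias array.
# Assumes the ml_dtypes package for a NumPy-compatible bfloat16 dtype.
import numpy as np
import ml_dtypes

chunk_path = (
    "model_weights/"
    "model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/0.0"
)

# compressor and filters are null, so the file is raw bfloat16 data for one
# [1, 18432] chunk (row 0 of the [96, 18432] array).
row = np.fromfile(chunk_path, dtype=ml_dtypes.bfloat16).reshape(1, 18432)
print(row.shape, row.dtype)           # (1, 18432) bfloat16
print(row.astype(np.float32)[:, :5])  # peek at the first few values
```

Reading the full tensor through a zarr or tensorstore reader that understands bfloat16 would assemble all 96 such chunks along the first axis.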
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/0.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/15.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/2.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/20.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/28.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/29.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/33.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/35.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/38.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/39.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/4.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/41.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/44.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/46.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/53.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/57.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/58.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/63.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/69.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/70.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/79.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/84.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/87.0 ADDED
Binary file (36.9 kB)
 
model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias/89.0 ADDED
Binary file (36.9 kB)
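Since the array has 96 rows chunked one per file, the complete tensor spans chunk files 0.0 through 95.0; this commit adds 24 of them, and the remainder presumably arrive in other commits of the same upload. A small sketch for checking which chunks are present in a local copy (the directory path mirrors the listing above):

```python
# Minimal sketch: report which of the 96 expected chunk files are present locally.
from pathlib import Path

tensor_dir = Path(
    "model_weights/model.decoder.layers.self_attention.linear_qkv.layer_norm_bias"
)
# shape [96, 18432] with chunks [1, 18432] -> chunk files "0.0" through "95.0"
expected = {f"{row}.0" for row in range(96)}
present = {p.name for p in tensor_dir.iterdir() if p.name != ".zarray"}

missing = sorted(expected - present, key=lambda name: int(name.split(".")[0]))
print(f"{len(present & expected)}/96 chunks present; missing: {missing}")
```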