nvidia / NeMo
jiaqiz committed
Commit 7b08aaf
1 parent: 3b2cc32

Add files using large-upload tool

Files changed (26); a short sketch grouping these paths by tensor follows the list.
  1. .gitattributes +25 -0
  2. model_weights/model.decoder.layers.self_attention.linear_proj.weight/11.0.1 +3 -0
  3. model_weights/model.decoder.layers.self_attention.linear_proj.weight/19.0.1 +3 -0
  4. model_weights/model.decoder.layers.self_attention.linear_proj.weight/29.0.4 +3 -0
  5. model_weights/model.decoder.layers.self_attention.linear_proj.weight/32.0.1 +3 -0
  6. model_weights/model.decoder.layers.self_attention.linear_proj.weight/32.0.4 +3 -0
  7. model_weights/model.decoder.layers.self_attention.linear_proj.weight/36.0.4 +3 -0
  8. model_weights/model.decoder.layers.self_attention.linear_proj.weight/41.0.3 +3 -0
  9. model_weights/model.decoder.layers.self_attention.linear_proj.weight/44.0.2 +3 -0
  10. model_weights/model.decoder.layers.self_attention.linear_proj.weight/44.0.7 +3 -0
  11. model_weights/model.decoder.layers.self_attention.linear_proj.weight/5.0.1 +3 -0
  12. model_weights/model.decoder.layers.self_attention.linear_proj.weight/56.0.2 +3 -0
  13. model_weights/model.decoder.layers.self_attention.linear_proj.weight/58.0.1 +3 -0
  14. model_weights/model.decoder.layers.self_attention.linear_proj.weight/62.0.4 +3 -0
  15. model_weights/model.decoder.layers.self_attention.linear_proj.weight/65.0.6 +3 -0
  16. model_weights/model.decoder.layers.self_attention.linear_proj.weight/69.0.5 +3 -0
  17. model_weights/model.decoder.layers.self_attention.linear_proj.weight/71.0.6 +3 -0
  18. model_weights/model.decoder.layers.self_attention.linear_proj.weight/74.0.2 +3 -0
  19. model_weights/model.decoder.layers.self_attention.linear_proj.weight/79.0.1 +3 -0
  20. model_weights/model.decoder.layers.self_attention.linear_proj.weight/79.0.5 +3 -0
  21. model_weights/model.decoder.layers.self_attention.linear_proj.weight/82.0.2 +3 -0
  22. model_weights/model.decoder.layers.self_attention.linear_proj.weight/83.0.6 +3 -0
  23. model_weights/model.decoder.layers.self_attention.linear_proj.weight/88.0.5 +3 -0
  24. model_weights/model.decoder.layers.self_attention.linear_proj.weight/91.0.0 +3 -0
  25. model_weights/model.decoder.layers.self_attention.linear_proj.weight/91.0.2 +3 -0
  26. model_weights/model.output_layer.weight/1.0 +3 -0
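Each tensor in this checkpoint is stored as a directory of shard files; names such as 11.0.1 appear to be shard indices, though the commit itself does not spell out the scheme. A minimal sketch in plain Python, using only paths copied from the list above (truncated for brevity), that groups the added files by tensor directory:

from collections import defaultdict

# Paths copied verbatim from the "Files changed" list above (truncated).
added_files = [
    "model_weights/model.decoder.layers.self_attention.linear_proj.weight/11.0.1",
    "model_weights/model.decoder.layers.self_attention.linear_proj.weight/19.0.1",
    "model_weights/model.decoder.layers.self_attention.linear_proj.weight/29.0.4",
    "model_weights/model.output_layer.weight/1.0",
    # ... remaining shard paths from the list above
]

# Group shard identifiers under the tensor directory they belong to.
shards_by_tensor = defaultdict(list)
for path in added_files:
    tensor_dir, shard_id = path.rsplit("/", 1)
    shards_by_tensor[tensor_dir].append(shard_id)

for tensor_dir, shard_ids in sorted(shards_by_tensor.items()):
    print(f"{tensor_dir}: {sorted(shard_ids)}")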
.gitattributes CHANGED
@@ -2770,3 +2770,28 @@ model_weights/model.decoder.layers.self_attention.linear_proj.weight/42.0.0 filter=lfs diff=lfs merge=lfs -text
  model_weights/model.decoder.layers.self_attention.linear_proj.weight/46.0.1 filter=lfs diff=lfs merge=lfs -text
  model_weights/model.decoder.layers.self_attention.linear_proj.weight/73.0.3 filter=lfs diff=lfs merge=lfs -text
  model_weights/model.decoder.layers.self_attention.linear_proj.weight/81.0.6 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/56.0.2 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/11.0.1 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/71.0.6 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/5.0.1 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/32.0.4 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/88.0.5 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/19.0.1 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/32.0.1 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/83.0.6 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.output_layer.weight/1.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/74.0.2 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/41.0.3 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/91.0.2 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/82.0.2 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/79.0.1 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/91.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/58.0.1 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/65.0.6 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/69.0.5 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/62.0.4 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/44.0.7 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/44.0.2 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/36.0.4 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/29.0.4 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_proj.weight/79.0.5 filter=lfs diff=lfs merge=lfs -text
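Each line added to .gitattributes above tells Git to run the matching path through the Git LFS filter, so only a small pointer file is committed while the tensor bytes live in LFS storage. In practice `git lfs track <path>` writes rules of exactly this form; the sketch below is a hypothetical plain-Python equivalent (the helper name and example path are illustrative, not part of this commit):

from pathlib import Path

LFS_RULE = "{path} filter=lfs diff=lfs merge=lfs -text"  # format used in the diff above

def track_with_lfs(paths, gitattributes=Path(".gitattributes")):
    """Append a Git LFS tracking rule for each path, skipping rules already present."""
    existing = gitattributes.read_text() if gitattributes.exists() else ""
    with gitattributes.open("a") as fh:
        for path in paths:
            rule = LFS_RULE.format(path=path)
            if rule not in existing:
                fh.write(rule + "\n")

# Illustrative usage with one shard path from this commit:
track_with_lfs(["model_weights/model.output_layer.weight/1.0"])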
model_weights/model.decoder.layers.self_attention.linear_proj.weight/11.0.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:768ea13c2ece2cce66647c6186662bfac9a61468e103fc6bb3054ccfcb5e7958
+ size 84934656
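Every weight file in this commit is a three-line Git LFS pointer like the one above: the pointer spec version, the SHA-256 of the real object, and its size in bytes. A minimal standard-library sketch for parsing a pointer and checking a separately downloaded blob against it (the paths in the usage comment are assumptions, not files from this commit):

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path):
    """Parse the 'key value' lines of a Git LFS pointer file into a dict."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify_blob(blob_path, pointer):
    """Check that a downloaded blob matches the pointer's size and SHA-256."""
    data = Path(blob_path).read_bytes()
    return (
        len(data) == pointer["size"]
        and hashlib.sha256(data).hexdigest() == pointer["sha256"]
    )

# Illustrative usage (hypothetical local paths):
# ptr = parse_lfs_pointer("model_weights/model.decoder.layers.self_attention.linear_proj.weight/11.0.1")
# print(verify_blob("/tmp/11.0.1", ptr))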
model_weights/model.decoder.layers.self_attention.linear_proj.weight/19.0.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5da57fc25770f8fd7d395a5ab74a9caf5a0ceb231cc7a5e5a32eaeaca8fb35
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/29.0.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbce653e4dc39b0f5a31795bcbd97d9c82a332b2b09ca94dfaf6ff31d612dac8
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/32.0.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa5e2833e649d05acb024d7c23affd13cf35ece624bce77f51cfd75e5a0d33c7
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/32.0.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:548065eceef78a5b13e7740aa623781c3e7d2adad904431933b005515f0b411e
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/36.0.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:709c01e1a2750845fe5f77fba584589133ec2e3cc1037e988fdceb10e1809f3e
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/41.0.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66a49d18f797504b404f36306cb44f5d8599ac626f90077bd9daba3cf3c4c602
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/44.0.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d397431e11a7272267d408746963a7473167f31dd90c582a727d648627a7e9f
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/44.0.7 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b7ddbe69f1baf7d07ad10670478f88a35de76c2e75bf7cd9a2d8ea41899bb46
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/5.0.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:549985316536add2e488795effd9456c0c54df7bc4c16c42ae4e92ee30e23ba5
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/56.0.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cc1f980107b8267c1ffe83e1268ae862339acdabf2b441560b774ec934f1f69
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/58.0.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c29b88639c7adb7b3092f2effd214de4fc1c7010eb8c63adc051c9d3987e5aa8
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/62.0.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38e261dd2a12cb120693c1343a96de111594bc678c8e1bd69fb3c05f99b44fa1
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/65.0.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37eef558094ad3f7382e5a03a8855fd2903275a3853169841ca6c7eb44736bf3
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/69.0.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03b0b86026377001cf3f2268cbae5906bd281b54cb14d40cb24634bc582ad4ca
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/71.0.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a232b89a6ca80dae8c84e620d1bded6dfb32af5978ba938952719c9df97d9818
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/74.0.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc8777e700c1a3c407196fa54e94a7353d9d989c009a9e029f91e5ca3fd226de
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/79.0.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b0b2f0dc660486a2fe1a2c3f4a71b67450c00475be4df369be68dbd6cc9b846
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/79.0.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d8235c677d0267a2d9fa5066de5161db7c61416e419d81c1103be033646a784
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/82.0.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca0efad6a37def242aec6843e067874891988c8ad74e626e7450fe4bab247830
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/83.0.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08309f2f9d9b0cb5cde14ce649b25464d354365536259ca630e340eff9681575
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/88.0.5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23750a76cf8538d6aa067a3b4b950ea076a4afcce41d284a41e631bc1982a661
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/91.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54223f2e3fc8821bb90819df2c15648ab4fcb417927539d9b7c788c4450ef224
+ size 84934656
model_weights/model.decoder.layers.self_attention.linear_proj.weight/91.0.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2859925331f31a0addc484b2a7a1f433e3f24fb0916090ebeac836e3a5e371f1
+ size 84934656
model_weights/model.output_layer.weight/1.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06299358859430fd9deabd8216183524bbd994d2faf7eb52fc29cf26cde69385
+ size 1179648000
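For orientation only: the byte counts recorded in these pointers map to element counts only under an assumed element width, which the pointers do not state. A worked example with a hypothetical 2-byte (fp16/bf16) element size:

# Sizes taken from the pointers above; the 2-byte dtype is an assumption.
BYTES_PER_ELEMENT = 2  # hypothetical fp16/bf16 storage

proj_shard_bytes = 84_934_656        # each linear_proj.weight shard above
output_shard_bytes = 1_179_648_000   # model.output_layer.weight/1.0

print(proj_shard_bytes // BYTES_PER_ELEMENT)    # 42_467_328 elements per shard
print(output_shard_bytes // BYTES_PER_ELEMENT)  # 589_824_000 elements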