NeMo (nvidia)
jiaqiz committed
Commit f36d93c, 1 parent: b41ac6a

Add files using large-upload tool

Files changed (26):
  1. .gitattributes +25 -0
  2. model_weights/model.decoder.layers.mlp.linear_fc2.weight/31.0.6 +3 -0
  3. model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.0 +3 -0
  4. model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.7 +3 -0
  5. model_weights/model.decoder.layers.mlp.linear_fc2.weight/66.0.1 +3 -0
  6. model_weights/model.decoder.layers.mlp.linear_fc2.weight/92.0.0 +3 -0
  7. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/1.5.0 +3 -0
  8. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/11.2.0 +3 -0
  9. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/12.2.0 +3 -0
  10. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/15.0.0 +3 -0
  11. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/17.2.0 +3 -0
  12. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/20.0.0 +3 -0
  13. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/26.4.0 +3 -0
  14. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/32.4.0 +3 -0
  15. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/35.6.0 +3 -0
  16. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/36.6.0 +3 -0
  17. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/39.0.0 +3 -0
  18. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/40.3.0 +3 -0
  19. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/48.1.0 +3 -0
  20. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/49.3.0 +3 -0
  21. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/61.5.0 +3 -0
  22. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/78.7.0 +3 -0
  23. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/89.2.0 +3 -0
  24. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/90.7.0 +3 -0
  25. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/91.1.0 +3 -0
  26. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/95.5.0 +3 -0
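This commit adds 25 new weight shards plus the .gitattributes rules that keep them in Git LFS. The commit message refers to the large-upload tool; below is a minimal sketch of how such a folder of shards might be pushed, assuming huggingface_hub's upload_large_folder API (available in recent huggingface_hub releases) and a hypothetical repository id and local path:

# Sketch only: resumable upload of a local checkpoint folder to the Hugging Face Hub.
# Assumes huggingface_hub provides upload_large_folder; repo_id and folder_path are placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="nvidia/example-nemo-checkpoint",  # hypothetical target repository
    repo_type="model",
    folder_path="./model_weights",             # local folder containing the shard files
)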
.gitattributes CHANGED
@@ -983,3 +983,28 @@ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/85.2.0 filte
  model_weights/model.decoder.layers.self_attention.linear_qkv.weight/14.0.0 filter=lfs diff=lfs merge=lfs -text
  model_weights/model.decoder.layers.self_attention.linear_qkv.weight/14.6.0 filter=lfs diff=lfs merge=lfs -text
  model_weights/model.decoder.layers.self_attention.linear_qkv.weight/34.6.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/26.4.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/17.2.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/90.7.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/15.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/92.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/89.2.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/40.3.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/11.2.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/31.0.6 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/12.2.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/95.5.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/61.5.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/39.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/20.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/1.5.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/91.1.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/35.6.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/49.3.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.7 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.mlp.linear_fc2.weight/66.0.1 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/32.4.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/78.7.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/48.1.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/36.6.0 filter=lfs diff=lfs merge=lfs -text
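Each line added to .gitattributes above is the tracking rule Git LFS uses to route a shard through the LFS filter instead of storing it directly in the Git object database. As a rough illustration (not the actual large-upload tool), the following Python sketch appends the same kind of rule for a list of shard paths, which is also what running git lfs track on each path would produce:

# Sketch only: append Git LFS tracking rules to .gitattributes for new shard paths.
# This mirrors the effect of `git lfs track <path>`; the path list here is illustrative.
from pathlib import Path

new_shards = [
    "model_weights/model.decoder.layers.self_attention.linear_qkv.weight/26.4.0",
    "model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.0",
]

with Path(".gitattributes").open("a", encoding="utf-8") as attrs:
    for shard in new_shards:
        attrs.write(f"{shard} filter=lfs diff=lfs merge=lfs -text\n")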
model_weights/model.decoder.layers.mlp.linear_fc2.weight/31.0.6 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cf071ef7a2e96d034ac25a110e1e346c1506d8cfc5c9ccf891ca0a7d215bf0e
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:befc3a23086f7b64815b9be41f30c7bc4474d8a5d2903e1096ebac9dfb7e5fab
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/6.0.7 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a79d8f9738cc11242b5ae50bc90288091ce2c3ff886f5f729327694e046a5d9c
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/66.0.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ef34726f5f8848351949870af518516b5b51ce9b3da11a23fab4912b9b86b1f
+ size 339738624
model_weights/model.decoder.layers.mlp.linear_fc2.weight/92.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4477259e0e6f542c02d2f26198fe760db5bc2dea25ee05b2cca4e55575a4122
+ size 339738624
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/1.5.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef50a4fe6437a2ad0ad738dbe33f1937c0d9666a53cb2577e8cd24f99e0b544d
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/11.2.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:083e1e711a0495a46502f2e280bcce2e6c37102580b359dabb8e9ca00673384c
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/12.2.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bae72579593c63467bd256dcc3e724e23e9fab7625f27c71644c377f6b69542
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/15.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8eab6a5ab2c7148f2ec09eea402de15a5b9a43ca1e0b0abefc99be9863e9949a
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/17.2.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20648bc1a0ffc69c4eaae08188127f076b78bf230d9fd581b1dcf48967f7a87a
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/20.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:074eedda3a1edbf888872aa6de7bac0f53ff8201bb4517b5776edf58c22c4f76
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/26.4.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1d23b10ca5a0f2a408a863b1cf9ccc2068c15a9782ddc22684c3e42709989aa
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/32.4.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:867487140167d2de33b9313ed5e52cef4b37351d96b8798907d8017841729fd1
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/35.6.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d843b308f06d7fbcb068598393590e155229723bfae6dfb33e121cbf15b917c8
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/36.6.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73aadf6803e2cbdaec02e0242feb61c2c185e8395b76374514c27889e00400be
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/39.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83289ec6632187d9a24b13e1553e8666fdfc87a7c17cad51e889225e943865e7
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/40.3.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51ca764cec14295899abb29aa45ec4ca8ede751bf1e716923bb75e9f3f27af4f
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/48.1.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc09aebc96e364b9ad2508aed9d415f749713fcfe514f255c2b7e63c88532c7e
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/49.3.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38532c0900ad6bde8194fbb663b09a8f8155a8a036a4595c6e9723e2501845d4
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/61.5.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3275c1eb0de85085f89934f7cff25300df1b7660e06ac0bc5c048038d2270338
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/78.7.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53899cd39408ae5d2afefca52c3abe4728f0396c75d902c6c067983e0ecd9ab3
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/89.2.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0480b800b6fd68037ffa9a84702ca97636152c79477a106b2c7dc2526eb6693b
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/90.7.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a74c6f025b7788591ba42495088a84f6d8ccdeeaf582891cfe4b191d9956e5b9
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/91.1.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77588c43cab52c0153a68d3e84b3db63e5933a9715bd686635db20d28ae62a3f
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/95.5.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07cc2a99005ac78778f6e05ad940f0f6065af772f3ace51b65179cacd1b43c45
+ size 99090432
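Every weight file added in this commit is stored as a Git LFS pointer: a three-line text stub giving the pointer spec version, the SHA-256 object id of the actual blob, and its size in bytes (roughly 340 MB per linear_fc2 shard and 99 MB per linear_qkv shard here). Below is a minimal sketch of reading such a pointer and verifying a locally downloaded blob against it; the file paths in the usage example are hypothetical:

# Sketch only: parse a Git LFS pointer (version / oid / size) and verify a local blob.
import hashlib
from pathlib import Path

def read_pointer(pointer_path: str) -> dict:
    """Return the key/value fields of a Git LFS pointer file."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check that blob_path matches the size and sha256 oid recorded in the pointer."""
    fields = read_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    if Path(blob_path).stat().st_size != int(fields["size"]):
        return False
    digest = hashlib.sha256()
    with open(blob_path, "rb") as handle:
        for chunk in iter(lambda: handle.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Example usage (hypothetical paths):
# verify_blob("model_weights/model.decoder.layers.mlp.linear_fc2.weight/31.0.6", "downloads/31.0.6")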