NeMo
okuchaiev committed on
Commit cbd2b57
1 Parent(s): d05309e

Add files using large-upload tool

Files changed (26)
  1. .gitattributes +24 -0
  2. model_weights/model.decoder.layers.mlp.linear_fc1._extra_state/shard_87_96.pt +3 -0
  3. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/1.0.0 +3 -0
  4. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/10.4.0 +3 -0
  5. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/12.3.0 +3 -0
  6. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/16.3.0 +3 -0
  7. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/4.7.0 +3 -0
  8. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/40.6.0 +3 -0
  9. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/5.0.0 +3 -0
  10. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/52.2.0 +3 -0
  11. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/64.5.0 +3 -0
  12. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/66.4.0 +3 -0
  13. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/67.0.0 +3 -0
  14. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/67.7.0 +3 -0
  15. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/7.5.0 +3 -0
  16. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/71.4.0 +3 -0
  17. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/73.5.0 +3 -0
  18. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/73.7.0 +3 -0
  19. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/76.2.0 +3 -0
  20. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/8.0.0 +3 -0
  21. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/80.3.0 +3 -0
  22. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/83.2.0 +3 -0
  23. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/84.0.0 +3 -0
  24. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/85.4.0 +3 -0
  25. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/87.6.0 +3 -0
  26. model_weights/model.decoder.layers.self_attention.linear_qkv.weight/9.7.0 +3 -0
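Each parameter in this checkpoint is stored as a directory of shard files (for example `model_weights/model.decoder.layers.self_attention.linear_qkv.weight/67.0.0`), so a single commit like this one touches only a scattered subset of shards. A quick way to inventory which shards of each parameter are present in a local checkout is sketched below; the script is illustrative only (not part of NeMo), and the checkout path is an assumption.

```python
from collections import defaultdict
from pathlib import Path

# Hypothetical local checkout path; adjust to wherever the repository was cloned.
CHECKPOINT_ROOT = Path("NeMo/model_weights")

# Group shard files by the parameter directory they belong to, e.g.
# "model.decoder.layers.self_attention.linear_qkv.weight" -> ["1.0.0", "67.0.0", ...]
shards_by_param = defaultdict(list)
for shard in CHECKPOINT_ROOT.rglob("*"):
    if shard.is_file():
        shards_by_param[shard.parent.relative_to(CHECKPOINT_ROOT)].append(shard.name)

for param, shards in sorted(shards_by_param.items()):
    print(f"{param}: {len(shards)} shard(s) present")
```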
.gitattributes CHANGED
@@ -3043,3 +3043,27 @@ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/48.3.0 filter=lfs diff=lfs merge=lfs -text
 model_weights/model.decoder.layers.self_attention.linear_qkv.weight/37.4.0 filter=lfs diff=lfs merge=lfs -text
 model_weights/model.decoder.layers.self_attention.linear_qkv.weight/32.7.0 filter=lfs diff=lfs merge=lfs -text
 model_weights/model.decoder.layers.self_attention.linear_qkv.weight/57.2.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/80.3.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/71.4.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/64.5.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/16.3.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/66.4.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/1.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/87.6.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/85.4.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/83.2.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/9.7.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/12.3.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/5.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/7.5.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/67.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/40.6.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/73.7.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/52.2.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/73.5.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/76.2.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/8.0.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/4.7.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/10.4.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/67.7.0 filter=lfs diff=lfs merge=lfs -text
+ model_weights/model.decoder.layers.self_attention.linear_qkv.weight/84.0.0 filter=lfs diff=lfs merge=lfs -text
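The `filter=lfs diff=lfs merge=lfs -text` rules added above route each new weight shard through Git LFS, so the repository itself stores only small pointer files. Per the commit message, the shards were pushed with the large-upload tool; a minimal sketch of such an upload, assuming huggingface_hub's `HfApi.upload_large_folder` API and using placeholder repo id and folder path (not taken from this commit), would look like:

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes a valid token is already configured (e.g. via `huggingface-cli login`)

# Illustrative only: repo_id and folder_path are hypothetical placeholders.
api.upload_large_folder(
    repo_id="your-org/your-nemo-checkpoint",
    repo_type="model",
    folder_path="path/to/local/checkpoint",
)
```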
model_weights/model.decoder.layers.mlp.linear_fc1._extra_state/shard_87_96.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b05019f3b231fd0abf8b970f1dc9fe9c9258e169604f125c8aa4defb579a697b
+ size 1840
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/1.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24f5e70541b418212686d3908cf225f1e004ffed15282a06a35cf0d75e7f8daa
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/10.4.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:368edc50067e96b5d4f97c75858bbc708797abec20aa1af5dfb8c822850f5fe7
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/12.3.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49d9316b2072a7c82c85bbdb1e7f01ac78487f460452bc5f04dc0aa6b60a967a
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/16.3.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6feea5c8949e18e424a3ae0501c8b606218fbcc283438dfa56762d9bc23b84d
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/4.7.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:931b9c63d7648e92864dc176390eb5eb1bc8790ec850b47d1ef020b4ed8fae75
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/40.6.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2092d5d4242190675058d3b17405bc7af08a186dcbc074d0dd8fc7c99aa4aff
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/5.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3210b7dc2c52a09c74eb80db27c31a3c212ea76d5b34314b6554e06dfdb1224f
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/52.2.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a491205359458b1e936a5ddde8673d085523feca3aa6454468910816a846182
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/64.5.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:794ec2a4084c2dff47176f843470ed56ccbf28de94be3c5f1764dd8723f0817b
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/66.4.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0a208d032630a29b2b0a498845f51d0c9119552228bdff58843d1fe8201b1182
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/67.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7af8778f658147876052e482bb7f50ae290cbd9c4404c25b2b11a5c0c900db90
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/67.7.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edbb44bbb84fe359dddc0cf82252fde432bcec61535df2f9c29f4093c2f6eed4
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/7.5.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b5c7a2a81c044f2ced0117af8c0abb2be9e2b8743ad919efdae885f74b34cfd
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/71.4.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee9b5d849f81fd15784374f1275f55aabe76ed1de99ba29fb1d63778ef9b440a
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/73.5.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:215fc7048a5352e6dbe806c7c08d869659552569903dfcfd4de6a5a1bea6dbf0
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/73.7.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88f718235b714ed602dca21eac57da5609efec3b95f7f5ae0509348908c2409c
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/76.2.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f15f8822ad73159b8681af0acddbb4010f928246f8c270a11ba63781ceda792
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/8.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:535ec5adc7f1484222add6fc7e450c0be5d2507d94885e9a26aff32894d62803
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/80.3.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a256b87dcdd0cb49c3ef7ec3fd9b601c729db86fcdb968ee0e722dcbf618d54
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/83.2.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf4252659f0cf6342116a576d3087423647d54f4db14f2861c9f88d619890559
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/84.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:897d16fa4f76f219d202e2326da9e954ec8340d05065295e5e5a39d36a645d8f
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/85.4.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef4f10cf55d4b8cfd84d4dc8a8768ec45d01b68172dc8dd9c1bb174c76a0b72c
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/87.6.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:510dbbe91c318d89b044d4117be7584d7bda544f8d4b12698d7da65487e37659
+ size 99090432
model_weights/model.decoder.layers.self_attention.linear_qkv.weight/9.7.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:906d8109eac44a9a791f4ce3d0f0d2ab0bb41dd5882b3b02f9ee3dd5d7365cba
+ size 99090432
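Every file added above is a Git LFS pointer rather than the tensor data itself: three lines giving the spec version, the SHA-256 object id, and the size of the real blob in bytes (99090432 bytes for each qkv weight shard in this commit). A small stand-alone parser for that pointer format, written as an illustrative sketch rather than anything shipped with NeMo or git-lfs, could look like:

```python
from dataclasses import dataclass
from pathlib import Path

@dataclass
class LfsPointer:
    version: str  # pointer spec URL, e.g. "https://git-lfs.github.com/spec/v1"
    oid: str      # object id, e.g. "sha256:24f5e7..."
    size: int     # size of the real object in bytes

def parse_lfs_pointer(path: Path) -> LfsPointer:
    """Parse the key/value lines of a Git LFS pointer file."""
    fields = {}
    for line in path.read_text().splitlines():
        if line.strip():
            key, _, value = line.partition(" ")
            fields[key] = value
    return LfsPointer(
        version=fields["version"],
        oid=fields["oid"],
        size=int(fields["size"]),
    )

# Example usage with a hypothetical local checkout path:
# ptr = parse_lfs_pointer(Path("model_weights/model.decoder.layers.self_attention.linear_qkv.weight/1.0.0"))
# print(ptr.oid, ptr.size)
```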