Upload llama-2-7b-chat-catala_q6_k.gguf with huggingface_hub
- .gitattributes +1 -0
- llama-2-7b-chat-catala_q6_k.gguf +3 -0
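The commit message matches the default message the huggingface_hub client uses when pushing a single file, so the upload was most likely done through its API. A minimal sketch of such an upload follows; the local path and the repo id are assumptions, not taken from the commit itself:

```python
# Sketch of how this file could have been pushed with huggingface_hub.
# The repo id and local path below are assumptions, not from the commit.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token saved by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="llama-2-7b-chat-catala_q6_k.gguf",  # local GGUF file
    path_in_repo="llama-2-7b-chat-catala_q6_k.gguf",     # filename inside the repo
    repo_id="your-username/llama-2-7b-chat-catala",      # hypothetical repo id
    repo_type="model",
)
```

Because the new filename gets an LFS rule in .gitattributes, the repository itself stores only a small Git LFS pointer in its place, as the diff below shows.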
.gitattributes
CHANGED
@@ -41,3 +41,4 @@ llama-2-7b-chat-catala_q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
 llama-2-7b-chat-catala_q2_k.gguf filter=lfs diff=lfs merge=lfs -text
 llama-2-7b-chat-catala_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
 llama-2-7b-chat-catala_q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+llama-2-7b-chat-catala_q6_k.gguf filter=lfs diff=lfs merge=lfs -text
llama-2-7b-chat-catala_q6_k.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:498bd315bd640c63e1a88731a4f29b5b05d165c67da42d81b3629e08c431b41f
+size 5529194272
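The pointer file records only the SHA-256 digest and byte size of the actual GGUF weights. A short check to confirm a downloaded copy matches this pointer, assuming the file sits in the current directory (the hash and size come from the pointer above, the path is an assumption):

```python
# Verify a downloaded file against the oid/size recorded in the LFS pointer.
import hashlib

EXPECTED_OID = "498bd315bd640c63e1a88731a4f29b5b05d165c67da42d81b3629e08c431b41f"
EXPECTED_SIZE = 5529194272  # bytes, from the pointer file

path = "llama-2-7b-chat-catala_q6_k.gguf"  # assumed local filename
digest = hashlib.sha256()
size = 0
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("file matches the LFS pointer")
```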