Upload llama-2-7b-chat-catala_q2_k.gguf with huggingface_hub
- .gitattributes +1 -0
- llama-2-7b-chat-catala_q2_k.gguf +3 -0
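The commit title indicates the file was pushed with the huggingface_hub library. A minimal sketch of how such an upload might be done with HfApi.upload_file; the repo_id below is a placeholder, not taken from this commit, and the token is assumed to come from the local Hugging Face login.

# Sketch: uploading a quantized GGUF file with huggingface_hub.
# repo_id is a placeholder; substitute the actual model repository.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="llama-2-7b-chat-catala_q2_k.gguf",   # local file to upload
    path_in_repo="llama-2-7b-chat-catala_q2_k.gguf",      # destination path in the repo
    repo_id="<username>/llama-2-7b-chat-catala-gguf",     # placeholder repo id
    repo_type="model",
    commit_message="Upload llama-2-7b-chat-catala_q2_k.gguf with huggingface_hub",
)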
.gitattributes
CHANGED
@@ -38,3 +38,4 @@ llama-2-7b-chat-catala_q4_0.gguf filter=lfs diff=lfs merge=lfs -text
 llama-2-7b-chat-catala_q3_k_l.gguf filter=lfs diff=lfs merge=lfs -text
 llama-2-7b-chat-catala_q4_1.gguf filter=lfs diff=lfs merge=lfs -text
 llama-2-7b-chat-catala_q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
+llama-2-7b-chat-catala_q2_k.gguf filter=lfs diff=lfs merge=lfs -text
llama-2-7b-chat-catala_q2_k.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da7b3325586f264f19a6d6b9aa5a869a7dc6c293b8cb22a89c3bbabced1290ba
+size 2825940768
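The three added lines are a standard Git LFS pointer: a spec version, the SHA-256 of the actual GGUF payload, and its size in bytes (about 2.8 GB). A small sketch, assuming the full (non-pointer) file has been downloaded locally under the same name, of how those two fields could be checked:

# Sketch: verify a local GGUF file against the Git LFS pointer fields above.
import hashlib
import os

path = "llama-2-7b-chat-catala_q2_k.gguf"

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print("oid sha256:", sha.hexdigest())   # should match the oid above
print("size:", os.path.getsize(path))   # should match 2825940768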