Upload folder using huggingface_hub
#1 by MaziyarPanahi - opened
- .gitattributes +17 -0
- Nemotron-Mini-4B-Instruct-GGUF_imatrix.dat +3 -0
- Nemotron-Mini-4B-Instruct.IQ1_M.gguf +3 -0
- Nemotron-Mini-4B-Instruct.IQ1_S.gguf +3 -0
- Nemotron-Mini-4B-Instruct.IQ2_XS.gguf +3 -0
- Nemotron-Mini-4B-Instruct.IQ3_XS.gguf +3 -0
- Nemotron-Mini-4B-Instruct.IQ4_XS.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q2_K.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q3_K_L.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q3_K_M.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q3_K_S.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q4_K_M.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q4_K_S.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q5_K_M.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q5_K_S.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q6_K.gguf +3 -0
- Nemotron-Mini-4B-Instruct.Q8_0.gguf +3 -0
- Nemotron-Mini-4B-Instruct.fp16.gguf +3 -0
- README.md +46 -0
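For context, a commit like this is typically produced with huggingface_hub's upload_folder API, which pushes every file in a local directory as a single commit. A minimal sketch, assuming the quants sit in a local folder named Nemotron-Mini-4B-Instruct-GGUF and a write token is already configured (the local folder path is an assumption, not taken from this commit):

    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token from `huggingface-cli login` / HF_TOKEN

    # Upload all files in the folder (GGUF quants, imatrix, README) in one commit.
    api.upload_folder(
        folder_path="Nemotron-Mini-4B-Instruct-GGUF",  # assumed local folder name
        repo_id="MaziyarPanahi/Nemotron-Mini-4B-Instruct-GGUF",
        repo_type="model",
        commit_message="Upload folder using huggingface_hub",
    )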
.gitattributes CHANGED
@@ -33,3 +33,20 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct-GGUF_imatrix.dat filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.IQ1_M.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.IQ1_S.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.IQ2_XS.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.IQ4_XS.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.IQ3_XS.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.fp16.gguf filter=lfs diff=lfs merge=lfs -text
+Nemotron-Mini-4B-Instruct.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
Nemotron-Mini-4B-Instruct-GGUF_imatrix.dat ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c20f78661f4b9349061594c0ec27d2c2ced9e628a643cc872b54c9762632f218
+size 3152060
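Each of the ADDED entries in this commit is a Git LFS pointer stub rather than the binary itself: the `oid sha256:` line records the hash of the real file and `size` its byte count. A minimal sketch of how a downloaded quant could be checked against its pointer, assuming the file already exists locally under the name shown (the local path is an assumption):

    import hashlib
    from pathlib import Path

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        """Stream the file so multi-GB GGUF quants never need to fit in RAM."""
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                h.update(chunk)
        return h.hexdigest()

    # Values copied from the LFS pointer above (the imatrix file).
    expected_oid = "c20f78661f4b9349061594c0ec27d2c2ced9e628a643cc872b54c9762632f218"
    local_file = Path("Nemotron-Mini-4B-Instruct-GGUF_imatrix.dat")  # assumed local path

    if local_file.exists():
        print("hash matches pointer:", sha256_of(str(local_file)) == expected_oid)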
Nemotron-Mini-4B-Instruct.IQ1_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:42c2f8452939ed80887b855370bd76cd4516ee534bed31d01f8fc523b1886198
+size 1414372416
Nemotron-Mini-4B-Instruct.IQ1_S.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c980b6fbae2aff94d3d7efb37bd03ab432bc396961713d747f6a62f8ac31b3d0
+size 1365122112
Nemotron-Mini-4B-Instruct.IQ2_XS.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19ca85b31901dca242892c6894736fed8cbd47623850e32fdf3c23938be0cb47
+size 1571560512
Nemotron-Mini-4B-Instruct.IQ3_XS.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:658bbe782090edc900889a115d75ae57ad25514af14bb45e964c4148d0fd2d79
+size 2064849984
Nemotron-Mini-4B-Instruct.IQ4_XS.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1794a39c4be8bcbddbb2a33c9a56e178a56c8bcc6ed26bcdb40ab1328054775
+size 2461260864
Nemotron-Mini-4B-Instruct.Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aaab65bd87ff9a7a784f1f56dd97ad4e282a0e74bfd96f35d7b7422103f4408e
+size 1902795840
Nemotron-Mini-4B-Instruct.Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a73a9dcb1edeadbfa630366cc23852b2ad1afeec552412d267fd53114ace427c
+size 2452954176
Nemotron-Mini-4B-Instruct.Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7a84b61951e7363d6917fe3fd07f0015e2ea9593edf2731fec19c4c79c9d090
+size 2297240640
Nemotron-Mini-4B-Instruct.Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82f56f9f6909ba97726cefd6d0e7546bd7766f6d1f9a8c5a192da242bc5143a2
+size 2115574848
Nemotron-Mini-4B-Instruct.Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ac4dd1b41bf7c7754647d773e88d85ca027a59d1cabf7ffdaf6f1a603ac7059
+size 2697387072
Nemotron-Mini-4B-Instruct.Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:188376e7718c53e995115b7d88f2f90335f63a354329255431cd84b842f08107
+size 2583354432
Nemotron-Mini-4B-Instruct.Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c14ed80a98d2493781e6fa5a48f4f0daa0fb764af0faf028a24001a5a1d1fa0b
+size 3059932224
Nemotron-Mini-4B-Instruct.Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb23ba2198e6acdb4bf1cd4ad675b2da02ff51a9f02383424f0da8b9d62d0874
+size 2993085504
Nemotron-Mini-4B-Instruct.Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:727904444b99fed4766c4b732558c8195c66d987a531077095362ff8e40f73c7
+size 3445136448
Nemotron-Mini-4B-Instruct.Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4aabc1dbb232915945ca76b612de4bb42597a2b5c0e89bb798e439df098b2d67
+size 4459928640
Nemotron-Mini-4B-Instruct.fp16.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b782c7292fec777a9e82c9cafeeb226e717a00193b9235c0182f0d0e62ea56b
+size 8388156224
README.md ADDED
@@ -0,0 +1,46 @@
+---
+tags:
+- quantized
+- 2-bit
+- 3-bit
+- 4-bit
+- 5-bit
+- 6-bit
+- 8-bit
+- GGUF
+- text-generation
+- text-generation
+model_name: Nemotron-Mini-4B-Instruct-GGUF
+base_model: nvidia/Nemotron-Mini-4B-Instruct
+inference: false
+model_creator: nvidia
+pipeline_tag: text-generation
+quantized_by: MaziyarPanahi
+---
+# [MaziyarPanahi/Nemotron-Mini-4B-Instruct-GGUF](https://huggingface.co/MaziyarPanahi/Nemotron-Mini-4B-Instruct-GGUF)
+- Model creator: [nvidia](https://huggingface.co/nvidia)
+- Original model: [nvidia/Nemotron-Mini-4B-Instruct](https://huggingface.co/nvidia/Nemotron-Mini-4B-Instruct)
+
+## Description
+[MaziyarPanahi/Nemotron-Mini-4B-Instruct-GGUF](https://huggingface.co/MaziyarPanahi/Nemotron-Mini-4B-Instruct-GGUF) contains GGUF format model files for [nvidia/Nemotron-Mini-4B-Instruct](https://huggingface.co/nvidia/Nemotron-Mini-4B-Instruct).
+
+### About GGUF
+
+GGUF is a new format introduced by the llama.cpp team on August 21st, 2023. It is a replacement for GGML, which is no longer supported by llama.cpp.
+
+Here is an incomplete list of clients and libraries that are known to support GGUF:
+
+* [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option.
+* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and an OpenAI-compatible API server.
+* [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. Linux available, in beta as of 27/11/2023.
+* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration.
+* [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling.
+* [GPT4All](https://gpt4all.io/index.html), a free and open-source locally running GUI, supporting Windows, Linux and macOS with full GPU accel.
+* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection.
+* [Faraday.dev](https://faraday.dev/), an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.
+* [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use.
+* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and an OpenAI-compatible AI server. Note: as of the time of writing (November 27th, 2023), ctransformers has not been updated in a long time and does not support many recent models.
+
+## Special thanks
+
+🙏 Special thanks to [Georgi Gerganov](https://github.com/ggerganov) and the whole team working on [llama.cpp](https://github.com/ggerganov/llama.cpp/) for making all of this possible.