morriszms commited on
Commit
7bf92e5
·
verified ·
1 Parent(s): d61e7f4

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ PowerLM-3b-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ PowerLM-3b-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ PowerLM-3b-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ PowerLM-3b-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ PowerLM-3b-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ PowerLM-3b-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ PowerLM-3b-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ PowerLM-3b-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ PowerLM-3b-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ PowerLM-3b-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ PowerLM-3b-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ PowerLM-3b-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
PowerLM-3b-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df34752c7a1fc27db8da28cd6d39f6d0dbdb94941f4e248759cdd9cef4bdb5d6
3
+ size 1344351936
PowerLM-3b-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83c86debb9f9aa21703927b626cc8af18015833d15fdb594e0b2659ea07bf986
3
+ size 1888464576
PowerLM-3b-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4882aa310b4b7a5ba38af2f64e6fd1d1110ef4c94253f79490832b78e3af560
3
+ size 1735847616
PowerLM-3b-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4511e576f19fd10d1955f6e95e7aedeb6dcb11f0d4b3cfdd6c2d5db5ba55af6d
3
+ size 1560006336
PowerLM-3b-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35c89b0b55dc85d31a2464bab4a926c92762aa81bb7012cb4a6eccba6d3141d3
3
+ size 2011221696
PowerLM-3b-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34dd56c88a04c8516e81f343617e0223f635f5943fb3fbb57d9158aebccdf125
3
+ size 2148079296
PowerLM-3b-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e7df7a3d1d09dcb086fa92ac26b64973b2cde8eeadf687dc80856e504703a7a
3
+ size 2027146944
PowerLM-3b-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91e8c43179f58f0fb76ede5904096cf58623eb3373c76129769e6ae3877cbc82
3
+ size 2435894976
PowerLM-3b-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebf146b69e8600683e9dc3158b74845b8eeeed0e8ef5aae5e405c21fa15a1977
3
+ size 2506397376
PowerLM-3b-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ce967c4906a31afb850ed750bf483f6069d64743043bb37cbc66485158c4a39
3
+ size 2435894976
PowerLM-3b-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:093fbf0e1964c934694e0a46e509f3345c258f7f7b54bfb14c6886dd0f705187
3
+ size 2887110336
PowerLM-3b-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a8ba210f9b1c7267aa59dd9908896865bcb81299b74b615bc0e03fdbfeca8ef4
3
+ size 3737341632
README.md ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pipeline_tag: text-generation
3
+ inference: false
4
+ license: apache-2.0
5
+ library_name: transformers
6
+ tags:
7
+ - TensorBlock
8
+ - GGUF
9
+ base_model: ibm/PowerLM-3b
10
+ model-index:
11
+ - name: ibm/PowerLM-3b
12
+ results:
13
+ - task:
14
+ type: text-generation
15
+ dataset:
16
+ name: ARC
17
+ type: lm-eval-harness
18
+ metrics:
19
+ - type: accuracy-norm
20
+ value: 60.5
21
+ name: accuracy-norm
22
+ verified: false
23
+ - type: accuracy
24
+ value: 72.0
25
+ name: accuracy
26
+ verified: false
27
+ - type: accuracy-norm
28
+ value: 74.6
29
+ name: accuracy-norm
30
+ verified: false
31
+ - type: accuracy-norm
32
+ value: 43.6
33
+ name: accuracy-norm
34
+ verified: false
35
+ - type: accuracy-norm
36
+ value: 79.9
37
+ name: accuracy-norm
38
+ verified: false
39
+ - type: accuracy-norm
40
+ value: 70.0
41
+ name: accuracy-norm
42
+ verified: false
43
+ - type: accuracy
44
+ value: 49.2
45
+ name: accuracy
46
+ verified: false
47
+ - type: accuracy
48
+ value: 34.9
49
+ name: accuracy
50
+ verified: false
51
+ - type: accuracy
52
+ value: 15.2
53
+ name: accuracy
54
+ verified: false
55
+ - task:
56
+ type: text-generation
57
+ dataset:
58
+ name: humaneval
59
+ type: bigcode-eval
60
+ metrics:
61
+ - type: pass@1
62
+ value: 26.8
63
+ name: pass@1
64
+ verified: false
65
+ - type: pass@1
66
+ value: 33.6
67
+ name: pass@1
68
+ verified: false
69
+ ---
70
+
71
+ <div style="width: auto; margin-left: auto; margin-right: auto">
72
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
73
+ </div>
74
+ <div style="display: flex; justify-content: space-between; width: 100%;">
75
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
76
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
77
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
78
+ </p>
79
+ </div>
80
+ </div>
81
+
82
+ ## ibm/PowerLM-3b - GGUF
83
+
84
+ This repo contains GGUF format model files for [ibm/PowerLM-3b](https://huggingface.co/ibm/PowerLM-3b).
85
+
86
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
87
+
88
+ ## Prompt template
89
+
90
+ ```
91
+
92
+ ```
93
+
94
+ ## Model file specification
95
+
96
+ | Filename | Quant type | File Size | Description |
97
+ | -------- | ---------- | --------- | ----------- |
98
+ | [PowerLM-3b-Q2_K.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q2_K.gguf) | Q2_K | 1.252 GB | smallest, significant quality loss - not recommended for most purposes |
99
+ | [PowerLM-3b-Q3_K_S.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q3_K_S.gguf) | Q3_K_S | 1.453 GB | very small, high quality loss |
100
+ | [PowerLM-3b-Q3_K_M.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q3_K_M.gguf) | Q3_K_M | 1.617 GB | very small, high quality loss |
101
+ | [PowerLM-3b-Q3_K_L.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q3_K_L.gguf) | Q3_K_L | 1.759 GB | small, substantial quality loss |
102
+ | [PowerLM-3b-Q4_0.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q4_0.gguf) | Q4_0 | 1.873 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
103
+ | [PowerLM-3b-Q4_K_S.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q4_K_S.gguf) | Q4_K_S | 1.888 GB | small, greater quality loss |
104
+ | [PowerLM-3b-Q4_K_M.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q4_K_M.gguf) | Q4_K_M | 2.001 GB | medium, balanced quality - recommended |
105
+ | [PowerLM-3b-Q5_0.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q5_0.gguf) | Q5_0 | 2.269 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
106
+ | [PowerLM-3b-Q5_K_S.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q5_K_S.gguf) | Q5_K_S | 2.269 GB | large, low quality loss - recommended |
107
+ | [PowerLM-3b-Q5_K_M.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q5_K_M.gguf) | Q5_K_M | 2.334 GB | large, very low quality loss - recommended |
108
+ | [PowerLM-3b-Q6_K.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q6_K.gguf) | Q6_K | 2.689 GB | very large, extremely low quality loss |
109
+ | [PowerLM-3b-Q8_0.gguf](https://huggingface.co/tensorblock/PowerLM-3b-GGUF/tree/main/PowerLM-3b-Q8_0.gguf) | Q8_0 | 3.481 GB | very large, extremely low quality loss - not recommended |
110
+
111
+
112
+ ## Downloading instruction
113
+
114
+ ### Command line
115
+
116
+ First, install the Hugging Face CLI:
117
+
118
+ ```shell
119
+ pip install -U "huggingface_hub[cli]"
120
+ ```
121
+
122
+ Then, download the individual model file to a local directory:
123
+
124
+ ```shell
125
+ huggingface-cli download tensorblock/PowerLM-3b-GGUF --include "PowerLM-3b-Q2_K.gguf" --local-dir MY_LOCAL_DIR
126
+ ```
127
+
128
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
129
+
130
+ ```shell
131
+ huggingface-cli download tensorblock/PowerLM-3b-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
132
+ ```