morriszms committed on
Commit 1b2943d · verified · 1 Parent(s): 7783307

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ ghost-7b-v0.9.0-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,267 @@
+ ---
+ language:
+ - en
+ - vi
+ license: mit
+ library_name: transformers
+ tags:
+ - ghost
+ - TensorBlock
+ - GGUF
+ pipeline_tag: text-generation
+ base_model: ghost-x/ghost-7b-v0.9.0
+ widget:
+ - text: '<|system|>
+
+ You are a helpful assistant.</s>
+
+ <|user|>
+
+ Thông tin về Peristernia despecta</s>
+
+ <|assistant|>
+
+ '
+ output:
+ text: Peristernia despecta là một loài ốc biển, là động vật thân mềm chân bụng
+ sống ở biển trong họ Fasciolariidae.
+ model-index:
+ - name: lamhieu/ghost-7b-v0.9.0
+ results:
+ - task:
+ type: text-generation
+ dataset:
+ name: VMLU
+ type: vmlu_v1.5
+ metrics:
+ - type: avg
+ value: 36.06
+ name: Average
+ verified: true
+ - type: stem
+ value: 33.54
+ name: STEM
+ verified: true
+ - type: ss
+ value: 38.74
+ name: Social science
+ verified: true
+ - type: hm
+ value: 37.15
+ name: Humanities
+ verified: true
+ - type: ot
+ value: 36.78
+ name: Other
+ verified: true
+ - task:
+ type: text-generation
+ dataset:
+ name: Open LLM Leaderboard
+ type: open_llm_leaderboard
+ metrics:
+ - type: avg
+ value: 56.89
+ name: Average
+ verified: true
+ - type: arc
+ value: 53.07
+ name: ARC
+ verified: true
+ - type: hs
+ value: 77.93
+ name: HellaSwag
+ verified: true
+ - type: mmlu
+ value: 55.09
+ name: MMLU
+ verified: true
+ - type: wg
+ value: 73.72
+ name: Winogrande
+ verified: true
+ - type: gsm8k
+ value: 33.74
+ name: GSM8K
+ verified: true
+ source:
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lamhieu/ghost-7b-v0.9.0
+ name: Open LLM Leaderboard
+ - task:
+ type: text-generation
+ name: Text Generation
+ dataset:
+ name: AI2 Reasoning Challenge (25-Shot)
+ type: ai2_arc
+ config: ARC-Challenge
+ split: test
+ args:
+ num_few_shot: 25
+ metrics:
+ - type: acc_norm
+ value: 53.07
+ name: normalized accuracy
+ source:
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lamhieu/ghost-7b-v0.9.0
+ name: Open LLM Leaderboard
+ - task:
+ type: text-generation
+ name: Text Generation
+ dataset:
+ name: HellaSwag (10-Shot)
+ type: hellaswag
+ split: validation
+ args:
+ num_few_shot: 10
+ metrics:
+ - type: acc_norm
+ value: 77.93
+ name: normalized accuracy
+ source:
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lamhieu/ghost-7b-v0.9.0
+ name: Open LLM Leaderboard
+ - task:
+ type: text-generation
+ name: Text Generation
+ dataset:
+ name: MMLU (5-Shot)
+ type: cais/mmlu
+ config: all
+ split: test
+ args:
+ num_few_shot: 5
+ metrics:
+ - type: acc
+ value: 55.09
+ name: accuracy
+ source:
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lamhieu/ghost-7b-v0.9.0
+ name: Open LLM Leaderboard
+ - task:
+ type: text-generation
+ name: Text Generation
+ dataset:
+ name: TruthfulQA (0-shot)
+ type: truthful_qa
+ config: multiple_choice
+ split: validation
+ args:
+ num_few_shot: 0
+ metrics:
+ - type: mc2
+ value: 47.79
+ source:
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lamhieu/ghost-7b-v0.9.0
+ name: Open LLM Leaderboard
+ - task:
+ type: text-generation
+ name: Text Generation
+ dataset:
+ name: Winogrande (5-shot)
+ type: winogrande
+ config: winogrande_xl
+ split: validation
+ args:
+ num_few_shot: 5
+ metrics:
+ - type: acc
+ value: 73.72
+ name: accuracy
+ source:
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lamhieu/ghost-7b-v0.9.0
+ name: Open LLM Leaderboard
+ - task:
+ type: text-generation
+ name: Text Generation
+ dataset:
+ name: GSM8k (5-shot)
+ type: gsm8k
+ config: main
+ split: test
+ args:
+ num_few_shot: 5
+ metrics:
+ - type: acc
+ value: 33.74
+ name: accuracy
+ source:
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=lamhieu/ghost-7b-v0.9.0
+ name: Open LLM Leaderboard
+ ---
+
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
+ </p>
+ </div>
+ </div>
+
+ ## ghost-x/ghost-7b-v0.9.0 - GGUF
+
+ This repo contains GGUF format model files for [ghost-x/ghost-7b-v0.9.0](https://huggingface.co/ghost-x/ghost-7b-v0.9.0).
+
+ The files were quantized on machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
+
+ <div style="text-align: left; margin: 20px 0;">
+ <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
+ Run them on the TensorBlock client using your local machine ↗
+ </a>
+ </div>
+
+ ## Prompt template
+
+ ```
+ <|system|>
+ {system_prompt}</s>
+ <|user|>
+ {prompt}</s>
+ <|assistant|>
+ ```
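+
+ The template can also be filled in programmatically. Below is a minimal sketch (not part of the upstream model card) that builds a prompt in this format and runs it through the llama-cpp-python bindings; the model path and generation settings are illustrative assumptions.
+
+ ```python
+ # Minimal sketch: build the Ghost chat prompt and run a downloaded GGUF file.
+ # Assumes `pip install llama-cpp-python` and a local quant such as Q4_K_M.
+ from llama_cpp import Llama
+
+ def build_prompt(system_prompt: str, prompt: str) -> str:
+     # Mirrors the template above: <|system|> ... </s> <|user|> ... </s> <|assistant|>
+     return (
+         f"<|system|>\n{system_prompt}</s>\n"
+         f"<|user|>\n{prompt}</s>\n"
+         f"<|assistant|>\n"
+     )
+
+ llm = Llama(model_path="MY_LOCAL_DIR/ghost-7b-v0.9.0-Q4_K_M.gguf", n_ctx=2048)
+ out = llm(
+     build_prompt("You are a helpful assistant.", "Thông tin về Peristernia despecta"),
+     max_tokens=256,
+     stop=["</s>"],  # the template uses </s> as the turn terminator
+ )
+ print(out["choices"][0]["text"])
+ ```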
+
+ ## Model file specification
+
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [ghost-7b-v0.9.0-Q2_K.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q2_K.gguf) | Q2_K | 2.532 GB | smallest, significant quality loss - not recommended for most purposes |
+ | [ghost-7b-v0.9.0-Q3_K_S.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q3_K_S.gguf) | Q3_K_S | 2.947 GB | very small, high quality loss |
+ | [ghost-7b-v0.9.0-Q3_K_M.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q3_K_M.gguf) | Q3_K_M | 3.277 GB | very small, high quality loss |
+ | [ghost-7b-v0.9.0-Q3_K_L.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q3_K_L.gguf) | Q3_K_L | 3.560 GB | small, substantial quality loss |
+ | [ghost-7b-v0.9.0-Q4_0.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q4_0.gguf) | Q4_0 | 3.827 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+ | [ghost-7b-v0.9.0-Q4_K_S.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q4_K_S.gguf) | Q4_K_S | 3.856 GB | small, greater quality loss |
+ | [ghost-7b-v0.9.0-Q4_K_M.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q4_K_M.gguf) | Q4_K_M | 4.068 GB | medium, balanced quality - recommended |
+ | [ghost-7b-v0.9.0-Q5_0.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q5_0.gguf) | Q5_0 | 4.654 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+ | [ghost-7b-v0.9.0-Q5_K_S.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q5_K_S.gguf) | Q5_K_S | 4.654 GB | large, low quality loss - recommended |
+ | [ghost-7b-v0.9.0-Q5_K_M.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q5_K_M.gguf) | Q5_K_M | 4.779 GB | large, very low quality loss - recommended |
+ | [ghost-7b-v0.9.0-Q6_K.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q6_K.gguf) | Q6_K | 5.534 GB | very large, extremely low quality loss |
+ | [ghost-7b-v0.9.0-Q8_0.gguf](https://huggingface.co/tensorblock/ghost-7b-v0.9.0-GGUF/blob/main/ghost-7b-v0.9.0-Q8_0.gguf) | Q8_0 | 7.167 GB | very large, extremely low quality loss - not recommended |
+
+ ## Downloading instructions
+
+ ### Command line
+
+ First, install the Hugging Face CLI:
+
+ ```shell
+ pip install -U "huggingface_hub[cli]"
+ ```
+
+ Then, download an individual model file to a local directory:
+
+ ```shell
+ huggingface-cli download tensorblock/ghost-7b-v0.9.0-GGUF --include "ghost-7b-v0.9.0-Q2_K.gguf" --local-dir MY_LOCAL_DIR
+ ```
+
+ To download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can try:
+
+ ```shell
+ huggingface-cli download tensorblock/ghost-7b-v0.9.0-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```
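+
+ Alternatively, you can fetch a file from Python with `huggingface_hub` (installed by the command above) and, optionally, check it against the sha256 recorded in its git-lfs pointer in this repository. This is a minimal sketch, not part of the upstream instructions; the target directory and the choice of the Q2_K file are placeholders.
+
+ ```python
+ # Minimal sketch: download one quant programmatically and verify its checksum.
+ import hashlib
+ from huggingface_hub import hf_hub_download
+
+ local_path = hf_hub_download(
+     repo_id="tensorblock/ghost-7b-v0.9.0-GGUF",
+     filename="ghost-7b-v0.9.0-Q2_K.gguf",
+     local_dir="MY_LOCAL_DIR",  # placeholder, same as in the CLI examples
+ )
+
+ # sha256 for the Q2_K file, taken from its git-lfs pointer in this commit.
+ expected = "fadcd0908f067f4fb99121fbc3f2577a41f117f1bf00c667c7ce1ef8c2625cc4"
+ h = hashlib.sha256()
+ with open(local_path, "rb") as f:
+     for chunk in iter(lambda: f.read(1 << 20), b""):
+         h.update(chunk)
+ assert h.hexdigest() == expected, "checksum mismatch - re-download the file"
+ print(local_path)
+ ```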
ghost-7b-v0.9.0-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fadcd0908f067f4fb99121fbc3f2577a41f117f1bf00c667c7ce1ef8c2625cc4
+ size 2719243072
ghost-7b-v0.9.0-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ca984eaa3dbfeb302aa29283326418faaaba31f2e06c4a2be703b469bd1611f
+ size 3822025536
ghost-7b-v0.9.0-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a60baeb4722982655166556caa6006ae4309ad8ce6b47b67ab538541c3dd0365
+ size 3518987072
ghost-7b-v0.9.0-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37b4e38846e05e7c5dea74350818e8b867fca7d870fff6bc1dc046e2a48b58e1
+ size 3164568384
ghost-7b-v0.9.0-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:918b242f2174073c7c3f8dd343ef19fdd61c4ce2dc141b12530c882f069b3c83
+ size 4108917568
ghost-7b-v0.9.0-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:15e0a3cdf1fd2405b7313d22feaeb2499c9dcb4c1648a1bd5a1fc7e9e1dcf99a
+ size 4368440128
ghost-7b-v0.9.0-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:191f38edf0fbbc5995ea0223a81b55126a805178cff660c8dd8ae3c02994f3b3
+ size 4140374848
ghost-7b-v0.9.0-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b4b180c0fdafa5e0fb5ce2895681326cb807ce70be25d13c3829c560b21510d
+ size 4997716800
ghost-7b-v0.9.0-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a621a760b1c1a2ab9ca7f5f3ba31b27b0eb56eebee641425829a127ff4234fa
+ size 5131410240
ghost-7b-v0.9.0-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fb8578295a4ffcf569bdf1e97ffa8dfe52c2522bc8dad449bc0b789b5fea7dc
+ size 4997716800
ghost-7b-v0.9.0-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7e9913820a03ad2c3d5e63443b4fff80e39a789d005b32945554dc8022fc8ce
+ size 5942065984
ghost-7b-v0.9.0-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d74ae8b9bba787ca5a20eda2da8d3d82166ef26354fff7abe22d7998b6888259
+ size 7695858496