mikeee committed on
Commit 0238de3
0 Parent(s):

Duplicate from mikeee/WizardCoder-15B-1.0-GGML

Files changed (6):
  1. .flake8 +21 -0
  2. .gitattributes +35 -0
  3. .gitignore +1 -0
  4. README.md +13 -0
  5. app.py +403 -0
  6. requirements.txt +6 -0
.flake8 ADDED
@@ -0,0 +1,21 @@
+ [flake8]
+ ignore =
+     # E203 whitespace before ':'
+     E203
+     D203
+     # line too long
+     E501
+ per-file-ignores =
+     # imported but unused
+     # __init__.py: F401
+     test_*.py: F401
+ exclude =
+     .git
+     __pycache__
+     docs/source/conf.py
+     old
+     build
+     dist
+     .venv
+     pad*.py app-.py
+ max-complexity = 25
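With this file at the repository root, flake8 picks the settings up automatically; assuming flake8 is installed, a typical run from the project root is:

    python -m flake8 .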
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
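This is the stock Hugging Face LFS attribute set; entries of the same shape are what git-lfs itself writes, e.g.

    git lfs track "*.bin"

appends the corresponding "*.bin filter=lfs diff=lfs merge=lfs -text" line.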
.gitignore ADDED
@@ -0,0 +1 @@
+ call-activate.bat
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: TheBloke/WizardCoder-15B-1.0-GGML
+ emoji: 🚀
+ colorFrom: green
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.35.2
+ app_file: app.py
+ pinned: false
+ duplicated_from: mikeee/WizardCoder-15B-1.0-GGML
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,403 @@
+ """Run codes"""
+ # pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
+ # import gradio
+
+ # gradio.load("models/WizardLM/WizardCoder-15B-V1.0").launch()
+
+ import os
+ from pathlib import Path
+ import time
+ from dataclasses import asdict, dataclass
+ from types import SimpleNamespace
+
+ import gradio as gr
+ from about_time import about_time
+
+ # from ctransformers import AutoConfig, AutoModelForCausalLM
+ from ctransformers import AutoModelForCausalLM
+ from huggingface_hub import hf_hub_download
+ from loguru import logger
+
+ os.environ["TZ"] = "Asia/Shanghai"
+ try:
+     time.tzset()  # type: ignore # pylint: disable=no-member
+ except Exception:
+     # Windows
+     logger.warning("Windows, can't run time.tzset()")
+
+ ns = SimpleNamespace(
+     response="",
+     generator=[],
+ )
+
+ default_system_prompt = "A conversation between a user and an LLM-based AI assistant named Local Assistant. Local Assistant gives helpful and honest answers."
+
+ user_prefix = "[user]: "
+ assistant_prefix = "[assistant]: "
+
+
+ def predict(prompt, bot):
+     # logger.debug(f"{prompt=}, {bot=}, {timeout=}")
+     logger.debug(f"{prompt=}, {bot=}")
+
+     ns.response = ""
+     with about_time() as atime:  # type: ignore
+         try:
+             # user_prompt = prompt
+             generator = generate(
+                 LLM,
+                 GENERATION_CONFIG,
+                 system_prompt=default_system_prompt,
+                 user_prompt=prompt.strip(),
+             )
+
+             ns.generator = generator  # for .then
+
+             print(assistant_prefix, end=" ", flush=True)
+
+             response = ""
+             buff.update(value="diggin...")
+
+             for word in generator:
+                 # print(word, end="", flush=True)
+                 print(word, flush=True)  # vertical stream
+                 response += word
+                 ns.response = response
+                 buff.update(value=response)
+             print("")
+             logger.debug(f"{response=}")
+         except Exception as exc:
+             logger.error(exc)
+             response = f"{exc=}"
+
+     # bot = {"inputs": [response]}
+     _ = (
+         f"(time elapsed: {atime.duration_human}, "  # type: ignore
+         f"{atime.duration/(len(prompt) + len(response)):.1f}s/char)"  # type: ignore
+     )
+
+     bot.append([prompt, f"{response} {_}"])
+
+     return prompt, bot
+
+
+ def predict_api(prompt):
+     logger.debug(f"{prompt=}")
+     ns.response = ""
+     try:
+         # user_prompt = prompt
+         _ = GenerationConfig(
+             temperature=0.2,
+             top_k=0,
+             top_p=0.9,
+             repetition_penalty=1.0,
+             max_new_tokens=512,  # adjust as needed
+             seed=42,
+             reset=False,  # reset history (cache)
+             stream=True,  # TODO stream=False and generator
+             threads=os.cpu_count() // 2,  # type: ignore # adjust for your CPU
+             stop=["<|im_end|>", "|<"],
+         )
+
+         # TODO stream does not make sense in api?
+         generator = generate(
+             LLM, _, system_prompt=default_system_prompt, user_prompt=prompt.strip()
+         )
+         print(assistant_prefix, end=" ", flush=True)
+
+         response = ""
+         buff.update(value="diggin...")
+         for word in generator:
+             print(word, end="", flush=True)
+             response += word
+             ns.response = response
+             buff.update(value=response)
+         print("")
+         logger.debug(f"{response=}")
+     except Exception as exc:
+         logger.error(exc)
+         response = f"{exc=}"
+     # bot = {"inputs": [response]}
+     # bot = [(prompt, response)]
+
+     return response
+
+
+ def download_quant(destination_folder: str, repo_id: str, model_filename: str):
+     local_path = os.path.abspath(destination_folder)
+     return hf_hub_download(
+         repo_id=repo_id,
+         filename=model_filename,
+         local_dir=local_path,
+         local_dir_use_symlinks=True,
+     )
+
+
+ @dataclass
+ class GenerationConfig:
+     temperature: float
+     top_k: int
+     top_p: float
+     repetition_penalty: float
+     max_new_tokens: int
+     seed: int
+     reset: bool
+     stream: bool
+     threads: int
+     stop: list[str]
+
+
+ def format_prompt(system_prompt: str, user_prompt: str):
+     """Format prompt based on: https://huggingface.co/spaces/mosaicml/mpt-30b-chat/blob/main/app.py."""
+     # TODO im_start/im_end possible fix for WizardCoder
+
+     system_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
+     user_prompt = f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
+     assistant_prompt = "<|im_start|>assistant\n"
+
+     return f"{system_prompt}{user_prompt}{assistant_prompt}"
+
+
+ def generate(
+     llm: AutoModelForCausalLM,
+     generation_config: GenerationConfig,
+     system_prompt: str = default_system_prompt,
+     user_prompt: str = "",
+ ):
+     """Run model inference; return a generator if streaming is on."""
+     # if not user_prompt.strip():
+     return llm(
+         format_prompt(
+             system_prompt,
+             user_prompt,
+         ),
+         **asdict(generation_config),
+     )
+
+
+ logger.info("start dl")
+ _ = """full url: https://huggingface.co/TheBloke/mpt-30B-chat-GGML/blob/main/mpt-30b-chat.ggmlv0.q4_1.bin"""
+
+ # https://huggingface.co/TheBloke/mpt-30B-chat-GGML
+ _ = """
+ mpt-30b-chat.ggmlv0.q4_0.bin  q4_0  4  16.85 GB  19.35 GB  4-bit.
+ mpt-30b-chat.ggmlv0.q4_1.bin  q4_1  4  18.73 GB  21.23 GB  4-bit. Higher accuracy than q4_0 but not as high as q5_0. However has quicker inference than q5 models.
+ mpt-30b-chat.ggmlv0.q5_0.bin  q5_0  5  20.60 GB  23.10 GB
+ mpt-30b-chat.ggmlv0.q5_1.bin  q5_1  5  22.47 GB  24.97 GB
+ mpt-30b-chat.ggmlv0.q8_0.bin  q8_0  8  31.83 GB  34.33 GB
+ """
+ MODEL_FILENAME = "mpt-30b-chat.ggmlv0.q4_1.bin"
+ MODEL_FILENAME = "WizardCoder-15B-1.0.ggmlv3.q4_0.bin"  # 10.7G
+ MODEL_FILENAME = "WizardCoder-15B-1.0.ggmlv3.q4_1.bin"  # 11.9G
+ DESTINATION_FOLDER = "models"
+
+ REPO_ID = "TheBloke/mpt-30B-chat-GGML"
+ if "WizardCoder" in MODEL_FILENAME:
+     REPO_ID = "TheBloke/WizardCoder-15B-1.0-GGML"
+
+ download_quant(DESTINATION_FOLDER, REPO_ID, MODEL_FILENAME)
+
+ logger.info("done dl")
+
+ # if "mpt" in model_filename:
+ #     config = AutoConfig.from_pretrained("mosaicml/mpt-30b-chat", context_length=8192)
+ #     llm = AutoModelForCausalLM.from_pretrained(
+ #         os.path.abspath(f"models/{model_filename}"),
+ #         model_type="mpt",
+ #         config=config,
+ #     )
+
+ # https://huggingface.co/spaces/matthoffner/wizardcoder-ggml/blob/main/main.py
+ _ = """
+ llm = AutoModelForCausalLM.from_pretrained(
+     "TheBloke/WizardCoder-15B-1.0-GGML",
+     model_file="",
+     model_type="starcoder",
+     threads=8
+ )
+ # """
+
+ logger.debug(f"{os.cpu_count()=}")
+
+ if "WizardCoder" in MODEL_FILENAME:
+     _ = Path("models", MODEL_FILENAME).absolute().as_posix()
+     LLM = AutoModelForCausalLM.from_pretrained(
+         "TheBloke/WizardCoder-15B-1.0-GGML",
+         model_file=_,
+         model_type="starcoder",
+         threads=os.cpu_count() // 2,  # type: ignore
+     )
+ # LLM = AutoModelForCausalLM.from_pretrained(
+ #     "TheBloke/WizardCoder-15B-1.0-GGML",
+ #     model_file=MODEL_FILENAME,
+ #     model_type="starcoder",
+ #     threads=os.cpu_count() // 2  # type: ignore
+ # )
+
+ cpu_count = os.cpu_count() // 2  # type: ignore
+ logger.debug(f"{cpu_count=}")
+
+ GENERATION_CONFIG = GenerationConfig(
+     temperature=0.2,
+     top_k=0,
+     top_p=0.9,
+     repetition_penalty=1.0,
+     max_new_tokens=512,  # adjust as needed
+     seed=42,
+     reset=False,  # reset history (cache)
+     stream=True,  # streaming per word/token
+     threads=cpu_count,
+     stop=["<|im_end|>", "|<"],  # TODO possible fix of stop
+ )
+
+ css = """
+     .importantButton {
+         background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
+         border: none !important;
+     }
+     .importantButton:hover {
+         background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
+         border: none !important;
+     }
+     .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
+     .xsmall {font-size: x-small;}
+ """
+
+ with gr.Blocks(
+     # title="mpt-30b-chat-ggml",
+     title=f"{MODEL_FILENAME}",
+     theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
+     css=css,
+ ) as block:
+     with gr.Accordion("🎈 Info", open=False):
+         # gr.HTML(
+         #     """<center><a href="https://huggingface.co/spaces/mikeee/mpt-30b-chat?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate"></a> and spin a CPU UPGRADE to avoid the queue</center>"""
+         # )
+         gr.Markdown(
+             f"""<h4><center>{MODEL_FILENAME}</center></h4>
+
+             Most examples are meant for another model. You should probably
+             try some coder-related prompts.
+
+             If an error occurs, refresh the browser and try again.
+
+             It takes more than 100 seconds to get a response. Restarting the space takes about 2 minutes if the space is asleep due to inactivity; if the space crashes for some reason, it will also take about 2 minutes to restart. Refresh the browser to reload the restarted space.
+             """,
+             elem_classes="xsmall",
+         )
+
+     # chatbot = gr.Chatbot().style(height=700)  # 500
+     chatbot = gr.Chatbot(height=700)  # 500
+     buff = gr.Textbox(show_label=False, visible=False)
+     with gr.Row():
+         with gr.Column(scale=4):
+             msg = gr.Textbox(
+                 label="Chat Message Box",
+                 placeholder="Ask me anything (press Enter or click Submit to send)",
+                 show_label=False,
+             ).style(container=False)
+         with gr.Column(scale=1, min_width=100):
+             with gr.Row():
+                 submit = gr.Button("Submit", elem_classes="xsmall")
+                 stop = gr.Button("Stop", visible=False)
+                 clear = gr.Button("Clear History", visible=True)
+     with gr.Row(visible=False):
+         with gr.Accordion("Advanced Options:", open=False):
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     system = gr.Textbox(
+                         label="System Prompt",
+                         value=default_system_prompt,
+                         show_label=False,
+                     ).style(container=False)
+                 with gr.Column():
+                     with gr.Row():
+                         change = gr.Button("Change System Prompt")
+                         reset = gr.Button("Reset System Prompt")
+
+     with gr.Accordion("Example Inputs", open=True):
+         etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
+         examples = gr.Examples(
+             examples=[
+                 ["判断一个数是不是质数的 javascript 码"],
+                 ["实现python 里 range(10)的 javascript 码"],
+                 ["实现python 里 [*(range(10)]的 javascript 码"],
+                 ["Explain the plot of Cinderella in a sentence."],
+                 [
+                     "How long does it take to become proficient in French, and what are the best methods for retaining information?"
+                 ],
+                 ["What are some common mistakes to avoid when writing code?"],
+                 ["Build a prompt to generate a beautiful portrait of a horse"],
+                 ["Suggest four metaphors to describe the benefits of AI"],
+                 ["Write a pop song about leaving home for the sandy beaches."],
+                 ["Write a summary demonstrating my ability to tame lions"],
+                 ["鲁迅和周树人什么关系 说中文"],
+                 ["鲁迅和周树人什么关系"],
+                 ["鲁迅和周树人什么关系 用英文回答"],
+                 ["从前有一头牛,这头牛后面有什么?"],
+                 ["正无穷大加一大于正无穷大吗?"],
+                 ["正无穷大加正无穷大大于正无穷大吗?"],
+                 ["-2的平方根等于什么"],
+                 ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
+                 ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
+                 ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
+                 [f"{etext} 翻成中文,列出3个版本"],
+                 [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本"],
+                 ["假定 1 + 2 = 4, 试求 7 + 8"],
+                 ["Erkläre die Handlung von Cinderella in einem Satz."],
+                 ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch"],
+             ],
+             inputs=[msg],
+             examples_per_page=40,
+         )
+
+     # with gr.Row():
+     with gr.Accordion("Disclaimer", open=False):
+         _ = "-".join(MODEL_FILENAME.split("-")[:2])
+         gr.Markdown(
+             f"Disclaimer: {_} can produce factually incorrect output, and should not be relied on to produce "
+             f"factually accurate information. {_} was trained on various public datasets; while great efforts "
+             "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
+             "biased, or otherwise offensive outputs.",
+             elem_classes=["disclaimer"],
+         )
+
+     msg.submit(
+         # fn=conversation.user_turn,
+         fn=predict,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         # queue=True,
+         show_progress="full",
+         api_name="predict",
+     )
+     submit.click(
+         fn=lambda x, y: ("",) + predict(x, y)[1:],  # clear msg
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         show_progress="full",
+     )
+     clear.click(lambda: None, None, chatbot, queue=False)
+
+     # update buff Textbox (every: units in seconds)
+     # https://huggingface.co/spaces/julien-c/nvidia-smi/discussions
+     # does not work
+     # AttributeError: 'Blocks' object has no attribute 'run_forever'
+     # block.run_forever(lambda: ns.response, None, [buff], every=1)
+
+     with gr.Accordion("For Chat/Translation API", open=False, visible=False):
+         input_text = gr.Text()
+         api_btn = gr.Button("Go", variant="primary")
+         out_text = gr.Text()
+     api_btn.click(
+         predict_api,
+         input_text,
+         out_text,
+         # show_progress="full",
+         api_name="api",
+     )
+
+ # concurrency_count=5, max_size=20
+ # max_size=36, concurrency_count=14
+ block.queue(concurrency_count=5, max_size=20).launch(debug=True)
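Since predict_api is exposed with api_name="api" (and predict with api_name="predict"), the running Space can also be queried programmatically. A minimal sketch using gradio_client — illustrative only, not part of this commit; the Space id is assumed from the duplicated_from field above:

    # sketch: call the Space's /api endpoint (assumes gradio_client is installed
    # and the Space is up)
    from gradio_client import Client

    client = Client("mikeee/WizardCoder-15B-1.0-GGML")  # Space id assumed

    # predict_api takes a single prompt string and returns the completion text
    result = client.predict(
        "Write a Python function that checks whether a number is prime.",
        api_name="/api",
    )
    print(result)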
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ ctransformers==0.2.10
+ transformers==4.30.2
+ huggingface_hub
+ gradio
+ loguru
+ about-time
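Note that ctransformers is pinned to 0.2.10, a GGML-era (pre-GGUF) build that loads the .ggmlv3 starcoder file used in app.py, while the unpinned packages float. Setting up a fresh environment is the usual:

    pip install -r requirements.txt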