mikeee committed on
Commit 8ffb1bd
0 Parent(s):

Duplicate from mikeee/codellama-13b-python-ggml

Files changed (5)
  1. .gitattributes +35 -0
  2. README.md +13 -0
  3. app.py +411 -0
  4. examples_list.py +12 -0
  5. requirements.txt +8 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
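These rules route large binary artifacts (model weights, archives, serialized tensors) through Git LFS, which is what keeps the ~7.87 GB GGML file referenced in app.py out of ordinary git history. A rough, self-contained check of which filenames the simple `*.ext` patterns would catch (a sketch using stdlib `fnmatch`; real gitattributes matching has extra rules, e.g. for `saved_model/**/*`):

```python
from fnmatch import fnmatch

# A subset of the "*.ext" patterns declared above.
lfs_patterns = ["*.bin", "*.safetensors", "*.onnx", "*.gz", "*.zip"]

def tracked_by_lfs(filename: str) -> bool:
    """Return True if the filename matches one of the LFS patterns."""
    return any(fnmatch(filename, pattern) for pattern in lfs_patterns)

print(tracked_by_lfs("codellama-13b-python.ggmlv3.Q4_K_M.bin"))  # True
print(tracked_by_lfs("app.py"))  # False
```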
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: codellama 13b python ggml
+ emoji: 🦀
+ colorFrom: pink
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 3.41.2
+ app_file: app.py
+ license: mit
+ duplicated_from: mikeee/codellama-13b-python-ggml
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,411 @@
+ """Run code."""
+ # pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
+ # ruff: noqa: E501
+ import gc
+ import os
+ import platform
+ import random
+ import time
+ from dataclasses import asdict, dataclass
+ from pathlib import Path
+ from typing import Optional, Sequence
+
+ # from types import SimpleNamespace
+ import gradio as gr
+ import psutil
+ from about_time import about_time
+ from ctransformers import AutoModelForCausalLM
+ from dl_hf_model import dl_hf_model
+ from examples_list import examples_list
+ from loguru import logger
+
+ url = "https://huggingface.co/TheBloke/CodeLlama-13B-Python-GGML/blob/main/codellama-13b-python.ggmlv3.Q4_K_M.bin"  # 7.87G
+
+ LLM = None
+ gc.collect()
+
+ try:
+     logger.debug(f" dl {url}")
+     model_loc, file_size = dl_hf_model(url)
+     logger.info(f"done load llm {model_loc=} {file_size=}G")
+ except Exception as exc_:
+     logger.error(exc_)
+     raise SystemExit(1) from exc_
+
+ # raise SystemExit(0)
+
+ # Prompt template: Guanaco
+ # {past_history}
+ prompt_template = """You are a helpful assistant. Let's think step by step.
+ ### Human:
+ {question}
+ ### Assistant:"""
+
+ # Prompt template: garage-bAInd/Stable-Platypus2-13B
+ prompt_template = """
+ ### System:
+ This is a system prompt, please behave and help the user.
+
+ ### Instruction:
+
+ {question}
+
+ ### Response:
+ """
+ prompt_template = """
+ [INST] Write code to solve the following coding problem that obeys the constraints and
+ passes the example test cases. Please wrap your code answer using ```:
+
+ {question}
+ [/INST]
+ """
+
+ human_prefix = "### Instruction"
+ ai_prefix = "### Response"
+ stop_list = [f"{human_prefix}:"]
+
+ _ = (psutil.cpu_count(logical=False) or 2) - 1  # cpu_count() may return None
+ cpu_count: int = int(_) if _ else 1
+ logger.debug(f"{cpu_count=}")
+
+ logger.debug(f"{model_loc=}")
+ LLM = AutoModelForCausalLM.from_pretrained(
+     model_loc,
+     model_type="llama",
+     threads=cpu_count,
+ )
+
+ os.environ["TZ"] = "Asia/Shanghai"
+ try:
+     time.tzset()  # type: ignore # pylint: disable=no-member
+ except Exception:
+     # Windows
+     logger.warning("Windows, can't run time.tzset()")
+
+
+ # ctransformers.Config() default
+ # Config(top_k=40, top_p=0.95, temperature=0.8,
+ #     repetition_penalty=1.1, last_n_tokens=64, seed=-1,
+ #     batch_size=8, threads=-1, max_new_tokens=256,
+ #     stop=None, stream=False, reset=True,
+ #     context_length=-1, gpu_layers=0)
+ @dataclass
+ class GenerationConfig:
+     temperature: float = 0.7
+     top_k: int = 50
+     top_p: float = 0.9
+     repetition_penalty: float = 1.0
+     max_new_tokens: int = 512
+     seed: int = 42
+     reset: bool = False
+     stream: bool = True
+     threads: int = cpu_count
+     # stop: list[str] = field(default_factory=lambda: stop_list)
+
+ # ctransformers\llm.py
+ @dataclass
+ class Config:
+     # sample
+     top_k: int = 40
+     top_p: float = 0.95
+     temperature: float = 0.8
+     repetition_penalty: float = 1.1
+     last_n_tokens: int = 64
+     seed: int = -1
+
+     # eval
+     batch_size: int = 8
+     threads: int = -1
+
+     # generate
+     max_new_tokens: int = 512  # 256
+     stop: Optional[Sequence[str]] = None
+     stream: bool = True  # False
+     reset: bool = False  # True
+
+     # model
+     # context_length: int = -1
+     # gpu_layers: int = 0
+
+
+ def generate(
+     question: str,
+     llm=LLM,
+     # config: GenerationConfig = GenerationConfig(),
+     config: Config = Config(),
+ ):
+     """Run model inference; returns a generator when streaming is enabled."""
+     # _ = prompt_template.format(question=question)
+     # print(_)
+
+     prompt = prompt_template.format(question=question)
+
+     return llm(
+         prompt,
+         **asdict(config),
+         # **vars(config),
+     )
+
+
+ # logger.debug(f"{asdict(GenerationConfig())=}")
+ logger.debug(f"{Config(stream=True)=}")
+ logger.debug(f"{vars(Config(stream=True))=}")
+
+
+ def user(user_message, history):
+     # return user_message, history + [[user_message, None]]
+     if history is None:
+         history = []
+     history.append([user_message, None])
+     return user_message, history  # keep user_message
+
+
+ def user1(user_message, history):
+     # return user_message, history + [[user_message, None]]
+     if history is None:
+         history = []
+     history.append([user_message, None])
+     return "", history  # clear user_message
+
+
+ def bot_(history):
+     user_message = history[-1][0]
+     resp = random.choice(["How are you?", "I love you", "I'm very hungry"])
+     bot_message = user_message + ": " + resp
+     history[-1][1] = ""
+     for character in bot_message:
+         history[-1][1] += character
+         time.sleep(0.02)
+         yield history
+
+     history[-1][1] = resp
+     yield history
+
+
+ def bot(history):
+     user_message = ""
+     try:
+         user_message = history[-1][0]
+     except Exception as exc:
+         logger.error(exc)
+     response = []
+
+     logger.debug(f"{user_message=}")
+
+     with about_time() as atime:  # type: ignore
+         flag = 1
+         prefix = ""
+         then = time.time()
+
+         logger.debug("about to generate")
+
+         config = GenerationConfig(reset=True)
+         for elm in generate(user_message, config=config):
+             if flag == 1:
+                 logger.debug("in the loop")
+                 prefix = f"({time.time() - then:.2f}s) "
+                 flag = 0
+                 print(prefix, end="", flush=True)
+                 logger.debug(f"{prefix=}")
+             print(elm, end="", flush=True)
+             # logger.debug(f"{elm}")
+
+             response.append(elm)
+             history[-1][1] = prefix + "".join(response)
+             yield history
+
+     _ = (
+         f"(time elapsed: {atime.duration_human}, "  # type: ignore
+         f"{atime.duration/len(''.join(response)):.2f}s/char)"  # type: ignore
+     )
+
+     history[-1][1] = "".join(response) + f"\n{_}"
+     yield history
+
+
+ def predict_api(prompt):
+     logger.debug(f"{prompt=}")
+     try:
+         # user_prompt = prompt
+         config = GenerationConfig(
+             temperature=0.2,
+             top_k=10,
+             top_p=0.9,
+             repetition_penalty=1.0,
+             max_new_tokens=512,  # adjust as needed
+             seed=42,
+             reset=True,  # reset history (cache)
+             stream=False,
+             # threads=cpu_count,
+             # stop=prompt_prefix[1:2],
+         )
+
+         response = generate(
+             prompt,
+             config=config,
+         )
+
+         logger.debug(f"api: {response=}")
+     except Exception as exc:
+         logger.error(exc)
+         response = f"{exc=}"
+     # bot = {"inputs": [response]}
+     # bot = [(prompt, response)]
+
+     return response
+
+
+ css = """
+     .importantButton {
+         background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
+         border: none !important;
+     }
+     .importantButton:hover {
+         background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
+         border: none !important;
+     }
+     .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
+     .xsmall {font-size: x-small;}
+ """
+
+ logger.info("start block")
+
+ with gr.Blocks(
+     title=f"{Path(model_loc).name}",
+     # theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
+     theme=gr.themes.Glass(text_size="sm", spacing_size="sm"),
+     css=css,
+ ) as block:
+     # buff_var = gr.State("")
+     with gr.Accordion("🎈 Info", open=False):
+         gr.Markdown(
+             f"""<h5><center>{Path(model_loc).name}</center></h5>
+             Doesn't quite work -- either no output or it runs forever.""",
+             elem_classes="xsmall",
+         )
+
+     # chatbot = gr.Chatbot().style(height=700)  # 500
+     chatbot = gr.Chatbot(height=500)
+
+     # buff = gr.Textbox(show_label=False, visible=True)
+
+     with gr.Row():
+         with gr.Column(scale=5):
+             msg = gr.Textbox(
+                 label="Chat Message Box",
+                 placeholder="Ask me anything (press Shift+Enter or click Submit to send)",
+                 show_label=False,
+                 # container=False,
+                 lines=6,
+                 max_lines=30,
+                 show_copy_button=True,
+                 # ).style(container=False)
+             )
+         with gr.Column(scale=1, min_width=50):
+             with gr.Row():
+                 submit = gr.Button("Submit", elem_classes="xsmall")
+                 stop = gr.Button("Stop", visible=True)
+                 clear = gr.Button("Clear History", visible=True)
+     with gr.Row(visible=False):
+         with gr.Accordion("Advanced Options:", open=False):
+             with gr.Row():
+                 with gr.Column(scale=2):
+                     system = gr.Textbox(
+                         label="System Prompt",
+                         value=prompt_template,
+                         show_label=False,
+                         container=False,
+                         # ).style(container=False)
+                     )
+                 with gr.Column():
+                     with gr.Row():
+                         change = gr.Button("Change System Prompt")
+                         reset = gr.Button("Reset System Prompt")
+
+     with gr.Accordion("Example Inputs", open=True):
+         examples = gr.Examples(
+             examples=examples_list,
+             inputs=[msg],
+             examples_per_page=40,
+         )
+
+     # with gr.Row():
+     with gr.Accordion("Disclaimer", open=False):
+         _ = Path(model_loc).name
+         gr.Markdown(
+             f"Disclaimer: {_} can produce factually incorrect output, and should not be relied on to produce "
+             f"factually accurate information. {_} was trained on various public datasets; while great efforts "
+             "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
+             "biased, or otherwise offensive outputs.",
+             elem_classes=["disclaimer"],
+         )
+
+     msg_submit_event = msg.submit(
+         # fn=conversation.user_turn,
+         fn=user,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         show_progress="full",
+         # api_name=None,
+     ).then(bot, chatbot, chatbot, queue=True)
+     submit_click_event = submit.click(
+         # fn=lambda x, y: ("",) + user(x, y)[1:],  # clear msg
+         fn=user1,  # clear msg
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=True,
+         # queue=False,
+         show_progress="full",
+         # api_name=None,
+     ).then(bot, chatbot, chatbot, queue=True)
+     stop.click(
+         fn=None,
+         inputs=None,
+         outputs=None,
+         cancels=[msg_submit_event, submit_click_event],
+         queue=False,
+     )
+     clear.click(lambda: None, None, chatbot, queue=False)
+
+     with gr.Accordion("For Chat/Translation API", open=False, visible=False):
+         input_text = gr.Text()
+         api_btn = gr.Button("Go", variant="primary")
+         out_text = gr.Text()
+
+     api_btn.click(
+         predict_api,
+         input_text,
+         out_text,
+         api_name="api",
+     )
+
+     # block.load(update_buff, [], buff, every=1)
+     # block.load(update_buff, [buff_var], [buff_var, buff], every=1)
+
+ # concurrency_count=5, max_size=20
+ # max_size=36, concurrency_count=14
+ # CPU cpu_count=2 16G, model 7G
+ # CPU UPGRADE cpu_count=8 32G, model 7G
+
+ # does not work
+ _ = """
+ # _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1)
+ # concurrency_count = max(_, 1)
+ if psutil.cpu_count(logical=False) >= 8:
+     # concurrency_count = max(int(32 / file_size) - 1, 1)
+ else:
+     # concurrency_count = max(int(16 / file_size) - 1, 1)
+ # """
+
+ # default concurrency_count = 1
+ # block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
+
+ server_port = 7860
+ if "forindo" in platform.node():
+     server_port = 7861
+ block.queue(max_size=5).launch(
+     debug=True, server_name="0.0.0.0", server_port=server_port
+ )
+
+ # block.queue(max_size=5).launch(debug=True, server_name="0.0.0.0")
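Because the hidden "For Chat/Translation API" accordion wires `predict_api` to `api_name="api"`, the Space can also be called programmatically. A minimal sketch with `gradio_client` (assumptions: `gradio_client` is installed and the Space `mikeee/codellama-13b-python-ggml` named in the README is up; the endpoint path follows from `api_name="api"` above):

```python
from gradio_client import Client  # pip install gradio_client

# Space id taken from the README front matter (duplicated_from).
client = Client("mikeee/codellama-13b-python-ggml")

# predict_api takes a single prompt string and returns the generated text.
result = client.predict(
    "Python Program for Bubble Sort",
    api_name="/api",
)
print(result)
```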
examples_list.py ADDED
@@ -0,0 +1,12 @@
+ """Setup examples."""
+ # pylint: disable=invalid-name, line-too-long
+ examples_list = [
+     ["Python Program for Bubble Sort"],
+     ["Bubble Sort"],
+     ["Python Program to Print the Fibonacci sequence"],
+     ["""Convert js code "const numbers = [1, 2, 3, 4, 5]; console.log(numbers.includes(4));" to python code."""],
+     ["Print the Fibonacci sequence"],
+     ["Give Python code that checks whether a number is prime."],
+     ["Give JavaScript code that implements Python's range(10)."],
+     ["Give JavaScript code that implements Python's [*range(10)]."],
+ ]
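Each entry is a one-element list because `gr.Examples` in app.py binds the examples to a single input component (`inputs=[msg]`). A hypothetical extension keeps the same shape:

```python
# Hypothetical extra examples; one inner value per input component.
examples_list.extend(
    [
        ["Write a Python function that reverses a string."],
        ["Convert a CSV file to JSON in Python."],
    ]
)
```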
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ ctransformers  # ==0.2.10 0.2.13
+ transformers  # ==4.30.2
+ # huggingface_hub
+ gradio
+ loguru
+ about-time
+ psutil
+ dl-hf-model
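For reference, the core pipeline in app.py (fetch the GGML file with `dl_hf_model`, load it with `ctransformers`, stream tokens) can be exercised without the Gradio UI. A minimal sketch using the same calls as app.py (assumptions: the packages above are installed and roughly 8 GB of disk and RAM are available for the Q4_K_M file):

```python
from ctransformers import AutoModelForCausalLM
from dl_hf_model import dl_hf_model

# Same model URL as in app.py (~7.87 GB download on first run).
url = "https://huggingface.co/TheBloke/CodeLlama-13B-Python-GGML/blob/main/codellama-13b-python.ggmlv3.Q4_K_M.bin"
model_loc, _file_size = dl_hf_model(url)

llm = AutoModelForCausalLM.from_pretrained(model_loc, model_type="llama")

prompt = "[INST] Write code to solve the following coding problem: check whether a number is prime. [/INST]"
for token in llm(prompt, max_new_tokens=256, temperature=0.2, stream=True):
    print(token, end="", flush=True)
```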