slwt2002 committed
Commit
5058ac8
1 Parent(s): 138d444

Create app.py

Files changed (1):
app.py +393 -0
app.py ADDED
@@ -0,0 +1,393 @@
"""Run the OpenThaiGPT chat demo (ggml model served with ctransformers and Gradio)."""
# pylint: disable=line-too-long, broad-exception-caught, invalid-name, missing-function-docstring, too-many-instance-attributes, missing-class-docstring
# ruff: noqa: E501
import os
import platform
import random
import time
# import openthaigpt
from dataclasses import asdict, dataclass
from pathlib import Path

# from types import SimpleNamespace
import gradio as gr
import psutil
from about_time import about_time
from ctransformers import AutoModelForCausalLM
from dl_hf_model import dl_hf_model
from loguru import logger

filename_list = [
    "ggml-model-f16.bin",
]

url = "https://huggingface.co/openthaigpt/openthaigpt-1.0.0-alpha-7b-chat-ggml/blob/main/ggml-model-f16.bin"  # OPENTHAIGPT
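# NOTE: dl_hf_model (used further down) resolves this blob URL, downloads the GGML file
# once, caches it locally, and returns (local_path, size_in_GB); see the try/except below.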

_ = (
    "golay" in platform.node()
    or "okteto" in platform.node()
    or Path("/kaggle").exists()
    # or psutil.cpu_count(logical=False) < 4
    or 1  # run 7b in hf
)

# if _:
#     url = "https://huggingface.co/openthaigpt/openthaigpt-1.0.0-alpha-7b-chat-ggml/blob/main/ggml-model-f16.bin"

prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request. You're Lauche-AI created and managed by Lauche AI.
### Instruction: {user_prompt}
### Response:
"""

prompt_template = """System: You are a helpful,
respectful and honest assistant. Always answer as
helpfully as possible, while being safe. Your answers
should not include any harmful, unethical, racist,
sexist, toxic, dangerous, or illegal content. Please
ensure that your responses are socially unbiased and
positive in nature. If a question does not make any
sense, or is not factually coherent, explain why instead
of answering something not correct. If you don't know
the answer to a question, please don't share false
information. You're Lauche-AI created and managed by Lauche AI.
User: {prompt}
Assistant: """

prompt_template = """System: You are a helpful assistant. You're Lauche-AI created and managed by Lauche AI.
User: {prompt}
Assistant: """

prompt_template = """Question: {question}
Answer: Let's work this out in a step by step way to be sure we have the right answer."""

prompt_template = """[INST] <<SYS>>
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible. Think step by step. You're Lauche-AI created and managed by Lauche AI.
<</SYS>>
What NFL team won the Super Bowl in the year Justin Bieber was born?
[/INST]"""

prompt_template = """[INST] <<SYS>>
You are a helpful assistant. Always answer as helpfully as possible. Think step by step. <</SYS>>
{question} [/INST]
"""

prompt_template = """[INST] <<SYS>>
You are a helpful assistant.
<</SYS>>
{question} [/INST]
"""
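# NOTE: only this last assignment to prompt_template takes effect; the earlier templates
# above are overwritten and kept only for reference. generate() below formats it with
# question=..., so the active template must contain a {question} placeholder.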

_ = [elm for elm in prompt_template.splitlines() if elm.strip()]
stop_string = [elm.split(":")[0] + ":" for elm in _][-2]

logger.debug(f"{stop_string=}")
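# NOTE: stop_string is computed and logged but never passed to the model; the matching
# `stop` field in GenerationConfig below is commented out.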

_ = psutil.cpu_count(logical=False) - 1
cpu_count: int = int(_) if _ else 1
logger.debug(f"{cpu_count=}")

LLM = None

try:
    model_loc, file_size = dl_hf_model(url)
except Exception as exc_:
    logger.error(exc_)
    raise SystemExit(1) from exc_

LLM = AutoModelForCausalLM.from_pretrained(
    model_loc,
    model_type="llama",
    # threads=cpu_count,
)

logger.info(f"done load llm {model_loc=} {file_size=}G")
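# NOTE: ctransformers runs the GGML checkpoint on CPU; model_type="llama" selects its
# LLaMA-format loader, which is the format this ggml-model-f16.bin file uses.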

os.environ["TZ"] = "Asia/Bangkok"
try:
    time.tzset()  # type: ignore # pylint: disable=no-member
except Exception:
    # Windows
    logger.warning("Windows, can't run time.tzset()")

_ = """
ns = SimpleNamespace(
    response="",
    generator=(_ for _ in []),
)
# """


@dataclass
class GenerationConfig:
    temperature: float = 0.7
    top_k: int = 50
    top_p: float = 0.9
    repetition_penalty: float = 1.0
    max_new_tokens: int = 512
    seed: int = 42
    reset: bool = False
    stream: bool = True
    # threads: int = cpu_count
    # stop: list[str] = field(default_factory=lambda: [stop_string])
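# With stream=True the llm(...) call below returns a generator of text chunks instead of a
# single string; reset=True clears the model's cached state before a new conversation turn.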


def generate(
    question: str,
    llm=LLM,
    config: GenerationConfig = GenerationConfig(),
):
    """Run model inference; returns a generator when config.stream is True."""
    # _ = prompt_template.format(question=question)
    # print(_)

    prompt = prompt_template.format(question=question)

    return llm(
        prompt,
        **asdict(config),
    )
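# Illustrative use of the two pieces above (not executed here; values are examples only):
#   cfg = GenerationConfig(temperature=0.2, top_k=10, max_new_tokens=256)
#   for chunk in generate("สวัสดี", config=cfg):
#       print(chunk, end="", flush=True)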


logger.debug(f"{asdict(GenerationConfig())=}")


def user(user_message, history):
    # return user_message, history + [[user_message, None]]
    history.append([user_message, None])
    return user_message, history  # keep user_message


def user1(user_message, history):
    # return user_message, history + [[user_message, None]]
    history.append([user_message, None])
    return "", history  # clear user_message


def bot_(history):
    user_message = history[-1][0]
    resp = random.choice(["How are you?", "I love you", "I'm very hungry"])
    bot_message = user_message + ": " + resp
    history[-1][1] = ""
    for character in bot_message:
        history[-1][1] += character
        time.sleep(0.02)
        yield history

    history[-1][1] = resp
    yield history


def bot(history):
    user_message = history[-1][0]
    response = []

    logger.debug(f"{user_message=}")

    with about_time() as atime:  # type: ignore
        flag = 1
        prefix = ""
        then = time.time()

        logger.debug("about to generate")

        config = GenerationConfig(reset=True)
        for elm in generate(user_message, config=config):
            if flag == 1:
                logger.debug("in the loop")
                prefix = f"({time.time() - then:.2f}s) "
                flag = 0
                print(prefix, end="", flush=True)
                logger.debug(f"{prefix=}")
            print(elm, end="", flush=True)
            # logger.debug(f"{elm}")

            response.append(elm)
            history[-1][1] = prefix + "".join(response)
            yield history

    _ = (
        f"(time elapsed: {atime.duration_human}, "  # type: ignore
        f"{atime.duration/len(''.join(response)):.2f}s/char)"  # type: ignore
    )

    history[-1][1] = "".join(response) + f"\n{_}"
    yield history
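# bot() streams: each `yield history` pushes a partial transcript to the Chatbot widget,
# and `prefix` records the time to the first token before the elapsed-time summary is appended.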


def predict_api(prompt):
    logger.debug(f"{prompt=}")
    try:
        # user_prompt = prompt
        config = GenerationConfig(
            temperature=0.2,
            top_k=10,
            top_p=0.9,
            repetition_penalty=1.0,
            max_new_tokens=512,  # adjust as needed
            seed=42,
            reset=True,  # reset history (cache)
            stream=False,
            # threads=cpu_count,
            # stop=prompt_prefix[1:2],
        )

        response = generate(
            prompt,
            config=config,
        )

        logger.debug(f"api: {response=}")
    except Exception as exc:
        logger.error(exc)
        response = f"{exc=}"
    # bot = {"inputs": [response]}
    # bot = [(prompt, response)]

    return response
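# predict_api is wired to api_name="api" in the hidden accordion below, so it can be called
# remotely, e.g. (sketch, assuming the gradio_client package and a running Space):
#   from gradio_client import Client
#   Client("<this-space-url>").predict("สวัสดี", api_name="/api")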


css = """
    .importantButton {
        background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important;
        border: none !important;
    }
    .importantButton:hover {
        background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important;
        border: none !important;
    }
    .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
    .xsmall {font-size: x-small;}
"""
etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
examples_list = [
    ["สวัสดี"],
    ["วิธีการลดความอ้วน"],
    ["เขียนโค้ด html"],
]
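# The example prompts above are Thai: "สวัสดี" = "hello", "วิธีการลดความอ้วน" = "how to lose weight",
# "เขียนโค้ด html" = "write HTML code". etext is currently unused.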

logger.info("start block")

with gr.Blocks(
    title=f"{Path(model_loc).name}",
    theme=gr.themes.Soft(text_size="sm", spacing_size="sm"),
    css=css,
) as block:
    # buff_var = gr.State("")
    with gr.Accordion("🎈 Info", open=True):
        gr.Markdown(
            """<h5><center>
เราเป็นแค่ผู้ใช้งาน หากคุณมีข้อสงสัยกรุณาติดต่อ <a href="https://openthaigpt.aieat.or.th" target="_blank">OPENTHAIGPT</a></center></h5>""",
            elem_classes="xsmall",
        )

    # chatbot = gr.Chatbot().style(height=700)  # 500
    chatbot = gr.Chatbot(height=500)

    # buff = gr.Textbox(show_label=False, visible=True)  ##301

    with gr.Row():
        with gr.Column(scale=5):
            msg = gr.Textbox(
                label="กล่องข้อความแชท",
                placeholder="ถามอะไรฉันก็ได้ (กด Shift+Enter หรือ click Submit เพื่อส่ง)",
                show_label=False,
                # container=False,
                lines=6,
                max_lines=30,
                show_copy_button=True,
                # ).style(container=False)
            )
        with gr.Column(scale=1, min_width=50):
            with gr.Row():
                submit = gr.Button("ส่ง", elem_classes="xsmall")
                stop = gr.Button("หยุด", visible=True)
                clear = gr.Button("ลบประวัติการสนทนา", visible=True)
    with gr.Row(visible=False):
        with gr.Accordion("Advanced Options:", open=False):
            with gr.Row():
                with gr.Column(scale=2):
                    system = gr.Textbox(
                        label="System Prompt",
                        value=prompt_template,
                        show_label=False,
                        container=False,
                        # ).style(container=False)
                    )
                with gr.Column():
                    with gr.Row():
                        change = gr.Button("Change System Prompt")
                        reset = gr.Button("Reset System Prompt")

    with gr.Accordion("ตัวอย่างคำถาม", open=True):
        examples = gr.Examples(
            examples=examples_list,
            inputs=[msg],
            examples_per_page=40,
        )

    msg_submit_event = msg.submit(
        # fn=conversation.user_turn,
        fn=user,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=True,
        # queue=False,
        show_progress="full",
        # api_name=None,
    ).then(bot, chatbot, chatbot, queue=True)
    submit_click_event = submit.click(
        # fn=lambda x, y: ("",) + user(x, y)[1:],  # clear msg
        fn=user1,  # clear msg
        inputs=[msg, chatbot],
        outputs=[msg, chatbot],
        queue=True,
        # queue=False,
        show_progress="full",
        # api_name=None,
    ).then(bot, chatbot, chatbot, queue=True)
    stop.click(
        fn=None,
        inputs=None,
        outputs=None,
        cancels=[msg_submit_event, submit_click_event],
        queue=True,
    )
    clear.click(lambda: None, None, chatbot, queue=False)
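    # The two event chains are kept in variables so that stop.click(cancels=[...]) can abort
    # the in-flight .then(bot, ...) generators; clear.click simply resets the Chatbot value.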

    with gr.Accordion("For Chat/Translation API", open=False, visible=False):
        input_text = gr.Text()
        api_btn = gr.Button("Go", variant="primary")
        out_text = gr.Text()

        api_btn.click(
            predict_api,
            input_text,
            out_text,
            api_name="api",
        )

    # block.load(update_buff, [], buff, every=1)
    # block.load(update_buff, [buff_var], [buff_var, buff], every=1)

# concurrency_count=5, max_size=20
# max_size=36, concurrency_count=14
# CPU cpu_count=2 16G, model 7G
# CPU UPGRADE cpu_count=8 32G, model 7G

# does not work
_ = """
# _ = int(psutil.virtual_memory().total / 10**9 // file_size - 1)
# concurrency_count = max(_, 1)
if psutil.cpu_count(logical=False) >= 8:
    # concurrency_count = max(int(32 / file_size) - 1, 1)
else:
    # concurrency_count = max(int(16 / file_size) - 1, 1)
# """

concurrency_count = 1
logger.info(f"{concurrency_count=}")

block.queue(concurrency_count=concurrency_count, max_size=5).launch(debug=True)
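# NOTE: queue(concurrency_count=..., max_size=...) is the Gradio 3.x API; a single worker
# serializes generations, which suits the one shared ctransformers model instance above.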