shuaikang committed on
Commit
bae132e
1 Parent(s): f9c5485

Update app.py

Files changed (1)
  1. app.py +109 -62
app.py CHANGED
@@ -1,63 +1,110 @@
 
 
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+
+ from transformers import AutoModel, AutoTokenizer
  import gradio as gr
+ import mdtex2html
+ #from utils import load_model_on_gpus
+
+ tokenizer = AutoTokenizer.from_pretrained("sethuiyer/Medichat-Llama3-8B", trust_remote_code=True)
+ #model = AutoModel.from_pretrained("sethuiyer/Medichat-Llama3-8B", trust_remote_code=True).cuda()
+ model = AutoModel.from_pretrained("sethuiyer/Medichat-Llama3-8B", trust_remote_code=True)
+ # For multi-GPU support, replace the line above with the two lines below and set num_gpus to the actual number of GPUs
+ # from utils import load_model_on_gpus
+ # model = load_model_on_gpus("THUDM/chatglm2-6b", num_gpus=2)
+ model = model.eval()
+
+ """Override Chatbot.postprocess"""
+
+
+ def postprocess(self, y):
+     if y is None:
+         return []
+     for i, (message, response) in enumerate(y):
+         y[i] = (
+             None if message is None else mdtex2html.convert((message)),
+             None if response is None else mdtex2html.convert(response),
+         )
+     return y
+
+
+ gr.Chatbot.postprocess = postprocess
+
+
+ def parse_text(text):
+     """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
+     lines = text.split("\n")
+     lines = [line for line in lines if line != ""]
+     count = 0
+     for i, line in enumerate(lines):
+         if "```" in line:
+             count += 1
+             items = line.split('`')
+             if count % 2 == 1:
+                 lines[i] = f'<pre><code class="language-{items[-1]}">'
+             else:
+                 lines[i] = f'<br></code></pre>'
+         else:
+             if i > 0:
+                 if count % 2 == 1:
+                     line = line.replace("`", "\`")
+                     line = line.replace("<", "&lt;")
+                     line = line.replace(">", "&gt;")
+                     line = line.replace(" ", "&nbsp;")
+                     line = line.replace("*", "&ast;")
+                     line = line.replace("_", "&lowbar;")
+                     line = line.replace("-", "&#45;")
+                     line = line.replace(".", "&#46;")
+                     line = line.replace("!", "&#33;")
+                     line = line.replace("(", "&#40;")
+                     line = line.replace(")", "&#41;")
+                     line = line.replace("$", "&#36;")
+                 lines[i] = "<br>"+line
+     text = "".join(lines)
+     return text
+
+
+ def predict(input, chatbot, max_length, top_p, temperature, history, past_key_values):
+     chatbot.append((parse_text(input), ""))
+     for response, history, past_key_values in model.stream_chat(tokenizer, input, history, past_key_values=past_key_values,
+                                                                 return_past_key_values=True,
+                                                                 max_length=max_length, top_p=top_p,
+                                                                 temperature=temperature):
+         chatbot[-1] = (parse_text(input), parse_text(response))
+
+         yield chatbot, history, past_key_values
+
+
+ def reset_user_input():
+     return gr.update(value='')
+
+
+ def reset_state():
+     return [], [], None
+
+
+ with gr.Blocks() as demo:
+     gr.HTML("""<h1 align="center">ChatGLM2-6B</h1>""")
+
+     chatbot = gr.Chatbot()
+     with gr.Row():
+         with gr.Column(scale=4):
+             with gr.Column(scale=12):
+                 user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style(
+                     container=False)
+             with gr.Column(min_width=32, scale=1):
+                 submitBtn = gr.Button("Submit", variant="primary")
+         with gr.Column(scale=1):
+             emptyBtn = gr.Button("Clear History")
+             max_length = gr.Slider(0, 32768, value=8192, step=1.0, label="Maximum length", interactive=True)
+             top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True)
+             temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
+
+     history = gr.State([])
+     past_key_values = gr.State(None)
+
+     submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history, past_key_values],
+                     [chatbot, history, past_key_values], show_progress=True)
+     submitBtn.click(reset_user_input, [], [user_input])
+
+     emptyBtn.click(reset_state, outputs=[chatbot, history, past_key_values], show_progress=True)
+
+ demo.queue().launch(server_name="0.0.0.0",server_port=7860,share=False, inbrowser=True)