duxb committed on
Commit
0f9349b
1 Parent(s): 8c6510a

Update app.py

Files changed (1)
  1. app.py +24 -18
app.py CHANGED
@@ -1,27 +1,32 @@
 from transformers import AutoModel, AutoTokenizer
 import gradio as gr
 import json
-model_path = 'THUDM/chatglm-6b'
+model_path = 'THUDM/chatglm-6b-int4-qe'
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-model = AutoModel.from_pretrained(model_path, trust_remote_code=True).bfloat16()
+model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().float()
 model = model.eval()
 
 MAX_TURNS = 20
 MAX_BOXES = MAX_TURNS * 2
 
 
-def predict(input, max_length, top_p, temperature, history=None):
-    if history is None:
-        history = []
-    response, history = model.chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
-                                   temperature=temperature)
-    updates = []
-    for query, response in history:
-        updates.append(gr.update(visible=True, value="用户:" + query))
-        updates.append(gr.update(visible=True, value="ChatGLM-6B:" + response))
-    if len(updates) < MAX_BOXES:
-        updates = updates + [gr.Textbox.update(visible=False)] * (MAX_BOXES - len(updates))
-    return [history] + updates
+def predict(input, max_length, top_p, temperature, history=None, state=None):
+    if state is None:
+        state = []
+    if history is None or history == "":
+        history = state
+    else:
+        history = json.loads(history)
+
+    for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p,
+                                               temperature=temperature):
+        updates = []
+        for query, response in history:
+            updates.append(gr.update(visible=True, value=query))
+            updates.append(gr.update(visible=True, value=response))
+        if len(updates) < MAX_BOXES:
+            updates = updates + [gr.Textbox.update(visible=False)] * (MAX_BOXES - len(updates))
+        yield [history] + updates
 
 
 with gr.Blocks() as demo:
@@ -29,9 +34,9 @@ with gr.Blocks() as demo:
     text_boxes = []
     for i in range(MAX_BOXES):
         if i % 2 == 0:
-            text_boxes.append(gr.Markdown(visible=False, label="提问:"))
+            text_boxes.append(gr.Text(visible=False, label="提问:"))
         else:
-            text_boxes.append(gr.Markdown(visible=False, label="回复:"))
+            text_boxes.append(gr.Text(visible=False, label="回复:"))
 
     with gr.Row():
         with gr.Column(scale=4):
@@ -41,6 +46,7 @@
             max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
             top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
             temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
+            history = gr.TextArea(visible=False)
             button = gr.Button("Generate")
-    button.click(predict, [txt, max_length, top_p, temperature, state], [state] + text_boxes)
-demo.queue().launch(share=True, inbrowser=True)
+    button.click(predict, [txt, max_length, top_p, temperature, history, state], [state] + text_boxes, queue=True)
+demo.queue(concurrency_count=10).launch(enable_queue=True, max_threads=2)
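
The core of this commit is that predict becomes a generator: it wraps model.stream_chat(...) and yields a fresh batch of component updates for every partial response, so the chat boxes repaint while generation is still running. A minimal sketch of that generator-handler pattern, assuming Gradio 3.x (the gr.update / queue API this diff uses); fake_stream is a hypothetical stand-in for the model, not part of this repo:

import time

import gradio as gr


def fake_stream(prompt):
    # Hypothetical stand-in for model.stream_chat(...): yields an
    # ever-longer reply string, one word at a time.
    text = ""
    for word in ["echo:"] + prompt.split():
        text += word + " "
        time.sleep(0.1)
        yield text


def predict(prompt):
    # Because this handler is a generator, Gradio re-renders the output
    # component on every yield instead of waiting for a final return.
    for partial in fake_stream(prompt):
        yield partial


with gr.Blocks() as demo:
    txt = gr.Textbox(label="Prompt")
    out = gr.Textbox(label="Response")
    gr.Button("Generate").click(predict, [txt], [out])

# Generator handlers must run through the queue, which is why the diff
# also adds queue=True on the click and demo.queue(concurrency_count=10).
demo.queue().launch()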
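The second change is how history travels: a hidden gr.TextArea is added as an extra input, and predict parses it with json.loads when it is non-empty, otherwise falling back to the state list. Nothing in the diff writes that box back (the outputs are still [state] + text_boxes), so it presumably exists for injecting a history from outside. A small sketch of the round trip, flagging one pitfall: JSON turns the (query, response) tuples into lists.

import json

# Chat history in the shape model.chat / model.stream_chat produce:
# a list of (query, response) pairs.
history = [("Hi", "Hello!"), ("How are you?", "Fine, thanks.")]

encoded = json.dumps(history)  # what a caller could put in the hidden TextArea
decoded = json.loads(encoded)  # what predict() recovers on the next click

# JSON has no tuple type, so each pair comes back as a two-element list.
assert decoded == [list(pair) for pair in history]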
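Finally, the UI keeps its fixed pool of MAX_BOXES pre-built hidden components (now gr.Text instead of gr.Markdown) that the handler reveals and fills via gr.update, since Gradio 3.x cannot create components after launch. A stripped-down sketch of that show/hide pooling trick; POOL and fill are illustrative names:

import gradio as gr

POOL = 4  # stand-in for MAX_BOXES


def fill(text):
    # Reveal one pooled box per word and hide the leftovers, mirroring
    # the MAX_BOXES padding loop in predict().
    words = text.split()[:POOL]
    updates = [gr.update(visible=True, value=w) for w in words]
    updates += [gr.update(visible=False)] * (POOL - len(updates))
    return updates


with gr.Blocks() as demo:
    boxes = [gr.Text(visible=False) for _ in range(POOL)]
    txt = gr.Textbox(label="Words")
    gr.Button("Show").click(fill, [txt], boxes)

demo.launch()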