bigPear committed on
Commit
8c4e6f3
1 Parent(s): 20b388e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -59
app.py CHANGED
@@ -1,65 +1,39 @@
1
- from transformers import AutoModel, AutoTokenizer
2
- import gradio as gr
3
-
4
- tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
5
- model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
6
- #.half().cuda()
 
 
7
  model = model.eval()
8
 
9
- # def add_text(history, text):
10
- # history = history + [(text, None)]
11
- # return history, ""
12
-
13
- # def add_file(history, file):
14
- # history = history + [((file.name,), None)]
15
- # return history
16
-
17
- # def bot(history):
18
- # response = "**That's cool!**"
19
- # history[-1][1] = response
20
- # return history
21
 
22
- # def predict(input, history=None):
23
- # if history is None:
24
- # history = []
25
- # response, history = model.chat(tokenizer, input, history)
26
- # return response, history
 
 
 
 
27
 
28
  with gr.Blocks() as demo:
29
- chatbot = gr.Chatbot()
30
- msg = gr.Textbox()
31
- clear = gr.Button("Clear")
32
-
33
- def predict(input, history=None):
34
- if history is None:
35
- history = []
36
- response, history = model.chat(tokenizer, input, history)
37
- return response, history
38
-
39
  def user(user_message, history):
40
- return "", history + [[user_message, None]]
41
-
42
- def bot(msg, history):
43
- bot_message, _ = predict(msg, history)
44
- history[-1][1] = ""
45
- for character in bot_message:
46
- history[-1][1] += character
47
- time.sleep(0.05)
48
- yield history
49
-
50
- msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
51
- bot, [msg, chatbot], [msg, chatbot]
52
- )
53
- clear.click(lambda: None, None, chatbot, queue=False)
54
-
55
- demo.queue()
56
- demo.launch()
57
-
58
- # txt.submit(predict, [txt, state], [chatbot, state])
59
- # button.click(predict, [txt, state], [chatbot, state])
60
-
61
- # btn.upload(add_file, [chatbot, btn], [chatbot]).then(
62
- # bot, chatbot, chatbot
63
- # )
64
-
65
- demo.launch()
 
# --- Model setup ------------------------------------------------------------
# Make the project-local "src" package importable, then load the fine-tuned
# model and tokenizer from a local checkpoint directory.
import sys
import time

import gradio as gr  # BUGFIX: this import was dropped in the edit, but the UI
                     # code below still uses gr.Blocks()/gr.Chatbot/etc., so
                     # the app crashed with NameError at startup.

sys.path.append("src")
from src import load_pretrained, ModelArguments

# Load weights from the local "temp" checkpoint.
model_args = ModelArguments(checkpoint_dir="temp")
model, tokenizer = load_pretrained(model_args)

# Half precision on GPU, inference mode only (disables dropout etc.).
# NOTE(review): .cuda() assumes a GPU is available — confirm for the target host.
model = model.half().cuda()
model = model.eval()
def predict(input, history=None):
    """Generate the model's reply to *input* and stream it into *history*.

    Parameters
    ----------
    input : str
        The user's latest message.
    history : list[list] | None
        Chat log as ``[user, bot]`` pairs; the last pair's bot slot is
        filled in character by character. Mutated in place.

    Yields
    ------
    list
        The history after each appended character (drives the gradio
        streaming display).
    """
    if history is None:
        history = []
    # BUGFIX: the original unconditionally wrote history[-1][1] and raised
    # IndexError when history was empty; make sure a trailing pair exists.
    if not history:
        history.append([input, None])
    response, _ = model.chat(tokenizer, input, history)
    history[-1][1] = ""
    # Simulated token streaming: reveal one character per 50 ms tick.
    for character in response:
        history[-1][1] += character
        time.sleep(0.05)
        yield history
# --- Gradio UI --------------------------------------------------------------
with gr.Blocks() as demo:
    # Page header.
    gr.Markdown('''## DigitalWDF - unofficial demo
''')
    # Chat transcript display, capped at 200px tall.
    chatbot = gr.Chatbot([], elem_id="chatbot").style(height=200)

    def user(user_message, history):
        """Record the pending user turn: append [message, None] to the log."""
        updated = history + [[user_message, None]]
        return updated

    # Input row: wide textbox next to a narrow generate button.
    with gr.Row():
        with gr.Column(scale=4):
            txt = gr.Textbox(
                show_label=False,
                placeholder="Enter text and press enter",
            ).style(container=False)
        with gr.Column(scale=1):
            button = gr.Button("Generate")

    # Pressing Enter and clicking "Generate" behave identically: first record
    # the user turn (unqueued, so it appears immediately), then stream the
    # model reply into the chatbot.
    # NOTE(review): the textbox is not cleared after submission — presumably
    # so predict still receives its text; confirm this is intended.
    for trigger in (txt.submit, button.click):
        chained = trigger(user, [txt, chatbot], chatbot, queue=False)
        chained.then(predict, [txt, chatbot], chatbot)

demo.queue().launch()