Russal committed on
Commit
2cb988a
1 Parent(s): dd92a44

Upload chatbot_demo.py

Browse files
Files changed (1) hide show
  1. chatbot_demo.py +49 -0
chatbot_demo.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Single source of truth for the checkpoint id (was repeated twice).
MODEL_NAME = "AlpachinoNLP/Baichuan-13B-Instruction"

# trust_remote_code is required because Baichuan ships custom modeling code.
# NOTE(review): this executes code fetched from the Hub — pin a revision if
# this ever runs on untrusted infrastructure.
tokenizer = AutoTokenizer.from_pretrained(
    MODEL_NAME, trust_remote_code=True, use_fast=False
)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME, trust_remote_code=True
).half()
# The original called model.cuda() unconditionally, which raises on
# CPU-only hosts; only move the model when a GPU is actually present.
if torch.cuda.is_available():
    model.cuda()
def generate(histories, max_new_tokens=2048, do_sample=True, top_p=0.95,
             temperature=0.35, repetition_penalty=1.1):
    """Generate the assistant's next reply from a chat history.

    Args:
        histories: list of ``[user_text, assistant_text]`` pairs; the last
            pair's assistant slot is expected to be ``""`` (the turn being
            generated now).
        max_new_tokens, do_sample, top_p, temperature, repetition_penalty:
            standard Hugging Face sampling knobs, forwarded to
            ``model.generate()``.

    Returns:
        The newly generated assistant text, without the prompt.
    """
    # Flatten the chat history into the Human/Assistant prompt format the
    # model was instruction-tuned on.
    prompt = ""
    for user_text, assistant_text in histories:
        prompt += "\nHuman:" + user_text + "\n\nAssistant:" + assistant_text
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
    # early_stopping was dropped: it only applies to beam search and merely
    # triggers a warning when do_sample=True.
    outputs = model.generate(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=do_sample,
        top_p=top_p,
        temperature=temperature,
        repetition_penalty=repetition_penalty,
    )
    # Decode only the newly generated tokens.  The original decoded the full
    # sequence and str.replace()d the prompt text away, which corrupts the
    # reply whenever the model re-emits any substring of the prompt.
    new_tokens = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("clear")

    def user(user_message, history):
        """Record the user turn (empty assistant slot) and clear the textbox."""
        return "", history + [[user_message, ""]]

    def bot(history):
        """Fill the assistant reply into the newest history entry.

        The leftover debug ``print(history)`` from the original was removed.
        """
        history[-1][1] = generate(history)
        return history

    # Submitting the textbox first appends the user turn, then runs the
    # model and writes its answer back into the same chat history.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    # Reset the chat window.
    clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
    # Bind to all interfaces so the demo is reachable from outside the
    # host/container, not just via localhost.
    demo.launch(server_name="0.0.0.0")