douglarek committed on
Commit
cde1762
1 Parent(s): 003c3f8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -1
app.py CHANGED
@@ -1,3 +1,33 @@
1
  import gradio as gr
 
 
2
 
3
- gr.load("models/HuggingFaceH4/zephyr-7b-beta").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import torch
3
+ from transformers import pipeline
4
 
5
+ pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.bfloat16, device_map="auto")
6
+
7
+ messages = [
8
+ {
9
+ "role": "system",
10
+ "content": "You are a friendly chatbot who always responds in the style of a pirate",
11
+ },
12
+ # {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
13
+ ]
14
+
15
+
16
+ def random_response(message, history):
17
+ msg = messages.copy()
18
+ for m in history:
19
+ q, a = m
20
+ msg.append({"role": "user", "content": q})
21
+ msg.append({"role": "assistant", "content": a})
22
+ msg.append({"role": "user", "content": message})
23
+ prompt = pipe.tokenizer.apply_chat_template(msg, tokenize=False, add_generation_prompt=True)
24
+ outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
25
+ output = outputs[0]["generated_text"]
26
+ messages.append({"role": "assistant", "content": output})
27
+ response_start = output.rfind('<|assistant|>')
28
+ return output[response_start + len('<|assistant|>'):]
29
+
30
+
31
+ demo = gr.ChatInterface(random_response)
32
+
33
+ demo.launch()