Update app.py
app.py CHANGED
@@ -26,7 +26,7 @@ def generate(prompt, chat_history):
     final_prompt += "User: " + prompt + "\n"
     final_prompt += "Output:"

-    generated_text = phi2(final_prompt, max_new_tokens=
+    generated_text = phi2(final_prompt, max_new_tokens=32)[0]["generated_text"]
     response = generated_text.split("Output:")[1].split("User:")[0]

     if "Assistant:" in response:
@@ -38,7 +38,11 @@ def generate(prompt, chat_history):

 # Chat interface with gradio
 with gr.Blocks() as demo:
-    gr.Markdown("
+    gr.Markdown("""
+    # Phi-2 Chatbot Demo
+
+    This chatbot was created using Microsoft's [phi-2](https://huggingface.co/microsoft/phi-2) model. To speed up inference, `max_new_tokens` has been set to `32` in the text generation pipeline. It might take up to 120 seconds for each response to be generated.
+    """)

     chatbot = gr.Chatbot()
     msg = gr.Textbox()