Update app.py
app.py CHANGED

@@ -20,6 +20,8 @@ def generate(
             "role": "system",
             "content": system_input,
         },
+        {"role": "user", "content": "Hello there!"},
+        {"role": "assistant", "content": "Hi!"},
         {"role": "user", "content": user_input},
     ]
     prompt = pipe.tokenizer.apply_chat_template(message_template, tokenize=False, add_generation_prompt=True)
@@ -27,14 +29,14 @@ def generate(
         temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty)
     return outputs[0]["generated_text"]
 
-model_choices = ["Felladrin/…
+model_choices = ["Felladrin/Llama-160M-Chat-v1", "Felladrin/Smol-Llama-101M-Chat-v1", "Felladrin/TinyMistral-248M-SFT-v4", "Felladrin/Pythia-31M-Chat-v1"]
 
 g = gr.Interface(
     fn=generate,
     inputs=[
         gr.components.Dropdown(choices=model_choices, label="Model", value=model_choices[0], interactive=True),
-        gr.components.Textbox(lines=2, label="System Message", value="You are a highly knowledgeable and friendly…
-        gr.components.Textbox(lines=2, label="User Message", value="…
+        gr.components.Textbox(lines=2, label="System Message", value="You are a highly knowledgeable and friendly assistant. Your goal is to understand and respond to user inquiries with accuracy and clarity. You're adept at providing detailed explanations, concise summaries, and insightful responses. Your interactions are always respectful, helpful, and focused on delivering the most relevant information to the user."),
+        gr.components.Textbox(lines=2, label="User Message", value="Tell me something curious about the Earth!"),
         gr.components.Slider(minimum=0, maximum=1, value=0.4, label="Temperature"),
         gr.components.Slider(minimum=0, maximum=1, value=0.25, label="Top p"),
         gr.components.Slider(minimum=0, maximum=100, step=1, value=7, label="Top k"),
@@ -42,7 +44,7 @@ g = gr.Interface(
         gr.components.Slider(minimum=1, maximum=1024, step=1, value=256, label="Max tokens"),
     ],
     outputs=[gr.Textbox(lines=10, label="Output")],
-    title="A…
+    title="A place to try out text-generation models fine-tuned by Felladrin",
     concurrency_limit=1
 )
|
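The hunks above show only the middle of `generate` and part of the `gr.Interface` wiring. For context, below is a minimal sketch of how the surrounding app.py code plausibly fits together. The `pipeline` construction, the `generate` signature, the `do_sample`/`max_new_tokens` arguments, and the slider hidden between "Top k" and "Max tokens" (assumed here to be a repetition-penalty control) are assumptions, not lines taken from this diff.

```python
# Sketch of app.py around the hunks above. Only the lines that also appear
# in the diff are confirmed; everything else is an assumption about how the
# Space is wired up.
import gradio as gr
from transformers import pipeline


def generate(
    model_name,          # assumed: fed by the "Model" Dropdown
    system_input,        # "System Message" Textbox
    user_input,          # "User Message" Textbox
    temperature,         # "Temperature" Slider
    top_p,               # "Top p" Slider
    top_k,               # "Top k" Slider
    repetition_penalty,  # assumed: the slider not shown in the hunks above
    max_new_tokens,      # "Max tokens" Slider
):
    # Assumed: a text-generation pipeline is created for the selected model.
    pipe = pipeline("text-generation", model=model_name)

    # The chat template now includes the hard-coded "Hello there!" / "Hi!"
    # exchange added in this commit, acting as a short in-context example
    # before the real user message.
    message_template = [
        {
            "role": "system",
            "content": system_input,
        },
        {"role": "user", "content": "Hello there!"},
        {"role": "assistant", "content": "Hi!"},
        {"role": "user", "content": user_input},
    ]
    prompt = pipe.tokenizer.apply_chat_template(
        message_template, tokenize=False, add_generation_prompt=True
    )
    # do_sample=True and max_new_tokens are assumptions; the sampling kwargs
    # below match the line shown in the diff.
    outputs = pipe(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty,
    )
    return outputs[0]["generated_text"]
```

Under these assumptions, the `gr.Interface(fn=generate, ...)` call from the diff maps each input component to the corresponding `generate` parameter in order.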