Spaces:
Running
Running
Mediocreatmybest
committed on
Commit
•
1bfc1de
1
Parent(s):
79d1874
Upload 2 files
Browse files
- app.py +59 -0
- requirements.txt +8 -0
app.py
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from transformers import pipeline, AutoTokenizer
|
3 |
+
|
4 |
+
# Cache of constructed pipelines, keyed by model name.  Without this, every
# generate() call rebuilt the pipeline from scratch, reloading the full model
# weights on each UI request.
_PIPELINE_CACHE = {}


def load_model(model_name):
    """Return a CPU text-generation pipeline for ``model_name``.

    The pipeline is built once per model and cached in ``_PIPELINE_CACHE``;
    subsequent calls for the same model reuse the existing pipeline instead
    of re-initializing it.

    Args:
        model_name: Hugging Face model identifier
            (e.g. ``"TinyLlama/TinyLlama-1.1B-Chat-v1.0"``).

    Returns:
        A ``transformers`` ``text-generation`` pipeline running on CPU.
    """
    if model_name not in _PIPELINE_CACHE:
        _PIPELINE_CACHE[model_name] = pipeline(
            "text-generation", model=model_name, device="cpu"
        )
    return _PIPELINE_CACHE[model_name]
7 |
+
def generate(
    model_name,
    template_name,
    user_input,
    temperature=0.4,
    top_p=0.95,
    top_k=50,
    max_new_tokens=256,
):
    """Generate a chat completion for ``user_input`` with the chosen model.

    Args:
        model_name: Hugging Face model identifier to load.
        template_name: Which chat template to apply. Any label containing
            "Falcon" selects the Falcon few-shot template; everything else
            falls back to the TinyLlama system-prompt template.
        user_input: The user's prompt text.
        temperature: Sampling temperature (higher = more random).
        top_p: Nucleus-sampling probability mass.
        top_k: Top-k sampling cutoff.
        max_new_tokens: Maximum number of tokens to generate.

    Returns:
        The generated text (prompt included, as returned by the pipeline).
    """
    pipe = load_model(model_name)
    # BUG FIX: the dropdown offers "Falcon Template" but this previously
    # compared against "Falcon 1B Template", so the Falcon branch was
    # unreachable and Falcon users silently got the TinyLlama template.
    # Matching on the substring accepts both spellings.
    # Need to add additional options later.
    if "Falcon" in template_name:
        message_template = [
            {"role": "user", "content": "Hello!"},
            {"role": "assistant", "content": "Hello! How can I assist you today?"},
            {"role": "user", "content": user_input},
        ]
    else:  # Default to "TinyLlama Template"
        message_template = [
            {
                "role": "system",
                "content": "You are a highly knowledgeable and friendly chatbot equipped with extensive information across various domains. Your goal is to understand and respond to user inquiries with accuracy and clarity. You're adept at providing detailed explanations, concise summaries, and insightful responses. Your interactions are always respectful, helpful, and focused on delivering the most relevant information to the user.",
            },
            {"role": "user", "content": user_input},
        ]

    # Set tokenize correctly. Otherwise ticking the box breaks it.
    prompt = pipe.tokenizer.apply_chat_template(
        message_template, tokenize=False, add_generation_prompt=True
    )
    outputs = pipe(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        repetition_penalty=1.10,
    )
    return outputs[0]["generated_text"]
+
|
39 |
+
# Models and chat templates offered in the UI.  NOTE(review): the template
# label must match what generate() checks for -- "Falcon 1B Template" keeps
# the Falcon branch reachable (the old "Falcon Template" label never matched).
model_choices = ["TinyLlama/TinyLlama-1.1B-Chat-v1.0", "ericzzz/falcon-rw-1b-chat"]
template_choices = ["TinyLlama Template", "Falcon 1B Template"]

# TODO: review whether these sampling defaults are the best options.
g = gr.Interface(
    fn=generate,
    inputs=[
        gr.components.Dropdown(choices=model_choices, label="Model", value=model_choices[0], interactive=True),
        gr.components.Dropdown(choices=template_choices, label="Template", value=template_choices[0], interactive=True),
        gr.components.Textbox(lines=2, label="Prompt", value="How many planets are in our solar system?"),
        gr.components.Slider(minimum=0, maximum=1, value=0.4, label="Temperature"),
        gr.components.Slider(minimum=0, maximum=1, value=0.95, label="Top p"),
        gr.components.Slider(minimum=0, maximum=100, step=1, value=50, label="Top k"),
        gr.components.Slider(minimum=1, maximum=1024, step=1, value=256, label="Max tokens"),
    ],
    outputs=[gr.Textbox(lines=10, label="Output")],
    title="Hugging Face Transformers Model",
    description="A simple interface for generating text with a Hugging Face Transformers model.",
    # Serialize requests: CPU inference on a 1B model cannot handle parallelism.
    concurrency_limit=1,
)

g.launch(max_threads=2)
|
requirements.txt
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
huggingface_hub
|
2 |
+
transformers
|
3 |
+
torch
|
4 |
+
scipy
|
5 |
+
accelerate
|
6 |
+
uvicorn
|
7 |
+
fastapi
|
8 |
+
httpx
|