Add assistant prefix and examples
Browse files
app.py
CHANGED
@@ -21,7 +21,7 @@ EOS_WORDS_IDS = [processor.tokenizer.eos_token_id]
|
|
21 |
|
22 |
@spaces.GPU
|
23 |
def model_inference(
|
24 |
-
images, text, decoding_strategy, temperature, max_new_tokens,
|
25 |
repetition_penalty, top_p
|
26 |
):
|
27 |
if text == "" and not images:
|
@@ -43,6 +43,9 @@ def model_inference(
|
|
43 |
}
|
44 |
]
|
45 |
|
|
|
|
|
|
|
46 |
|
47 |
prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)
|
48 |
inputs = processor(text=prompt, images=[images], return_tensors="pt")
|
@@ -81,14 +84,18 @@ with gr.Blocks(fill_height=True) as demo:
|
|
81 |
with gr.Column():
|
82 |
image_input = gr.Image(label="Upload your Image", type="pil")
|
83 |
query_input = gr.Textbox(label="Prompt")
|
|
|
|
|
84 |
submit_btn = gr.Button("Submit")
|
85 |
output = gr.Textbox(label="Output")
|
86 |
|
87 |
with gr.Accordion(label="Example Inputs and Advanced Generation Parameters"):
|
88 |
-
examples=[
|
89 |
-
["example_images/
|
90 |
-
["example_images/
|
91 |
-
["example_images/
|
|
|
|
|
92 |
|
93 |
# Hyper-parameters for generation
|
94 |
max_new_tokens = gr.Slider(
|
@@ -162,13 +169,13 @@ with gr.Blocks(fill_height=True) as demo:
|
|
162 |
)
|
163 |
gr.Examples(
|
164 |
examples = examples,
|
165 |
-
inputs=[image_input, query_input, decoding_strategy, temperature,
|
166 |
max_new_tokens, repetition_penalty, top_p],
|
167 |
outputs=output,
|
168 |
fn=model_inference
|
169 |
)
|
170 |
|
171 |
-
submit_btn.click(model_inference, inputs = [image_input, query_input, decoding_strategy, temperature,
|
172 |
max_new_tokens, repetition_penalty, top_p], outputs=output)
|
173 |
|
174 |
|
|
|
21 |
|
22 |
@spaces.GPU
|
23 |
def model_inference(
|
24 |
+
images, text, assistant_prefix, decoding_strategy, temperature, max_new_tokens,
|
25 |
repetition_penalty, top_p
|
26 |
):
|
27 |
if text == "" and not images:
|
|
|
43 |
}
|
44 |
]
|
45 |
|
46 |
+
if assistant_prefix:
|
47 |
+
text = f"{assistant_prefix} {text}"
|
48 |
+
|
49 |
|
50 |
prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)
|
51 |
inputs = processor(text=prompt, images=[images], return_tensors="pt")
|
|
|
84 |
with gr.Column():
|
85 |
image_input = gr.Image(label="Upload your Image", type="pil")
|
86 |
query_input = gr.Textbox(label="Prompt")
|
87 |
+
assistant_prefix = gr.Textbox(label="Assistant Prefix", placeholder="Let's think step by step.")
|
88 |
+
|
89 |
submit_btn = gr.Button("Submit")
|
90 |
output = gr.Textbox(label="Output")
|
91 |
|
92 |
with gr.Accordion(label="Example Inputs and Advanced Generation Parameters"):
|
93 |
+
examples=[
|
94 |
+
["example_images/mmmu_example.jpeg", "Chase wants to buy 4 kilograms of oval beads and 5 kilograms of star-shaped beads. How much will he spend?", "Let's think step by step.", "Greedy", 0.4, 512, 1.2, 0.8],
|
95 |
+
["example_images/travel_tips.jpg", "I want to go somewhere similar to the one in the photo. Give me destinations and travel tips.", None, "Greedy", 0.4, 512, 1.2, 0.8],
|
96 |
+
["example_images/dummy_pdf.png", "How much percent is the order status?", None, "Greedy", 0.4, 512, 1.2, 0.8],
|
97 |
+
["example_images/art_critic.png", "As an art critic AI assistant, could you describe this painting in detail and make a thorough critique?", None, "Greedy", 0.4, 512, 1.2, 0.8],
|
98 |
+
["example_images/s2w_example.png", "What is this UI about?", None, "Greedy", 0.4, 512, 1.2, 0.8]]
|
99 |
|
100 |
# Hyper-parameters for generation
|
101 |
max_new_tokens = gr.Slider(
|
|
|
169 |
)
|
170 |
gr.Examples(
|
171 |
examples = examples,
|
172 |
+
inputs=[image_input, query_input, assistant_prefix, decoding_strategy, temperature,
|
173 |
max_new_tokens, repetition_penalty, top_p],
|
174 |
outputs=output,
|
175 |
fn=model_inference
|
176 |
)
|
177 |
|
178 |
+
submit_btn.click(model_inference, inputs = [image_input, query_input, assistant_prefix, decoding_strategy, temperature,
|
179 |
max_new_tokens, repetition_penalty, top_p], outputs=output)
|
180 |
|
181 |
|