Spaces: Running on Zero

Update app.py
app.py CHANGED

@@ -53,11 +53,7 @@ def predict(image, text):
 
     # Decode the output to return the final response
     response = processor.decode(outputs[0], skip_special_tokens=True)
-
-    # Format the conversation for a better appearance without repetition
-    formatted_response = f"User: {text}\n\nAssistant: {response}"
-
-    return formatted_response
+    return response
 
 # Define the Gradio interface
 interface = gr.Interface(
@@ -66,7 +62,7 @@ interface = gr.Interface(
         gr.Image(type="pil", label="Image Input"),  # Image input with label
         gr.Textbox(label="Text Input")  # Textbox input with label
     ],
-    outputs=gr.Textbox(label="
+    outputs=gr.Textbox(label="Generated Response"),  # Output with a more descriptive label
     title="Llama 3.2 11B Vision Instruct Demo",  # Title of the interface
     description="This demo uses Meta's Llama 3.2 11B Vision model to generate responses based on an image and text input.",  # Short description
     theme="compact"  # Using a compact theme for a cleaner look
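For context, here is a minimal sketch of how the surrounding app.py might look after this commit. Only the tail of predict() and the gr.Interface arguments appear in the diff; the model ID, the loading code, the prompt construction, the max_new_tokens value, and the spaces.GPU decorator are assumptions based on a typical Llama 3.2 Vision + ZeroGPU setup, not part of the commit.

import gradio as gr
import spaces  # assumed: ZeroGPU decorator, since the Space runs on Zero
import torch
from transformers import AutoProcessor, MllamaForConditionalGeneration

# Assumed model ID; the loading code is not shown in the diff
MODEL_ID = "meta-llama/Llama-3.2-11B-Vision-Instruct"

model = MllamaForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained(MODEL_ID)


@spaces.GPU  # assumed: allocate a GPU per call on ZeroGPU hardware
def predict(image, text):
    # Pair the uploaded image with the user's text in a chat-style prompt (assumed prompt setup)
    messages = [
        {"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": text},
        ]}
    ]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(image, prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256)  # assumed generation settings

    # Decode the output to return the final response
    response = processor.decode(outputs[0], skip_special_tokens=True)
    return response


# Define the Gradio interface
interface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(type="pil", label="Image Input"),  # Image input with label
        gr.Textbox(label="Text Input")  # Textbox input with label
    ],
    outputs=gr.Textbox(label="Generated Response"),  # Output with a more descriptive label
    title="Llama 3.2 11B Vision Instruct Demo",
    description="This demo uses Meta's Llama 3.2 11B Vision model to generate responses based on an image and text input.",
    theme="compact"
)

interface.launch()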