merve (HF staff) committed
Commit 959aec5
Parent: b926faa

Update app.py

Files changed (1): app.py (+1, -0)
app.py CHANGED
@@ -81,6 +81,7 @@ def model_inference(
 with gr.Blocks(fill_height=True) as demo:
     gr.Markdown("## IDEFICS3-Llama 🐶")
     gr.Markdown("Play with [HuggingFaceM4/Idefics3-8B-Llama3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) in this demo. To get started, upload an image and text or try one of the examples.")
+    gr.Markdown("**Disclaimer:** Idefics3 does not include an RLHF alignment stage, so it may not consistently follow prompts or handle complex tasks. However, this doesn't mean it is incapable of doing so. Adding a prefix to the assistant's response, such as `Let's think step by step` for a reasoning question or `<html>` for HTML code generation, can significantly improve the output in practice. You can also play with generation parameters such as the temperature in non-greedy mode.")
     with gr.Column():
         image_input = gr.Image(label="Upload your Image", type="pil")
         query_input = gr.Textbox(label="Prompt")
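
The disclaimer added in this commit describes priming the assistant's reply with a prefix. The commit does not show the demo's `model_inference` body, so the sketch below is only a rough illustration of that idea using the public transformers API for Idefics3; the function name `prime_assistant`, the prefix default, and the sampling values are assumptions, not part of this change. Appending the prefix after `add_generation_prompt=True` places it at the start of the assistant turn rather than inside the user message, which is what the disclaimer suggests.

```python
# Sketch (not part of this commit): prime the assistant's reply with a prefix,
# as the disclaimer suggests, using the standard transformers API for Idefics3.
import torch
from transformers import AutoProcessor, AutoModelForVision2Seq

model_id = "HuggingFaceM4/Idefics3-8B-Llama3"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForVision2Seq.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

def prime_assistant(image, query, prefix="Let's think step by step"):
    # Build the chat prompt, then append the prefix so the model continues from it.
    messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": query}]}]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True) + prefix
    inputs = processor(text=prompt, images=[image], return_tensors="pt").to(model.device)
    # Non-greedy decoding: temperature is the knob the disclaimer mentions.
    generated = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.4)
    new_tokens = generated[:, inputs["input_ids"].shape[1]:]
    return prefix + processor.batch_decode(new_tokens, skip_special_tokens=True)[0]
```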