Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -264,7 +264,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
264
  with gr.Column():
265
  with gr.Column(elem_classes="canvas-output"):
266
  gr.Markdown("## Output")
267
- output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=2)
268
  with gr.Accordion("(Result.md)", open=False):
269
  markdown_output = gr.Markdown(label="(Result.md)")
270
  model_choice = gr.Radio(
@@ -275,7 +275,9 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
275
  gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-v1.0/discussions)")
276
  gr.Markdown("> [Camel-Doc-OCR-062825](https://huggingface.co/prithivMLmods/Camel-Doc-OCR-062825), [GLM-4.1V-9B-Thinking](https://huggingface.co/zai-org/GLM-4.1V-9B-Thinking), [Megalodon-OCR-Sync-0713](https://huggingface.co/prithivMLmods/Megalodon-OCR-Sync-0713), and [ViLaSR-7B](https://huggingface.co/inclusionAI/ViLaSR) are recent vision-language models excelling in document intelligence and multimodal understanding. Camel-Doc-OCR-062825 is a Qwen2.5-VL-7B-Instruct finetune, highly optimized for document retrieval, structured extraction, analysis, and direct Markdown generation from images and PDFs. GLM-4.1V-9B-Thinking offers next-level multimodal reasoning, bringing visual and textual comprehension together for advanced question answering.")
277
  gr.Markdown("> Megalodon-OCR-Sync-0713, finetuned from Qwen2.5-VL-3B-Instruct, specializes in context-aware multimodal document extraction and analysis, excelling at retrieval, layout parsing, math, and chart/table recognition, with robust video and long-form comprehension capabilities. ViLaSR-7B focuses on reinforcing spatial reasoning in visual-language tasks by combining interwoven thinking with visual drawing, making it especially suited for spatial reasoning and complex tip-based queries.")
278
-
 
 
279
  # Define the submit button actions
280
  image_submit.click(fn=generate_image,
281
  inputs=[
 
264
  with gr.Column():
265
  with gr.Column(elem_classes="canvas-output"):
266
  gr.Markdown("## Output")
267
+ output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=3)
268
  with gr.Accordion("(Result.md)", open=False):
269
  markdown_output = gr.Markdown(label="(Result.md)")
270
  model_choice = gr.Radio(
 
275
  gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-v1.0/discussions)")
276
  gr.Markdown("> [Camel-Doc-OCR-062825](https://huggingface.co/prithivMLmods/Camel-Doc-OCR-062825), [GLM-4.1V-9B-Thinking](https://huggingface.co/zai-org/GLM-4.1V-9B-Thinking), [Megalodon-OCR-Sync-0713](https://huggingface.co/prithivMLmods/Megalodon-OCR-Sync-0713), and [ViLaSR-7B](https://huggingface.co/inclusionAI/ViLaSR) are recent vision-language models excelling in document intelligence and multimodal understanding. Camel-Doc-OCR-062825 is a Qwen2.5-VL-7B-Instruct finetune, highly optimized for document retrieval, structured extraction, analysis, and direct Markdown generation from images and PDFs. GLM-4.1V-9B-Thinking offers next-level multimodal reasoning, bringing visual and textual comprehension together for advanced question answering.")
277
  gr.Markdown("> Megalodon-OCR-Sync-0713, finetuned from Qwen2.5-VL-3B-Instruct, specializes in context-aware multimodal document extraction and analysis, excelling at retrieval, layout parsing, math, and chart/table recognition, with robust video and long-form comprehension capabilities. ViLaSR-7B focuses on reinforcing spatial reasoning in visual-language tasks by combining interwoven thinking with visual drawing, making it especially suited for spatial reasoning and complex tip-based queries.")
278
+ gr.Markdown("> ✋ The ViLaSR-7B demo only supports text-only reasoning, which doesn't reflect the model's full behavior and may underrepresent its capabilities.")
279
+
280
+ gr.Markdown("> ⚠️ Note: Models in this space may not perform well on video inference tasks.")
281
  # Define the submit button actions
282
  image_submit.click(fn=generate_image,
283
  inputs=[