sflindrs committed on
Commit
981c158
·
verified ·
1 Parent(s): a8606ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -7,7 +7,7 @@ import gradio as gr
7
  from gradio import FileData
8
  import time
9
  import spaces
10
- ckpt = "meta-llama/Llama-3.2-11B-Vision-Instruct"
11
  model = MllamaForConditionalGeneration.from_pretrained(ckpt,
12
  torch_dtype=torch.bfloat16).to("cuda")
13
  processor = AutoProcessor.from_pretrained(ckpt)
@@ -92,7 +92,7 @@ demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
92
  )
93
  ],
94
  cache_examples=False,
95
- description="Try Multimodal Llama by Meta with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32). ",
96
  stop_btn="Stop Generation",
97
  fill_height=True,
98
  multimodal=True)
 
7
  from gradio import FileData
8
  import time
9
  import spaces
10
+ ckpt = "mlx-community/Llama-3.2-11B-Vision-Instruct-abliterated"
11
  model = MllamaForConditionalGeneration.from_pretrained(ckpt,
12
  torch_dtype=torch.bfloat16).to("cuda")
13
  processor = AutoProcessor.from_pretrained(ckpt)
 
92
  )
93
  ],
94
  cache_examples=False,
95
+ description="Try Multimodal Llama (Abliterated) by Mlx with transformers in this demo. Upload an image, and start chatting about it, or simply try one of the examples below. To learn more about Llama Vision, visit [our blog post](https://huggingface.co/blog/llama32). ",
96
  stop_btn="Stop Generation",
97
  fill_height=True,
98
  multimodal=True)