Kendamarron committed on
Commit
65aba35
·
verified ·
1 Parent(s): 817d69b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -7,7 +7,7 @@ import gradio as gr
7
  from gradio import FileData
8
  import time
9
  import spaces
10
- ckpt = "Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge"
11
  model = MllamaForConditionalGeneration.from_pretrained(ckpt,
12
  torch_dtype=torch.bfloat16).to("cuda")
13
  processor = AutoProcessor.from_pretrained(ckpt)
@@ -92,7 +92,7 @@ demo = gr.ChatInterface(fn=bot_streaming, title="Multimodal Llama", examples=[
92
  )
93
  ],
94
  cache_examples=False,
95
- description="[Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge](https://huggingface.co/Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-Merge)のデモ",
96
  stop_btn="Stop Generation",
97
  fill_height=True,
98
  multimodal=True)
 
7
  from gradio import FileData
8
  import time
9
  import spaces
10
+ ckpt = "Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-LoRA"
11
  model = MllamaForConditionalGeneration.from_pretrained(ckpt,
12
  torch_dtype=torch.bfloat16).to("cuda")
13
  processor = AutoProcessor.from_pretrained(ckpt)
 
92
  )
93
  ],
94
  cache_examples=False,
95
+ description="[Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-LoRA](https://huggingface.co/Kendamarron/Llama-3.2-11B-Vision-Instruct-Swallow-8B-LoRA)のデモ",
96
  stop_btn="Stop Generation",
97
  fill_height=True,
98
  multimodal=True)