merve HF staff committed on
Commit e5327ee • 1 Parent(s): 638abae

Update app.py

Files changed (1)
app.py +14 -2
app.py CHANGED
@@ -13,6 +13,14 @@ import re
 
 DESCRIPTION = "# LLaVA 🌋"
 
+model_id = "llava-hf/llava-1.5-7b-hf"
+quantization_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_compute_dtype=torch.float16
+)
+pipe = pipeline("image-to-text", model=model_id, model_kwargs={"quantization_config": quantization_config})
+
+
 def extract_response_pairs(text):
     pattern = re.compile(r'(USER:.*?)ASSISTANT:(.*?)(?:$|USER:)', re.DOTALL)
     matches = pattern.findall(text)
@@ -61,11 +69,15 @@ css = """
 """
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.Markdown("## LLaVA, one of the greatest multimodal chat models is now available in transformers with 4-bit quantization!")
+    gr.Markdown("**LLaVA, one of the greatest multimodal chat models is now available in transformers with 4-bit quantization! ⚡️ **")
+    gr.Markdown("**Try it in this demo 🤗 **")
+
     chatbot = gr.Chatbot(label="Chat", show_label=False)
+    gr.Markdown("Input image and text and start chatting 👇")
     with gr.Row():
+
         image = gr.Image(type="pil")
-        text_input = gr.Text(label="Chat Input", show_label=False, max_lines=1, container=False)
+        text_input = gr.Text(label="Chat Input", show_label=False, max_lines=3, container=False)
 
 
 
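For context, here is a minimal, self-contained sketch of how the 4-bit quantized pipeline added in this commit can be exercised outside the Gradio UI. The quantization and pipeline construction mirror the added lines; the imports, the sample image URL, the example prompt text, and the max_new_tokens value are illustrative assumptions and not part of the commit.

import requests
import torch
from PIL import Image
from transformers import BitsAndBytesConfig, pipeline

# Same 4-bit setup as the lines added in this commit.
model_id = "llava-hf/llava-1.5-7b-hf"
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)
pipe = pipeline(
    "image-to-text",
    model=model_id,
    model_kwargs={"quantization_config": quantization_config},
)

# LLaVA-1.5 uses a USER:/ASSISTANT: chat format with an <image> placeholder;
# this is the same structure that extract_response_pairs() in app.py parses
# back into (user, assistant) pairs for the Chatbot component.
url = "https://llava-vl.github.io/static/images/view.jpg"  # sample image (assumption)
image = Image.open(requests.get(url, stream=True).raw)
prompt = "USER: <image>\nWhat is shown in this image?\nASSISTANT:"

outputs = pipe(image, prompt=prompt, generate_kwargs={"max_new_tokens": 200})
print(outputs[0]["generated_text"])

Because quantization is applied at load time via model_kwargs, the rest of the app's chat logic stays unchanged; only the pipeline construction differs from a full-precision setup.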