ahmedmbutt commited on
Commit
5e422b6
1 Parent(s): 2dda520

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -1,12 +1,12 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceApi
3
  import requests
4
  from PIL import Image
5
  import io
6
 
7
- # Initialize the Hugging Face Inference API
8
  model_id = "Salesforce/blip-image-captioning-large"
9
- api = InferenceApi(repo_id=model_id)
10
 
11
  def caption_image(image):
12
  # Convert the PIL image to bytes
@@ -15,7 +15,7 @@ def caption_image(image):
15
  img_bytes = buffered.getvalue()
16
 
17
  # Call the Hugging Face inference API
18
- response = api(inputs=img_bytes)
19
 
20
  # Check the response and format it properly
21
  if isinstance(response, list) and response:
@@ -26,7 +26,7 @@ def caption_image(image):
26
  # Set up the Gradio interface
27
  interface = gr.Interface(
28
  fn=caption_image,
29
- inputs=gr.inputs.Image(type="pil"),
30
  outputs="text",
31
  title="Image Captioning",
32
  description="Generate captions for images using the BLIP model."
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
  import requests
4
  from PIL import Image
5
  import io
6
 
7
+ # Initialize the Hugging Face Inference Client
8
  model_id = "Salesforce/blip-image-captioning-large"
9
+ client = InferenceClient(model=model_id)
10
 
11
  def caption_image(image):
12
  # Convert the PIL image to bytes
 
15
  img_bytes = buffered.getvalue()
16
 
17
  # Call the Hugging Face inference API
18
+ response = client.image_to_text(image=img_bytes)
19
 
20
  # Check the response and format it properly
21
  if isinstance(response, list) and response:
 
26
  # Set up the Gradio interface
27
  interface = gr.Interface(
28
  fn=caption_image,
29
+ inputs=gr.Image(type="pil"),
30
  outputs="text",
31
  title="Image Captioning",
32
  description="Generate captions for images using the BLIP model."