muneebashraf committed on
Commit
bac3301
·
1 Parent(s): 524f1cf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -10
app.py CHANGED
@@ -1,16 +1,25 @@
1
  import gradio as gr
2
- from transformers import pipeline, BertTokenizer, BertModel
3
 
4
- tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')
5
- model = BertModel.from_pretrained("bert-large-uncased")
6
- unmasker = pipeline('fill-mask', model=model, tokenizer=tokenizer)
 
7
 
8
- def fill_mask(sentence):
9
- results = unmasker(sentence)
10
- return [result['sequence'] for result in results]
11
 
12
- inputs = gr.inputs.Image(label="Upload an image")
13
- outputs = gr.outputs.Textbox(label="Predicted sequences")
 
 
 
 
14
 
15
- gr.Interface(fn=fill_mask, inputs=inputs, outputs=outputs).launch()
 
 
16
 
 
 
 
1
  import gradio as gr
2
+ from transformers import BlipProcessor, BlipForConditionalGeneration
3
 
4
+ # Load the model and tokenizer
5
+ model_name = "Salesforce/blip-image-captioning-large"
6
+ processor = BlipProcessor.from_pretrained(model_name)
7
+ model = BlipForConditionalGeneration.from_pretrained(model_name)
8
 
9
def generate_caption(image):
    """Return a natural-language caption for *image*.

    Uses the module-level BLIP ``processor`` to turn the image into
    model tensors, lets the module-level ``model`` generate caption
    token ids, and decodes the first sequence back into plain text.
    """
    # Convert the raw image into the tensor batch the model expects.
    model_inputs = processor(images=image, return_tensors="pt")

    # Autoregressively generate the caption token ids.
    token_ids = model.generate(**model_inputs)

    # Decode the single generated sequence, dropping special tokens.
    return processor.decode(token_ids[0], skip_special_tokens=True)
19
 
20
# Define the Gradio interface.
# Fix: the gr.inputs / gr.outputs namespaces were deprecated in Gradio 3.x
# and removed in 4.x; the top-level gr.Image / gr.Textbox components are the
# supported equivalents and keep the same labels and default behavior.
inputs = gr.Image(label="Upload an image")
outputs = gr.Textbox(label="Generated Caption")

# Create and launch the Gradio app.
gr.Interface(fn=generate_caption, inputs=inputs, outputs=outputs).launch()