hubsnippetai commited on
Commit
77ef807
1 Parent(s): 177d2ff

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests

from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq

# Checkpoint for Microsoft's Kosmos-2 grounded vision-language model.
_CHECKPOINT = "microsoft/kosmos-2-patch14-224"

# The processor handles both prompt tokenisation and image preprocessing;
# the model generates grounded captions from the combined inputs.
processor = AutoProcessor.from_pretrained(_CHECKPOINT)
model = AutoModelForVision2Seq.from_pretrained(_CHECKPOINT)

# NOTE: the original Kosmos-2 demo saves the image first then reloads it.
# For some images this round-trip gives a slightly different image input
# and changes the generation outputs.

# Text prefix fed to the model alongside the image.
prompt = "An image of"
def describe_image(image_path):
    """Generate a natural-language description of an image with Kosmos-2.

    Args:
        image_path: the image to describe. Despite the name, Gradio passes
            a PIL.Image here (the input component uses type='pil'); the
            Kosmos-2 processor accepts PIL images directly.

    Returns:
        str: the cleaned-up caption, with Kosmos-2 grounding markup removed
        by ``post_process_generation``.
    """
    # One processor call tokenises the text prompt and preprocesses the image.
    inputs = processor(text=prompt, images=image_path, return_tensors="pt")

    generated_ids = model.generate(
        pixel_values=inputs["pixel_values"],
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image_embeds=None,
        image_embeds_position_mask=inputs["image_embeds_position_mask"],
        use_cache=True,
        max_new_tokens=128,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

    # BUGFIX: the original called post_process_generation twice — first with
    # cleanup_and_extract=False ("raw" generation) — and immediately discarded
    # that result by reassigning processed_text. Only the cleaned-up call is
    # kept; pass cleanup_and_extract=False here if the raw output is needed.
    processed_text, entities = processor.post_process_generation(generated_text)

    return processed_text
import gradio as gr

# Minimal single-input / single-output UI: upload an image, read back the
# Kosmos-2 caption produced by describe_image.
gr_app = gr.Interface(
    fn=describe_image,
    inputs=[gr.Image(label="Upload an image for description", type='pil')],
    outputs=[gr.Textbox(label="Image description")],
    title="App for image description",
)

if __name__ == "__main__":
    # show_error surfaces server-side exceptions in the browser UI,
    # which eases debugging on hosted Spaces.
    gr_app.launch(show_error=True)