coco-gelamay committed
Commit
52bf25d
1 Parent(s): bc8f787

Create app.py

Files changed (1)
  1. app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
+ from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation
+ from datasets import load_dataset
+ from transformers import pipeline
+ from PIL import Image, ImageDraw, ImageFont
+ import gradio as gr
+
+ ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
+
+ def object_classify(img1, img2):
+
+     feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b2-finetuned-ade-512-512")
+     model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b2-finetuned-ade-512-512")
+
+     object_detector = pipeline(task="image-segmentation", model=model, feature_extractor=feature_extractor)
+
+     # each call returns a list of dictionaries, one per detected segment
+     dict_obj1 = object_detector(img1)
+     dict_obj2 = object_detector(img2)
+
+     # lists of object labels present in each image
+     objects_1 = []
+     objects_2 = []
+
+     # collect the label from each segment dictionary
+     for i in dict_obj1:
+         objects_1.append(i['label'])
+
+     for j in dict_obj2:
+         objects_2.append(j['label'])
+
+     # labels present in the first image but missing from the second
+     missing_objects = list(set(objects_1) - set(objects_2))
+
+     return missing_objects
+
+
+ TITLE = 'Missing Items'
+ DESCRIPTION = 'Input two indoor pictures: the first is the original and the second is the one with the missing item(s).'
+ EXAMPLES = [['Bedroom_1.jpg', 'Bedroom_2.jpg']]
+
+ INPUTS = [gr.inputs.Image(type='pil'), gr.inputs.Image(type='pil')]
+ OUTPUTS = gr.outputs.Textbox()
+
+ interface = gr.Interface(object_classify,
+                          INPUTS,
+                          OUTPUTS,
+                          examples=EXAMPLES,
+                          title=TITLE,
+                          description=DESCRIPTION, allow_flagging="never")
+
+
+ interface.launch()
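
For context, each object_detector(img) call above returns segmentation results as a list of dictionaries with 'label', 'score', and 'mask' keys; the label comparison relies only on the 'label' field. A minimal sketch of that extraction is shown below; the segment labels and values are illustrative placeholders, not actual model output.

# Illustrative sketch only: the image-segmentation pipeline returns a list of
# dicts keyed by 'label', 'score', and 'mask'. The entries here are made up.
example_output = [
    {"label": "wall", "score": None, "mask": None},
    {"label": "bed", "score": None, "mask": None},
    {"label": "lamp", "score": None, "mask": None},
]

# same extraction the app performs, written as a comprehension
labels = [segment["label"] for segment in example_output]
print(labels)  # ['wall', 'bed', 'lamp']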