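"""Gradio demo: spot the missing items between two photos of the same room.

Both images are segmented with an ADE20K-finetuned SegFormer model, and the
labels detected in the first image but absent from the second are reported.
"""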
from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation, pipeline
import gradio as gr

# Load the ADE20K-finetuned SegFormer checkpoint once at startup instead of
# on every request.
feature_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b2-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b2-finetuned-ade-512-512")

# The image-segmentation pipeline returns one dict per detected segment.
object_detector = pipeline(task="image-segmentation", model=model, feature_extractor=feature_extractor)

def object_classify(img1, img2):
  """Return the segment labels present in img1 but missing from img2."""
  segments_1 = object_detector(img1)
  segments_2 = object_detector(img2)

  # Collect the label of each detected segment.
  objects_1 = [segment['label'] for segment in segments_1]
  objects_2 = [segment['label'] for segment in segments_2]

  # Labels found in the first image but not in the second.
  missing_objects = list(set(objects_1) - set(objects_2))

  return ', '.join(missing_objects) if missing_objects else 'No missing items detected'
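# Each pipeline result is a list of dicts shaped roughly like
#   [{'score': None, 'label': 'wall', 'mask': <PIL.Image.Image>}, ...]
# ('score' is None for semantic-segmentation models). A quick sanity check,
# assuming Bedroom_1.jpg and Bedroom_2.jpg exist locally:
#   from PIL import Image
#   print(object_classify(Image.open("Bedroom_1.jpg"), Image.open("Bedroom_2.jpg")))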
 
  
TITLE = 'Missing Items'
DESCRIPTION = ('Upload two indoor pictures: the first is the original scene and the '
               'second is the same scene with one or more items missing.')
# Each example row provides one value per input component.
EXAMPLES = [['Bedroom_1.jpg', 'Bedroom_2.jpg']]

INPUTS = [gr.Image(type='pil'), gr.Image(type='pil')]
OUTPUTS = gr.Textbox()

interface = gr.Interface(fn=object_classify,
                         inputs=INPUTS,
                         outputs=OUTPUTS,
                         examples=EXAMPLES,
                         title=TITLE,
                         description=DESCRIPTION,
                         allow_flagging="never")


interface.launch()
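# To get a temporary public URL (e.g. when running in a notebook), Gradio can
# tunnel the app with:
#   interface.launch(share=True)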