aliabd committed
Commit f355134 · Parent: 5998e4e

Upload with huggingface_hub

Files changed (5):
  1. DESCRIPTION.md +1 -0
  2. README.md +6 -7
  3. example_2.png +0 -0
  4. requirements.txt +5 -0
  5. run.py +47 -0
DESCRIPTION.md ADDED
@@ -0,0 +1 @@
+ Image segmentation using MaskFormer. Takes in an input image and returns a segmented image.
README.md CHANGED
@@ -1,12 +1,11 @@
+
  ---
- title: Image Segmentation Main
- emoji: 💻
- colorFrom: pink
- colorTo: pink
+ title: image_segmentation_main
+ emoji: 🔥
+ colorFrom: indigo
+ colorTo: indigo
  sdk: gradio
  sdk_version: 3.6
- app_file: app.py
+ app_file: run.py
  pinned: false
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

example_2.png ADDED
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ transformers
+ torch
+ scipy
+ numpy
+ https://gradio-main-build.s3.amazonaws.com/c3bec6153737855510542e8154391f328ac72606/gradio-3.6-py3-none-any.whl
run.py ADDED
@@ -0,0 +1,47 @@
+ import gradio as gr
+ import torch
+ import random
+ import numpy as np
+ from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation
+
+ # Load the MaskFormer model and feature extractor once at startup.
+ device = torch.device("cpu")
+ model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-tiny-ade").to(device)
+ model.eval()
+ preprocessor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-tiny-ade")
+
+ def visualize_instance_seg_mask(mask):
+     # Paint each segment label with its own random RGB color.
+     image = np.zeros((mask.shape[0], mask.shape[1], 3))
+     labels = np.unique(mask)
+     label2color = {label: (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for label in labels}
+     for i in range(image.shape[0]):
+         for j in range(image.shape[1]):
+             image[i, j, :] = label2color[mask[i, j]]
+     # Scale to [0, 1] so Gradio renders the float array as an image.
+     image = image / 255
+     return image
+
+ def query_image(img):
+     target_size = (img.shape[0], img.shape[1])
+     inputs = preprocessor(images=img, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model(**inputs)
+     outputs.class_queries_logits = outputs.class_queries_logits.cpu()
+     outputs.masks_queries_logits = outputs.masks_queries_logits.cpu()
+     # Resize the predicted masks to the input resolution, then keep the
+     # highest-scoring segment at each pixel.
+     results = preprocessor.post_process_segmentation(outputs=outputs, target_size=target_size)[0].cpu().detach()
+     results = torch.argmax(results, dim=0).numpy()
+     results = visualize_instance_seg_mask(results)
+     return results
+
+ demo = gr.Interface(
+     query_image,
+     inputs=[gr.Image()],
+     outputs="image",
+     title="MaskFormer Demo",
+     examples=[["example_2.png"]]
+ )
+
+ demo.launch()
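
For quick local testing, the same inference path can be exercised without the Gradio UI. A minimal sketch, assuming query_image above is in scope (for instance, run in the same interpreter session before demo.launch()) and that example_2.png is in the working directory; segmented.png is a hypothetical output name:

import numpy as np
from PIL import Image

# Load the bundled example as an RGB array, the same kind of input
# that gr.Image() passes to query_image.
img = np.array(Image.open("example_2.png").convert("RGB"))

# query_image returns a float array in [0, 1] with shape (H, W, 3).
seg = query_image(img)

# Rescale to 8-bit and write the colorized segmentation map to disk.
Image.fromarray((seg * 255).astype(np.uint8)).save("segmented.png")

Note that later transformers releases deprecate post_process_segmentation in favor of post_process_semantic_segmentation, so reusing this script with unpinned dependencies may require swapping that call.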