aliabd (HF staff) committed
Commit: 1b2c122
Parent: c1b5cdb

Upload with huggingface_hub

Files changed (4):
  1. DESCRIPTION.md +1 -1
  2. README.md +1 -1
  3. run.ipynb +1 -1
  4. run.py +59 -40
DESCRIPTION.md CHANGED
@@ -1 +1 @@
-Image segmentation using DETR. Takes in both an inputu image and the desired confidence, and returns a segmented image.
+Simple image segmentation using gradio's AnnotatedImage component.
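For context on the new description: a gr.AnnotatedImage value is a (base image, annotations) pair, where each annotation couples either a bounding box (x1, y1, x2, y2) in pixel coordinates or a per-pixel mask array with a label string. A minimal sketch of that format against a gradio 3.27-era API (the annotate function and its label are illustrative, not part of this commit):

```python
import gradio as gr
import numpy as np

def annotate(img: np.ndarray):
    # Pair the input image with one bounding-box annotation
    # covering its top-left quadrant (labels here are made up).
    h, w = img.shape[:2]
    box = (0, 0, w // 2, h // 2)  # (x1, y1, x2, y2) in pixels
    return (img, [(box, "top-left quadrant")])

demo = gr.Interface(annotate, gr.Image(), gr.AnnotatedImage())

if __name__ == "__main__":
    demo.launch()
```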
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
-sdk_version: 3.26.0
+sdk_version: 3.27.0
 app_file: run.py
 pinned: false
 ---
run.ipynb CHANGED
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: image_segmentation\n", "### Image segmentation using DETR. Takes in both an inputu image and the desired confidence, and returns a segmented image.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio transformers torch scipy numpy"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/image_segmentation/example_2.png"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "import random\n", "import numpy as np\n", "from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation\n", "\n", "device = torch.device(\"cpu\")\n", "model = MaskFormerForInstanceSegmentation.from_pretrained(\"facebook/maskformer-swin-tiny-ade\").to(device)\n", "model.eval()\n", "preprocessor = MaskFormerFeatureExtractor.from_pretrained(\"facebook/maskformer-swin-tiny-ade\")\n", "\n", "def visualize_instance_seg_mask(mask):\n", " image = np.zeros((mask.shape[0], mask.shape[1], 3))\n", " labels = np.unique(mask)\n", " label2color = {label: (random.randint(0, 1), random.randint(0, 255), random.randint(0, 255)) for label in labels}\n", " for i in range(image.shape[0]):\n", " for j in range(image.shape[1]):\n", " image[i, j, :] = label2color[mask[i, j]]\n", " image = image / 255\n", " return image\n", "\n", "def query_image(img):\n", " target_size = (img.shape[0], img.shape[1])\n", " inputs = preprocessor(images=img, return_tensors=\"pt\")\n", " with torch.no_grad():\n", " outputs = model(**inputs)\n", " outputs.class_queries_logits = outputs.class_queries_logits.cpu()\n", " outputs.masks_queries_logits = outputs.masks_queries_logits.cpu()\n", " results = preprocessor.post_process_segmentation(outputs=outputs, target_size=target_size)[0].cpu().detach()\n", " results = torch.argmax(results, dim=0).numpy()\n", " results = visualize_instance_seg_mask(results)\n", " return results\n", "\n", "demo = gr.Interface(\n", " query_image, \n", " inputs=[gr.Image()], \n", " outputs=\"image\",\n", " title=\"MaskFormer Demo\",\n", " examples=[[\"example_2.png\"]]\n", ")\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: image_segmentation\n", "### Simple image segmentation using gradio's AnnotatedImage component.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import random\n", "\n", "with gr.Blocks() as demo:\n", " section_labels = [\n", " \"apple\",\n", " \"banana\",\n", " \"carrot\",\n", " \"donut\",\n", " \"eggplant\",\n", " \"fish\",\n", " \"grapes\",\n", " \"hamburger\",\n", " \"ice cream\",\n", " \"juice\",\n", " ]\n", "\n", " with gr.Row():\n", " num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n", " num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n", "\n", " with gr.Row():\n", " img_input = gr.Image()\n", " img_output = gr.AnnotatedImage().style(\n", " color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n", " )\n", "\n", " section_btn = gr.Button(\"Identify Sections\")\n", " selected_section = gr.Textbox(label=\"Selected Section\")\n", "\n", " def section(img, num_boxes, num_segments):\n", " sections = []\n", " for a in range(num_boxes):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " w = random.randint(0, img.shape[1] - x)\n", " h = random.randint(0, img.shape[0] - y)\n", " sections.append(((x, y, x + w, y + h), section_labels[a]))\n", " for b in range(num_segments):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n", " mask = np.zeros(img.shape[:2])\n", " for i in range(img.shape[0]):\n", " for j in range(img.shape[1]):\n", " dist_square = (i - y) ** 2 + (j - x) ** 2\n", " if dist_square < r**2:\n", " mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n", " sections.append((mask, section_labels[b + num_boxes]))\n", " return (img, sections)\n", "\n", " section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n", "\n", " def select_section(evt: gr.SelectData):\n", " return section_labels[evt.index]\n", "\n", " img_output.select(select_section, None, selected_section)\n", "\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -1,42 +1,61 @@
 import gradio as gr
-import torch
-import random
 import numpy as np
-from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation
-
-device = torch.device("cpu")
-model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-tiny-ade").to(device)
-model.eval()
-preprocessor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-tiny-ade")
-
-def visualize_instance_seg_mask(mask):
-    image = np.zeros((mask.shape[0], mask.shape[1], 3))
-    labels = np.unique(mask)
-    label2color = {label: (random.randint(0, 1), random.randint(0, 255), random.randint(0, 255)) for label in labels}
-    for i in range(image.shape[0]):
-        for j in range(image.shape[1]):
-            image[i, j, :] = label2color[mask[i, j]]
-    image = image / 255
-    return image
-
-def query_image(img):
-    target_size = (img.shape[0], img.shape[1])
-    inputs = preprocessor(images=img, return_tensors="pt")
-    with torch.no_grad():
-        outputs = model(**inputs)
-    outputs.class_queries_logits = outputs.class_queries_logits.cpu()
-    outputs.masks_queries_logits = outputs.masks_queries_logits.cpu()
-    results = preprocessor.post_process_segmentation(outputs=outputs, target_size=target_size)[0].cpu().detach()
-    results = torch.argmax(results, dim=0).numpy()
-    results = visualize_instance_seg_mask(results)
-    return results
-
-demo = gr.Interface(
-    query_image,
-    inputs=[gr.Image()],
-    outputs="image",
-    title="MaskFormer Demo",
-    examples=[["example_2.png"]]
-)
-
-demo.launch()
+import random
+
+with gr.Blocks() as demo:
+    section_labels = [
+        "apple",
+        "banana",
+        "carrot",
+        "donut",
+        "eggplant",
+        "fish",
+        "grapes",
+        "hamburger",
+        "ice cream",
+        "juice",
+    ]
+
+    with gr.Row():
+        num_boxes = gr.Slider(0, 5, 2, step=1, label="Number of boxes")
+        num_segments = gr.Slider(0, 5, 1, step=1, label="Number of segments")
+
+    with gr.Row():
+        img_input = gr.Image()
+        img_output = gr.AnnotatedImage().style(
+            color_map={"banana": "#a89a00", "carrot": "#ffae00"}
+        )
+
+    section_btn = gr.Button("Identify Sections")
+    selected_section = gr.Textbox(label="Selected Section")
+
+    def section(img, num_boxes, num_segments):
+        sections = []
+        for a in range(num_boxes):
+            x = random.randint(0, img.shape[1])
+            y = random.randint(0, img.shape[0])
+            w = random.randint(0, img.shape[1] - x)
+            h = random.randint(0, img.shape[0] - y)
+            sections.append(((x, y, x + w, y + h), section_labels[a]))
+        for b in range(num_segments):
+            x = random.randint(0, img.shape[1])
+            y = random.randint(0, img.shape[0])
+            r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))
+            mask = np.zeros(img.shape[:2])
+            for i in range(img.shape[0]):
+                for j in range(img.shape[1]):
+                    dist_square = (i - y) ** 2 + (j - x) ** 2
+                    if dist_square < r**2:
+                        mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4
+            sections.append((mask, section_labels[b + num_boxes]))
+        return (img, sections)
+
+    section_btn.click(section, [img_input, num_boxes, num_segments], img_output)
+
+    def select_section(evt: gr.SelectData):
+        return section_labels[evt.index]
+
+    img_output.select(select_section, None, selected_section)
+
+
+demo.launch()
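A note on the new section function above: the nested per-pixel loop that draws each circular mask is pure Python and scales with image area, so large uploads can be slow. A behavior-matching NumPy vectorization is sketched below (circular_mask is a hypothetical helper, not part of this commit); the round(... * 4) / 4 quantization snaps mask intensities to 0.25 steps, so the overlay renders as discrete opacity bands.

```python
import numpy as np

def circular_mask(shape, x, y, r):
    # Vectorized equivalent of the per-pixel loop in run.py: a quantized
    # radial falloff from 1.0 at the center toward 0 at radius r.
    ii, jj = np.ogrid[: shape[0], : shape[1]]
    dist_square = (ii - y) ** 2 + (jj - x) ** 2
    mask = np.zeros(shape[:2])
    inside = dist_square < r**2
    mask[inside] = np.round((r**2 - dist_square[inside]) / r**2 * 4) / 4
    return mask
```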