Thiago Hersan committed
Commit 7c653a9
1 Parent(s): efe209b

clean up app setup

Files changed (2)
  1. app.ipynb +6 -7
  2. app.py +16 -16
app.ipynb CHANGED
@@ -9,7 +9,7 @@
     "import gradio as gr\n",
     "import numpy as np\n",
     "from PIL import Image\n",
-    "from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation"
+    "from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor"
    ]
  },
  {
@@ -18,15 +18,14 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# feature_extractor = MaskFormerFeatureExtractor.from_pretrained(\"facebook/maskformer-swin-tiny-coco\")\n",
-    "# model = MaskFormerForInstanceSegmentation.from_pretrained(\"facebook/maskformer-swin-tiny-coco\")\n",
-    "feature_extractor = MaskFormerFeatureExtractor.from_pretrained(\"facebook/maskformer-swin-large-coco\")\n",
-    "model = MaskFormerForInstanceSegmentation.from_pretrained(\"facebook/maskformer-swin-large-coco\")\n",
+    "model_id = f\"facebook/maskformer-swin-large-coco\"\n",
+    "\n",
+    "feature_extractor = MaskFormerImageProcessor.from_pretrained(model_id)\n",
+    "model = MaskFormerForInstanceSegmentation.from_pretrained(model_id)\n",
     "\n",
     "with Image.open(\"../color-filter-calculator/assets/Artshack_screen.jpg\") as img:\n",
     "    img_size = (img.height, img.width)\n",
-    "    inputs = feature_extractor(images=img, return_tensors=\"pt\")\n",
-    "    "
+    "    inputs = feature_extractor(images=img, return_tensors=\"pt\")"
    ]
  },
  {
app.py CHANGED
@@ -2,25 +2,26 @@ import glob
 import gradio as gr
 import numpy as np
 from PIL import Image
-from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation
+from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
 
 
-# feature_extractor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-tiny-coco")
-# model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-tiny-coco")
-feature_extractor = MaskFormerFeatureExtractor.from_pretrained("facebook/maskformer-swin-large-coco")
-model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-large-coco")
-
 example_images = sorted(glob.glob('examples/map*.jpg'))
 
-def visualize_instance_seg_mask(img_in, mask, id2label):
+model_id = f"facebook/maskformer-swin-large-coco"
+vegetation_labels = ["tree-merged", "grass-merged"]
+
+preprocessor = MaskFormerImageProcessor.from_pretrained(model_id)
+model = MaskFormerForInstanceSegmentation.from_pretrained(model_id)
+
+
+def visualize_instance_seg_mask(img_in, mask, id2label, included_labels):
     img_out = np.zeros((mask.shape[0], mask.shape[1], 3))
     image_total_pixels = mask.shape[0] * mask.shape[1]
     label_ids = np.unique(mask)
-    vegetation_labels = ["tree-merged", "grass-merged"]
 
     def get_color(id):
         id_color = (np.random.randint(0, 2), np.random.randint(0, 4), np.random.randint(0, 256))
-        if id2label[id] in vegetation_labels:
+        if id2label[id] in included_labels:
             id_color = (0, 140, 0)
         return id_color
 
@@ -34,13 +35,13 @@ def visualize_instance_seg_mask(img_in, mask, id2label):
 
     image_res = (0.5 * img_in + 0.5 * img_out).astype(np.uint8)
 
-    vegetation_count = sum([id2count[id] for id in label_ids if id2label[id] in vegetation_labels])
+    vegetation_count = sum([id2count[id] for id in label_ids if id2label[id] in included_labels])
 
     dataframe_vegetation_items = [[
         f"{id2label[id]}",
         f"{(100 * id2count[id] / image_total_pixels):.2f} %",
         f"{np.sqrt(id2count[id] / image_total_pixels):.2f} m"
-    ] for id in label_ids if id2label[id] in vegetation_labels]
+    ] for id in label_ids if id2label[id] in included_labels]
     dataframe_all_items = [[
         f"{id2label[id]}",
         f"{(100 * id2count[id] / image_total_pixels):.2f} %",
@@ -65,10 +66,10 @@ def visualize_instance_seg_mask(img_in, mask, id2label):
 def query_image(image_path):
     img = np.array(Image.open(image_path))
     img_size = (img.shape[0], img.shape[1])
-    inputs = feature_extractor(images=img, return_tensors="pt")
+    inputs = preprocessor(images=img, return_tensors="pt")
     outputs = model(**inputs)
-    results = feature_extractor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[img_size])[0]
-    mask_img, dataframe = visualize_instance_seg_mask(img, results.numpy(), model.config.id2label)
+    results = preprocessor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[img_size])[0]
+    mask_img, dataframe = visualize_instance_seg_mask(img, results.numpy(), model.config.id2label, vegetation_labels)
     return mask_img, dataframe
 
 
@@ -83,8 +84,7 @@ demo = gr.Interface(
     allow_flagging="never",
     analytics_enabled=None,
     examples=example_images,
-    # cache_examples=True
+    cache_examples=True
 )
 
-demo.queue(concurrency_count=8, max_size=8)
 demo.launch(show_api=False)
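
For context, a minimal standalone sketch of the inference path this commit converges on (MaskFormerImageProcessor is the replacement for the deprecated MaskFormerFeatureExtractor in recent transformers releases); the example image path and the torch.no_grad() wrapper are illustrative additions, not part of the commit:

import numpy as np
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

model_id = "facebook/maskformer-swin-large-coco"

# same processor/model pairing as app.py after this commit
preprocessor = MaskFormerImageProcessor.from_pretrained(model_id)
model = MaskFormerForInstanceSegmentation.from_pretrained(model_id)

img = np.array(Image.open("examples/map-00.jpg"))  # hypothetical example path
img_size = (img.shape[0], img.shape[1])  # (height, width), matching query_image()

inputs = preprocessor(images=img, return_tensors="pt")
with torch.no_grad():  # inference only; wrapper added for this sketch
    outputs = model(**inputs)

# one (height x width) tensor of label ids per input image, resized to target_sizes
mask = preprocessor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[img_size])[0]
print([model.config.id2label[i] for i in np.unique(mask.numpy())])  # label names present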