Thiago Hersan committed
Commit 9becae6
1 Parent(s): d23383d

initial commit of space using maskformer-satellite-trees model

Files changed (5)
  1. .gitignore +2 -0
  2. README.md +8 -9
  3. app.ipynb +116 -0
  4. app.py +77 -0
  5. requirements.txt +4 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ .DS_Store
+
README.md CHANGED
@@ -1,13 +1,12 @@
- ---
- title: Maskformer Satellite Trees Gradio
- emoji: 👀
  colorFrom: blue
- colorTo: yellow
  sdk: gradio
- sdk_version: 3.16.2
  app_file: app.py
  pinned: false
- license: other
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ title: Maskformer Satellite+Tree Gradio
+ emoji: 🛰
  colorFrom: blue
+ colorTo: indigo
  sdk: gradio
+ sdk_version: 3.16.1
  app_file: app.py
+ models:
+ - "thiagohersan/maskformer-satellite-trees"
  pinned: false
+ license: cc-by-nc-sa-4.0
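Aside (not part of the commit): the new `models:` entry links the Space to the model repo that `app.py` loads at runtime. A quick sketch for checking that the repo id resolves, using `huggingface_hub` (an assumed extra dependency, not listed in requirements.txt):

```python
# Sketch only; huggingface_hub is an assumed extra dependency, not in requirements.txt.
from huggingface_hub import model_info

# The repo id comes from the `models:` entry in the README front matter.
info = model_info("thiagohersan/maskformer-satellite-trees")
print(info.tags)
```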
app.ipynb ADDED
@@ -0,0 +1,116 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import gradio as gr\n",
+     "import numpy as np\n",
+     "from PIL import Image as PImage\n",
+     "from torchvision import transforms as T\n",
+     "from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "ade_mean=[0.485, 0.456, 0.406]\n",
+     "ade_std=[0.229, 0.224, 0.225]\n",
+     "\n",
+     "model_id = f\"thiagohersan/maskformer-satellite-trees\""
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# preprocessor = MaskFormerImageProcessor.from_pretrained(model_id)\n",
+     "preprocessor = MaskFormerImageProcessor(\n",
+     "    do_resize=False,\n",
+     "    do_normalize=False,\n",
+     "    do_rescale=False,\n",
+     "    ignore_index=255,\n",
+     "    reduce_labels=False\n",
+     ")\n",
+     "\n",
+     "model = MaskFormerForInstanceSegmentation.from_pretrained(model_id)\n",
+     "\n",
+     "test_transform = T.Compose([\n",
+     "    T.ToTensor(),\n",
+     "    T.Normalize(mean=ade_mean, std=ade_std)\n",
+     "])\n",
+     "\n",
+     "with PImage.open(\"../color-filter-calculator/assets/Artshack_screen.jpg\") as img:\n",
+     "    img_size = (img.height, img.width)\n",
+     "    norm_image = test_transform(np.array(img))\n",
+     "    inputs = preprocessor(images=norm_image, return_tensors=\"pt\")\n",
+     "    "
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "outputs = model(**inputs)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "results = preprocessor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[img_size])[0]\n",
+     "results = results.numpy()\n",
+     "\n",
+     "labels = np.unique(results)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "for label_id in labels:\n",
+     "    print(model.config.id2label[label_id])"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3.8.15 ('hf-gradio')",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.8.15"
+   },
+   "orig_nbformat": 4,
+   "vscode": {
+    "interpreter": {
+     "hash": "4888b226c77b860705e4be316b14a092026f41c3585ee0ddb38f3008c0cb495e"
+    }
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
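Aside (not part of the commit): the notebook's last cell only prints which labels occur. Since `results` is already a per-pixel label map, per-label coverage follows directly; a minimal sketch using the `results` and `model` defined in the cells above:

```python
# Sketch: per-label pixel coverage from the notebook's `results` array (not in the commit).
import numpy as np

label_ids, counts = np.unique(results, return_counts=True)
for label_id, count in zip(label_ids, counts):
    # results.size is the total pixel count of the segmentation map
    print(f"{model.config.id2label[label_id]}: {100 * count / results.size:.2f}% of pixels")
```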
app.py ADDED
@@ -0,0 +1,77 @@
+ import gradio as gr
+ import numpy as np
+ from torchvision import transforms as T
+ from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor
+
+
+ ade_mean=[0.485, 0.456, 0.406]
+ ade_std=[0.229, 0.224, 0.225]
+
+ palette = [
+     [120, 120, 120], [4, 200, 4], [180, 120, 120], [6, 230, 230],
+     [80, 50, 50], [120, 120, 80], [140, 140, 140], [204, 5, 255]
+ ]
+
+ model_id = f"thiagohersan/maskformer-satellite-trees"
+
+ # preprocessor = MaskFormerImageProcessor.from_pretrained(model_id)
+ preprocessor = MaskFormerImageProcessor(
+     do_resize=False,
+     do_normalize=False,
+     do_rescale=False,
+     ignore_index=255,
+     reduce_labels=False
+ )
+
+ model = MaskFormerForInstanceSegmentation.from_pretrained(model_id)
+
+ test_transform = T.Compose([
+     T.ToTensor(),
+     T.Normalize(mean=ade_mean, std=ade_std)
+ ])
+
+ def visualize_instance_seg_mask(img_in, mask, id2label):
+     img_out = np.zeros((mask.shape[0], mask.shape[1], 3))
+     image_total_pixels = mask.shape[0] * mask.shape[1]
+     label_ids = np.unique(mask)
+
+     id2color = {id: palette[id] for id in label_ids}
+     id2count = {id: 0 for id in label_ids}
+
+     for i in range(img_out.shape[0]):
+         for j in range(img_out.shape[1]):
+             img_out[i, j, :] = id2color[mask[i, j]]
+             id2count[mask[i, j]] = id2count[mask[i, j]] + 1
+
+     image_res = (0.5 * img_in + 0.5 * img_out) / 255
+
+     dataframe = [[
+         f"{id2label[id]}",
+         f"{(100 * id2count[id] / image_total_pixels):.2f} %",
+         f"{np.sqrt(id2count[id] / image_total_pixels):.2f} m"
+     ] for id in label_ids if 'tree' in id2label[id]]
+
+     return image_res, dataframe
+
+
+ def query_image(img):
+     img_size = (img.shape[0], img.shape[1])
+     inputs = preprocessor(images=test_transform(np.array(img)), return_tensors="pt")
+
+     outputs = model(**inputs)
+
+     results = preprocessor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[img_size])[0]
+     results = visualize_instance_seg_mask(img, results.numpy(), model.config.id2label)
+     return results
+
+
+ demo = gr.Interface(
+     query_image,
+     inputs=[gr.Image(label="Input Image")],
+     outputs=[gr.Image(label="Trees"), gr.DataFrame(headers=None, label="Area Info")],
+     title="maskformer-satellite-trees",
+     allow_flagging="never",
+     analytics_enabled=None
+ )
+
+ demo.launch(show_api=False)
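Aside (not part of the commit): the nested per-pixel loops in `visualize_instance_seg_mask` run in pure Python and scale poorly with image size. A sketch of an equivalent vectorized version, assuming the same `palette` list and an integer `(H, W)` mask as above:

```python
# Sketch of a vectorized alternative to the per-pixel loops in visualize_instance_seg_mask.
# Assumes `mask` is an (H, W) integer label map whose ids all index into `palette`.
import numpy as np

def colorize_mask(mask, palette):
    lut = np.array(palette, dtype=np.float64)   # (num_labels, 3) lookup table
    img_out = lut[mask]                         # fancy indexing: (H, W) -> (H, W, 3)
    label_ids, counts = np.unique(mask, return_counts=True)
    id2count = dict(zip(label_ids, counts))     # same per-label pixel counts as the loop
    return img_out, id2count
```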
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ scipy
+ torch
+ torchvision
+ transformers