Commit 4574f39
Pawel_Mar committed
1 Parent(s): 7998469
refactors_needed
app.py
CHANGED
@@ -1,11 +1,9 @@
 import os
 import cv2
-from PIL import Image
-import pandas as pd
 import streamlit as st
 from ultralytics import YOLO
 from streamlit_image_comparison import image_comparison
-import numpy as np
+from utils import save_uploadedfile, apply_masks
 
 model = YOLO('models/last.pt')
 
@@ -23,47 +21,7 @@ url = "https://github.com/AkanimohOD19A/img-segmentation"
 link = f'<a href="{url}">This sample app was heavily based on code shared by Akan Daniel. Huge credits to him.</a>'
 st.markdown(link, unsafe_allow_html=True)
 
-def save_uploadedfile(uploadedfile):
-    with open(os.path.join(parent_media_path, "captured_picture.jpg"), "wb") as f:
-        im = Image.open(uploadedfile)
-        im.save(os.path.join(parent_media_path, "captured_picture.jpg"))
 
-def convert_to_jpg(uploaded_image):
-    im = Image.open(uploaded_image)
-    if im.mode in ("RGBA", "P"):
-        im = im.convert("RGB")
-    uploaded_image_path = os.path.join(parent_media_path, "uploaded_image.jpg")
-    im.save(uploaded_image_path)
-
-
-def overlay(image, mask, color, alpha, resize=None):
-    """Combines image and its segmentation mask into a single image.
-
-    Params:
-        image: Training image. np.ndarray,
-        mask: Segmentation mask. np.ndarray,
-        color: Color for segmentation mask rendering. tuple[int, int, int] = (255, 0, 0)
-        alpha: Segmentation mask's transparency. float = 0.5,
-        resize: If provided, both image and its mask are resized before blending them together.
-        tuple[int, int] = (1024, 1024))
-
-    Returns:
-        image_combined: The combined image. np.ndarray
-
-    """
-    # color = color[::-1]
-    colored_mask = np.expand_dims(mask, 0).repeat(3, axis=0)
-    colored_mask = np.moveaxis(colored_mask, 0, -1)
-    masked = np.ma.MaskedArray(image, mask=colored_mask, fill_value=color)
-    image_overlay = masked.filled()
-
-    if resize is not None:
-        image = cv2.resize(image.transpose(1, 2, 0), resize)
-        image_overlay = cv2.resize(image_overlay.transpose(1, 2, 0), resize)
-
-    image_combined = cv2.addWeighted(image, 1 - alpha, image_overlay, alpha, 0)
-
-    return image_combined
 
 st.divider()
 
@@ -84,7 +42,7 @@ if APPLICATION_MODE == "Take Picture":
     st.sidebar.write(
         """
     A computer aided application that segments your input image, built on
-    the powerful YOLOv8
+    the powerful YOLOv8 instance segmentation algorithm developed by *ultralytics*.
 
     Simply take a captured_picture and it gets segmentated in real time.
     """
@@ -95,12 +53,9 @@ if APPLICATION_MODE == "Take Picture":
     st.sidebar.divider()
     st.sidebar.image(picture, caption="captured_picture")
     if st.button("Segment!"):
-        ## Function to save image
-
-        save_uploadedfile(picture)
-        st.sidebar.success("Saved File")
-        # captured_picture_img = os.path.join(parent_media_path, "captured_picture.jpg")
         img_file = os.path.join(parent_media_path, "captured_picture.jpg")
+        save_uploadedfile(picture, save_path=img_file)
+        st.sidebar.success("Saved File")
         st.write("Click on **Clear photo** to retake picture")
 
     st.divider()
@@ -115,45 +70,29 @@ elif APPLICATION_MODE == "Upload Picture":
     """
     )
    st.sidebar.divider()
-    # uploaded_file = st.sidebar.file_uploader("Upload your Image here", type=['png', 'jpeg', 'jpg'])
     uploaded_file = st.sidebar.file_uploader("Drop a JPG/PNG file", accept_multiple_files=False, type=['jpg', 'png'])
-    if uploaded_file is not None and uploaded_file.type != ".jpg":
-        convert_to_jpg(uploaded_file)
     if uploaded_file is not None:
+        img_file = os.path.join(parent_media_path, "uploaded_image.jpg")
+        save_uploadedfile(uploaded_file, save_path=os.path.join(parent_media_path, "uploaded_image.jpg"))
         file_details = {"FileName": uploaded_file.name, "FileType": uploaded_file.type}
-
-        with open(os.path.join(parent_media_path, new_file_name), "wb") as f:
-            f.write(uploaded_file.getbuffer())
-        img_file = os.path.join(parent_media_path, new_file_name)
+
         st.sidebar.success("File saved successfully")
-        print(f"File saved successfully to {os.path.abspath(
+        print(f"File saved successfully to {os.path.abspath(img_file)}")
     else:
         st.sidebar.write("You are using a placeholder image, Upload your Image (.jpg for now) to explore")
 
-# def make_segmentation(img_file):
 results = model(img_file)
 img = cv2.imread(img_file)
-names_list = []
 for result in results:
-    # boxes = result.boxes.cpu().numpy()
     # segmentation
-    if result.masks
-        masks = result.masks.data.cpu().numpy()  # masks, (N, H, W)
-    else:
-        masks = []
-
+    masks = [] if not result.masks else result.masks.data.cpu().numpy()
     numCols = len(masks)
     if numCols > 0:
         cols = st.columns(numCols)
+        print(f"Number of instances found: {numCols}")
     else:
-        print(f"Number of Boxes found: {numCols}")
         st.warning("Unable to id Distinct items - Please retry with a clearer Image")
-
-    h, w, _ = img.shape
-    mask = cv2.resize(mask, (w, h))
-    img = overlay(img, mask, color=(0, 255, 0), alpha=0.3)
-    # st.image(rect)
-    # render image-comparison
+    img = apply_masks(img, masks)
 
     st.markdown('')
     st.markdown('##### Slider of Uploaded Image and Segments')
@@ -173,14 +112,6 @@ st.sidebar.divider()
 st.sidebar.markdown('')
 st.sidebar.markdown('#### Distribution of identified items')
 
-# Boolean to resize the dataframe, stored as a session state variable
-st.sidebar.checkbox("Use container width", value=False, key="use_container_width")
-if len(names_list) > 0:
-    df_x = pd.DataFrame(names_list)
-    summary_table = df_x[0].value_counts().rename_axis('unique_values').reset_index(name='counts')
-    st.sidebar.dataframe(summary_table, use_container_width=st.session_state.use_container_width)
-else:
-    st.sidebar.warning("Unable to id Distinct items - Please retry with a clearer Image")
 
 st.markdown('')
 st.markdown('')
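Both input modes now funnel their file through the single save_uploadedfile(file, save_path) helper introduced in utils.py, replacing the two ad-hoc save functions that were removed above. As a rough illustration of the behaviour that helper is expected to have (this sketch is not part of the commit; the in-memory PNG and the temp-directory path are made up for the example, while the real app writes under parent_media_path):

import io
import os
import tempfile

from PIL import Image

# Fake "upload": an RGBA PNG held in a BytesIO buffer, similar to what
# Streamlit's file_uploader / camera_input hand to the app (illustrative only).
buffer = io.BytesIO()
Image.new("RGBA", (64, 64), (255, 0, 0, 128)).save(buffer, format="PNG")
buffer.seek(0)

# Hypothetical target path; the app itself saves under parent_media_path.
save_path = os.path.join(tempfile.gettempdir(), "uploaded_image.jpg")

im = Image.open(buffer)
if im.mode in ("RGBA", "P"):   # JPEG cannot store alpha or palette images
    im = im.convert("RGB")
im.save(save_path)
print(Image.open(save_path).mode)  # RGB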
utils.py
ADDED
@@ -0,0 +1,47 @@
+import numpy as np
+from PIL import Image
+import cv2
+
+
+def save_uploadedfile(uploaded_image, save_path):
+    im = Image.open(uploaded_image)
+    if im.mode in ("RGBA", "P"):
+        im = im.convert("RGB")
+    im.save(save_path)
+
+
+def overlay(image, mask, color, alpha, resize=None):
+    """Combines image and its segmentation mask into a single image.
+
+    Params:
+        image: Training image. np.ndarray,
+        mask: Segmentation mask. np.ndarray,
+        color: Color for segmentation mask rendering. tuple[int, int, int] = (255, 0, 0)
+        alpha: Segmentation mask's transparency. float = 0.5,
+        resize: If provided, both image and its mask are resized before blending them together.
+        tuple[int, int] = (1024, 1024))
+
+    Returns:
+        image_combined: The combined image. np.ndarray
+
+    """
+    colored_mask = np.expand_dims(mask, 0).repeat(3, axis=0)
+    colored_mask = np.moveaxis(colored_mask, 0, -1)
+    masked = np.ma.MaskedArray(image, mask=colored_mask, fill_value=color)
+    image_overlay = masked.filled()
+
+    if resize is not None:
+        image = cv2.resize(image.transpose(1, 2, 0), resize)
+        image_overlay = cv2.resize(image_overlay.transpose(1, 2, 0), resize)
+
+    image_combined = cv2.addWeighted(image, 1 - alpha, image_overlay, alpha, 0)
+
+    return image_combined
+
+
+def apply_masks(img, masks):
+    for mask in masks:
+        h, w, _ = img.shape
+        mask = cv2.resize(mask, (w, h))
+        img = overlay(img, mask, color=(0, 255, 0), alpha=0.3)
+    return img
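For reference, the heart of overlay()/apply_masks() is resize-then-alpha-blend: resize each mask to the image size, paint the masked pixels a solid colour, and blend the painted copy back with cv2.addWeighted. A minimal, self-contained sketch of that idea on synthetic data (dummy image and mask; it uses plain boolean indexing where the committed helper uses np.ma.MaskedArray):

import cv2
import numpy as np

# Synthetic BGR image and a smaller, model-sized binary mask (both made up).
img = np.zeros((240, 320, 3), dtype=np.uint8)
mask = np.zeros((120, 160), dtype=np.uint8)
mask[30:90, 40:120] = 1

# Same steps as apply_masks(): resize the mask to the image size, then blend.
h, w, _ = img.shape
mask = cv2.resize(mask, (w, h), interpolation=cv2.INTER_NEAREST)

colored = img.copy()
colored[mask.astype(bool)] = (0, 255, 0)              # paint masked region green
blended = cv2.addWeighted(img, 1 - 0.3, colored, 0.3, 0)
print(blended.shape)                                   # (240, 320, 3)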