Commit
·
56391c7
1
Parent(s):
30012f3
app restoration
Browse files- app.py +151 -224
- requirements.txt +1 -2
app.py
CHANGED
@@ -1,227 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import cv2
|
2 |
-
import
|
3 |
-
import
|
4 |
-
import
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
#
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
results = pose.process(frame_rgb)
|
67 |
-
|
68 |
-
# Draw pose landmarks if enabled
|
69 |
-
if pose_drawing_enabled and results.pose_landmarks: # Check if drawing is enabled
|
70 |
-
mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
|
71 |
-
|
72 |
-
if results.pose_landmarks: # Check if pose landmarks are detected
|
73 |
-
# Get the coordinates of the landmarks
|
74 |
-
landmarks = results.pose_landmarks.landmark
|
75 |
-
|
76 |
-
# Define required variables for actions
|
77 |
-
right_shoulder = landmarks[mp_pose.PoseLandmark.RIGHT_SHOULDER.value]
|
78 |
-
right_wrist = landmarks[mp_pose.PoseLandmark.RIGHT_WRIST.value]
|
79 |
-
left_shoulder = landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value]
|
80 |
-
left_wrist = landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value]
|
81 |
-
right_hip = landmarks[mp_pose.PoseLandmark.RIGHT_HIP.value]
|
82 |
-
left_hip = landmarks[mp_pose.PoseLandmark.LEFT_HIP.value]
|
83 |
-
right_pinky = landmarks[mp_pose.PoseLandmark.RIGHT_PINKY.value]
|
84 |
-
right_index = landmarks[mp_pose.PoseLandmark.RIGHT_INDEX.value]
|
85 |
-
right_thumb = landmarks[mp_pose.PoseLandmark.RIGHT_THUMB.value]
|
86 |
-
left_pinky = landmarks[mp_pose.PoseLandmark.LEFT_PINKY.value]
|
87 |
-
left_index = landmarks[mp_pose.PoseLandmark.LEFT_INDEX.value]
|
88 |
-
left_thumb = landmarks[mp_pose.PoseLandmark.LEFT_THUMB.value]
|
89 |
-
nose = landmarks[mp_pose.PoseLandmark.NOSE.value]
|
90 |
-
near_shoulder_distance = 0.1 # Define the near shoulder distance
|
91 |
-
|
92 |
-
# Check for each action
|
93 |
-
if current_action == 'Raise Right Hand' and right_wrist.y < nose.y: # Condition for raising right hand
|
94 |
-
action_detected = True
|
95 |
-
print("Raise Right Hand")
|
96 |
-
elif current_action == 'Raise Left Hand' and left_wrist.y < nose.y: # Condition for raising left hand
|
97 |
-
action_detected = True
|
98 |
-
print("Raise Left Hand")
|
99 |
-
elif current_action == 'Wave Right Hand' and (right_wrist.x > right_shoulder.x and right_wrist.y < nose.y): # Condition for waving, adjusted to be further from shoulder
|
100 |
-
action_detected = True
|
101 |
-
print("Wave Right Hand")
|
102 |
-
elif current_action == 'Wave Left Hand' and (left_wrist.x < left_shoulder.x and left_wrist.y < nose.y): # Condition for waving, adjusted to be further from shoulder
|
103 |
-
action_detected = True
|
104 |
-
print("Wave Left Hand")
|
105 |
-
elif current_action == 'Clap' and (abs(right_wrist.x - left_wrist.x) < 0.1 and right_wrist.y < left_wrist.y and left_wrist.y < right_shoulder.y): # Condition for clapping based on proximity
|
106 |
-
action_detected = True
|
107 |
-
print("Clap")
|
108 |
-
elif current_action == 'Touch shoulder Right' and (
|
109 |
-
(((right_pinky.x - right_shoulder.x) ** 2 + (right_pinky.y - right_shoulder.y) ** 2) ** 0.5 < near_shoulder_distance) or
|
110 |
-
(((right_index.x - right_shoulder.x) ** 2 + (right_index.y - right_shoulder.y) ** 2) ** 0.5 < near_shoulder_distance) or
|
111 |
-
(((right_thumb.x - right_shoulder.x) ** 2 + (right_thumb.y - right_shoulder.y) ** 2) ** 0.5 < near_shoulder_distance)): # Condition for touching shoulder
|
112 |
-
action_detected = True
|
113 |
-
print("Touch shoulder Right")
|
114 |
-
elif current_action == 'Touch shoulder Left' and (
|
115 |
-
(((left_pinky.x - left_shoulder.x) ** 2 + (left_pinky.y - left_shoulder.y) ** 2) ** 0.5 < near_shoulder_distance) or
|
116 |
-
(((left_index.x - left_shoulder.x) ** 2 + (left_index.y - left_shoulder.y) ** 2) ** 0.5 < near_shoulder_distance) or
|
117 |
-
(((left_thumb.x - left_shoulder.x) ** 2 + (left_thumb.y - left_shoulder.y) ** 2) ** 0.5 < near_shoulder_distance)): # Condition for touching shoulder
|
118 |
-
action_detected = True
|
119 |
-
print("Touch shoulder Left")
|
120 |
-
elif current_action == 'Tilt Head Right':
|
121 |
-
is_nose_near_right_shoulder = (abs(nose.x - right_shoulder.x) < near_shoulder_distance) and (abs(nose.y - right_shoulder.y) < near_shoulder_distance)
|
122 |
-
is_nose_below_left_shoulder = nose.y > left_shoulder.y
|
123 |
-
|
124 |
-
if is_nose_near_right_shoulder or is_nose_below_left_shoulder:
|
125 |
-
action_detected = True
|
126 |
-
print("Tilt Head Right")
|
127 |
-
|
128 |
-
elif current_action == 'Tilt Head Left':
|
129 |
-
is_nose_near_left_shoulder = (abs(nose.x - left_shoulder.x) < near_shoulder_distance) and (abs(nose.y - left_shoulder.y) < near_shoulder_distance)
|
130 |
-
is_nose_above_right_shoulder = nose.y > right_shoulder.y
|
131 |
-
|
132 |
-
if is_nose_near_left_shoulder or is_nose_above_right_shoulder:
|
133 |
-
action_detected = True
|
134 |
-
print("Tilt Head Left")
|
135 |
-
elif current_action == 'Spread Hands':
|
136 |
-
is_right_hand_spread = right_wrist.x < right_shoulder.x and abs(right_wrist.y - right_shoulder.y) < near_shoulder_distance
|
137 |
-
is_left_hand_spread = left_wrist.x > left_shoulder.x and abs(left_wrist.y - left_shoulder.y) < near_shoulder_distance
|
138 |
-
|
139 |
-
if is_right_hand_spread and is_left_hand_spread:
|
140 |
-
action_detected = True
|
141 |
-
print("Spread Hands")
|
142 |
-
elif current_action == 'Spin' and right_shoulder.x > left_shoulder.x: # Condition for spinning
|
143 |
-
action_detected = True
|
144 |
-
print("Spin")
|
145 |
-
elif current_action == 'Jump' and (right_hip.y < left_hip.y): # Condition for jumping
|
146 |
-
action_detected = True
|
147 |
-
print("Jump")
|
148 |
-
|
149 |
-
# Update action based on detection
|
150 |
-
if action_detected:
|
151 |
-
score += 1
|
152 |
-
text_to_display = f'{current_action} success!'
|
153 |
-
ChangeAction() # Call the new function
|
154 |
else:
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
|
188 |
-
|
189 |
-
|
190 |
-
|
191 |
-
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
|
209 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
210 |
else:
|
211 |
-
|
212 |
-
|
213 |
-
|
214 |
-
|
215 |
-
|
216 |
-
|
217 |
-
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
video_writer.write(frame)
|
222 |
-
|
223 |
-
pose.close()
|
224 |
-
cap.release()
|
225 |
-
if video_writer is not None:
|
226 |
-
video_writer.release() # Ensure the video writer is released if still open
|
227 |
-
cv2.destroyAllWindows()
|
|
|
1 |
+
"""Application to demo inpainting, Median and Bilateral Blur using streamlit.
|
2 |
+
|
3 |
+
Run using: streamlit run 10_04_image_restoration_app.py
|
4 |
+
"""
|
5 |
+
|
6 |
+
import streamlit as st
|
7 |
+
import pathlib
|
8 |
+
from streamlit_drawable_canvas import st_canvas
|
9 |
import cv2
|
10 |
+
import numpy as np
|
11 |
+
import io
|
12 |
+
import base64
|
13 |
+
from PIL import Image
|
14 |
+
|
15 |
+
|
16 |
+
# Function to create a download link for output image
|
17 |
+
def get_image_download_link(img, filename, text):
    """Generate an HTML anchor tag that downloads *img* as a JPEG file.

    Args:
        img: PIL Image (or any object exposing ``save(buffer, format=...)``).
        filename: File name the browser should use for the downloaded file.
        text: Visible link text.

    Returns:
        str: An ``<a>`` element with the JPEG bytes embedded as base64 data.
    """
    buffered = io.BytesIO()
    img.save(buffered, format='JPEG')
    img_str = base64.b64encode(buffered.getvalue()).decode()
    # Fix: use the supplied filename (it was previously ignored in favor of a
    # hard-coded placeholder) and declare the correct JPEG MIME type.
    href = f'<a href="data:image/jpeg;base64,{img_str}" download="{filename}">{text}</a>'
    return href
|
24 |
+
|
25 |
+
|
26 |
+
# --- Sidebar setup -------------------------------------------------------
# App title shown in the Streamlit sidebar.
st.sidebar.title('Image Restoration')


# File uploader for the image to restore.
# NOTE(review): the label "Upload Image to restore OK:" reads oddly — the
# trailing "OK" may be an artifact of the original text; confirm the wording.
uploaded_file = st.sidebar.file_uploader("Upload Image to restore OK:", type=["png", "jpg"])
image = None  # decoded OpenCV (BGR) image, set once a file is uploaded
res = None    # final single-result image (RGB) offered for download at the end
|
34 |
+
|
35 |
+
if uploaded_file is not None:
    # Decode the uploaded bytes into an OpenCV image.
    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
    image = cv2.imdecode(file_bytes, 1)  # flag 1 == cv2.IMREAD_COLOR (3-channel BGR)

    # Let the user pick which restoration/filter operation to apply.
    option = st.sidebar.selectbox('Median or Bilateral Blur or Inpaint?', ('None', 'Median Blur', 'Bilateral Blur', 'Image Inpaint'))

    if option == 'Median Blur':
        # Kernel size must be odd: slider steps by 2 starting at 3.
        ksize = st.sidebar.slider("ksize: ", 3, 15, 5, 2)
        image = cv2.medianBlur(image, ksize)
        res = image[:, :, ::-1]  # BGR -> RGB for st.image display
        st.image(res)
    elif option == 'Bilateral Blur':
        dia = st.sidebar.slider("diameter: ", 1, 50, 20)
        sigmaColor = st.sidebar.slider("sigmaColor: ", 0, 250, 200, 10)
        sigmaSpace = st.sidebar.slider("sigmaSpace: ", 0, 250, 100, 10)
        image = cv2.bilateralFilter(image, dia, sigmaColor, sigmaSpace)
        res = image[:, :, ::-1]  # BGR -> RGB for st.image display
        st.image(res)

    elif option == 'Image Inpaint':
        stroke_width = st.sidebar.slider("Stroke width: ", 1, 25, 5)

        # Scale the canvas down so it is at most 800 px wide, preserving
        # the aspect ratio of the uploaded image.
        h, w = image.shape[:2]
        if w > 800:
            h_, w_ = int(h * 800 / w), 800
        else:
            h_, w_ = h, w
        # NOTE(review): PIL's Image.resize below expects (width, height);
        # passing (h_, w_) looks transposed — confirm against canvas output.

        # Drawing canvas where the user paints (freehand) the region to inpaint.
        canvas_result = st_canvas(
            fill_color='white',
            stroke_width=stroke_width,
            stroke_color='black',
            background_image=Image.open(uploaded_file).resize((h_, w_)),
            update_streamlit=True,
            height=h_,
            width=w_,
            drawing_mode='freedraw',
            key="canvas",
        )

        stroke = canvas_result.image_data  # RGBA array of drawn strokes, or None
        if stroke is not None:
            if st.sidebar.checkbox('show mask'):
                st.image(stroke)

            # Build the inpainting mask from the strokes' alpha channel,
            # resized back to the original image dimensions.
            mask = cv2.split(stroke)[3]
            mask = np.uint8(mask)
            mask = cv2.resize(mask, (w, h))

            st.sidebar.caption('Happy with the selection?')
            option = st.sidebar.selectbox('Mode', ['None', 'Telea', 'NS', 'Compare both'])

            if option == 'Telea':
                st.subheader('Result of Telea')
                res = cv2.inpaint(src=image, inpaintMask=mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)[:, :, ::-1]
                st.image(res)
            elif option == 'Compare both':
                # Run both inpainting algorithms side by side.
                col1, col2 = st.columns(2)
                res1 = cv2.inpaint(src=image, inpaintMask=mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)[:, :, ::-1]
                res2 = cv2.inpaint(src=image, inpaintMask=mask, inpaintRadius=3, flags=cv2.INPAINT_NS)[:, :, ::-1]
                with col1:
                    st.subheader('Result of Telea')
                    st.image(res1)
                with col2:
                    st.subheader('Result of NS')
                    st.image(res2)
                # Separate download links for each algorithm's output
                # (this branch deliberately leaves `res` unset, so the
                # generic download link at the bottom is not shown).
                if res1 is not None:
                    result1 = Image.fromarray(res1)
                    st.sidebar.markdown(
                        get_image_download_link(result1, 'telea.png', 'Download Output of Telea'),
                        unsafe_allow_html=True)
                if res2 is not None:
                    result2 = Image.fromarray(res2)
                    st.sidebar.markdown(
                        get_image_download_link(result2, 'ns.png', 'Download Output of NS'),
                        unsafe_allow_html=True)

            elif option == 'NS':
                st.subheader('Result of NS')
                res = cv2.inpaint(src=image, inpaintMask=mask, inpaintRadius=3, flags=cv2.INPAINT_NS)[:, :, ::-1]
                st.image(res)
            else:
                # 'None' selected: nothing to do yet.
                pass

    # Offer a download link whenever a single-result operation produced output.
    if res is not None:
        result = Image.fromarray(res)
        st.sidebar.markdown(
            get_image_download_link(result, 'output.png', 'Download Output'),
            unsafe_allow_html=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
requirements.txt
CHANGED
@@ -1,6 +1,5 @@
|
|
1 |
-
# opencv-python
|
2 |
numpy
|
3 |
streamlit
|
4 |
opencv-python-headless
|
5 |
pillow
|
6 |
-
|
|
|
|
|
1 |
numpy
|
2 |
streamlit
|
3 |
opencv-python-headless
|
4 |
pillow
|
5 |
+
streamlit_drawable_canvas
|