alexrods committed
Commit 54a96ff • 1 Parent(s): 69cb9ce

fix error in app.py
app.py
CHANGED
@@ -5,11 +5,6 @@ from PIL import Image
 from huggingface_hub import from_pretrained_keras
 import cv2
 
-st.header("Segmentacion de partes del cuerpo humano")
-
-st.markdown("Sube una imagen o selecciona un ejemplo para segmentar las distintas partes del cuerpo humano")
-
-file_imagen = st.file_uploader("Sube aqui tu imagen", type=["png", "jpg", "jpeg"])
 
 model = from_pretrained_keras("keras-io/deeplabv3p-resnet50")
 
@@ -27,12 +22,14 @@ def read_image(image):
     image = image / 127.5 - 1
     return image
 
+
 def infer(model, image_tensor):
     predictions = model.predict(np.expand_dims((image_tensor), axis=0))
     predictions = np.squeeze(predictions)
     predictions = np.argmax(predictions, axis=2)
     return predictions
 
+
 def decode_segmentation_masks(mask, colormap, n_classes):
     r = np.zeros_like(mask).astype(np.uint8)
     g = np.zeros_like(mask).astype(np.uint8)
@@ -45,12 +42,14 @@ def decode_segmentation_masks(mask, colormap, n_classes):
     rgb = np.stack([r, g, b], axis=2)
     return rgb
 
+
 def get_overlay(image, colored_mask):
     image = tf.keras.preprocessing.image.array_to_img(image)
     image = np.array(image).astype(np.uint8)
     overlay = cv2.addWeighted(image, 0.35, colored_mask, 0.65, 0)
     return overlay
 
+
 def segmentation(input_image):
     image_tensor = read_image(input_image)
     prediction_mask = infer(image_tensor=image_tensor, model=model)
@@ -58,10 +57,16 @@ def segmentation(input_image):
     overlay = get_overlay(image_tensor, prediction_colormap)
     return (overlay, prediction_colormap)
 
+
 # i = gr.inputs.Image()
 # o = [gr.outputs.Image('pil'), gr.outputs.Image('pil')]
+st.header("Segmentacion de partes del cuerpo humano")
+
+st.markdown("Sube una imagen o selecciona un ejemplo para segmentar las distintas partes del cuerpo humano")
 
-examples = ["example_image_1.jpg", "example_image_2.jpg", "example_image_3.jpg"]
+file_imagen = st.file_uploader("Sube aqui tu imagen", type=["png", "jpg", "jpeg"])
+
+examples = ["example_image_1.jpg", "example_image_2.jpg", "example_image_3.jpg"]
 
 col1, col2, col3 = st.columns(3)
 with col1:
@@ -82,6 +87,9 @@ with col3:
     if st.button("Corre ejemplo 1"):
         file_imagen = examples[2]
 
+# if archivo_imagen is not None:
+
+
 article = "<div style='text-align: center;'><a href='https://keras.io/examples/vision/deeplabv3_plus/' target='_blank'>Keras example by Praveen Kaushik</a></div>"
 # gr.Interface(segmentation, i, o, examples=examples, allow_flagging=False, analytics_enabled=False,
 # title=title, description=description, article=article).launch(enable_queue=True)
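For reference, here is a minimal sketch of how the uploaded file (or a selected example path) could be passed to segmentation() and displayed. This glue code is not part of the commit shown above: the st.image calls, the captions, and the assumption that segmentation() accepts a decoded RGB array are illustrative only, since the actual wiring lives in an unchanged part of app.py.

# Hypothetical wiring (not from this commit): run segmentation on the
# uploaded file or on a selected example path and show both outputs.
if file_imagen is not None:
    # st.file_uploader returns a file-like object; the example entries are
    # plain file paths. PIL's Image.open accepts either one.
    input_image = np.array(Image.open(file_imagen).convert("RGB"))
    overlay, colored_mask = segmentation(input_image)
    st.image(overlay, caption="Overlay")
    st.image(colored_mask, caption="Segmentation mask")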