import gradio as gr
import kornia as K
from kornia.core import Tensor
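# Gradio demo: run Kornia edge detectors (Canny, Sobel, simple spatial gradients,
# Laplacian) on an uploaded image and display the inverted edge map.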
def load_img(file):
    # load the image using the rust backend
    img_rgb: Tensor = K.io.load_image(file.name, K.io.ImageLoadType.RGB32)
    img_rgb = img_rgb[None]
    img_gray = K.color.rgb_to_grayscale(img_rgb)
    return img_gray
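# Canny: K.filters.canny returns (magnitude, edges); the magnitude is inverted
# so edges appear dark on a white background.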
def canny_edge_detector(file):
    x_gray = load_img(file)
    x_canny: Tensor = K.filters.canny(x_gray)[0]
    img_out = 1.0 - x_canny.clamp(0.0, 1.0)
    return K.utils.tensor_to_image(img_out)
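# Sobel: gradient magnitude, inverted for display.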
def sobel_edge_detector(file):
    x_gray = load_img(file)
    x_sobel: Tensor = K.filters.sobel(x_gray)
    img_out = 1.0 - x_sobel
    return K.utils.tensor_to_image(img_out)
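# Simple gradients: first- or second-order spatial derivative along x or y.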
def simple_edge_detector(file, order, direction):
    x_gray = load_img(file)
    grads: Tensor = K.filters.spatial_gradient(
        x_gray, order=order
    )  # BxCx2xHxW
    grads_x = grads[:, :, 0]
    grads_y = grads[:, :, 1]
    if direction == "x":
        img_out = 1.0 - grads_x.clamp(0.0, 1.0)
    else:
        img_out = 1.0 - grads_y.clamp(0.0, 1.0)
    return K.utils.tensor_to_image(img_out)
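# Laplacian: second-derivative edge response with a configurable kernel size.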
def laplacian_edge_detector(file, kernel=9):
    x_gray = load_img(file)
    x_laplacian: Tensor = K.filters.laplacian(x_gray, kernel_size=kernel)
    img_out = 1.0 - x_laplacian.clamp(0.0, 1.0)
    return K.utils.tensor_to_image(img_out)
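# Example images bundled with the Space, plus the title/description/article text for the page.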
examples = [["examples/doraemon.png"], ["examples/kornia.png"]]
title = "Kornia Edge Detector"
description = "<p style='text-align: center'>This is a Gradio demo for Kornia's Edge Detector.</p><p style='text-align: center'>To use it, simply upload your image, or click one of the examples to load it, then choose a detector and adjust the controls to tune the result. Read more at the links at the bottom.</p>"
article = "<p style='text-align: center'><a href='https://kornia.readthedocs.io/en/latest/' target='_blank'>Kornia Docs</a> | <a href='https://github.com/kornia/kornia' target='_blank'>Kornia Github Repo</a> | <a href='https://kornia-tutorials.readthedocs.io/en/latest/image_enhancement.html' target='_blank'>Kornia Enhancements Tutorial</a></p>"
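# Show only the parameter widgets that apply to the selected detector; hide the rest.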
def change_layout(choice):
    kernel = gr.update(visible=False)
    order = gr.update(visible=False)
    direction = gr.update(visible=False)
    if choice == "Laplacian":
        return [gr.update(value=3, visible=True), order, direction]
    elif choice == "Simple":
        return [
            kernel,
            gr.update(value=2, visible=True),
            gr.update(value="x", visible=True),
        ]
    return [kernel, order, direction]
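# Called when the detector type changes: update widget visibility and recompute the edge map.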
def Detect(file, choice):
    layout = change_layout(choice)
    if choice == "Canny":
        img = canny_edge_detector(file)
    elif choice == "Sobel":
        img = sobel_edge_detector(file)
    elif choice == "Laplacian":
        img = laplacian_edge_detector(file, 3)  # match the kernel size change_layout puts on the slider
    else:
        img = simple_edge_detector(file, 2, "x")  # match the order/direction change_layout sets
    layout.append(img)
    return layout
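# Called when a parameter or the input image changes: recompute the edge map without touching the layout.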
def Detect_wo_layout(file, choice, kernel, order, direction):
    if choice == "Canny":
        img = canny_edge_detector(file)
    elif choice == "Sobel":
        img = sobel_edge_detector(file)
    elif choice == "Laplacian":
        img = laplacian_edge_detector(file, kernel)
    else:
        img = simple_edge_detector(file, order, direction)
    return img
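# UI: input image and detector controls on the left, edge-map output on the right.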
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="file")
            kernel = gr.Slider(
                minimum=1,
                maximum=7,
                step=2,
                value=3,
                label="kernel_size",
                visible=False,
            )
            order = gr.Radio(
                [1, 2], value=1, label="Derivative Order", visible=False
            )
            direction = gr.Radio(
                ["x", "y"],
                value="x",
                label="Derivative Direction",
                visible=False,
            )
            radio = gr.Radio(
                ["Canny", "Simple", "Sobel", "Laplacian"],
                value="Canny",
                label="Type of Edge Detector",
            )
        with gr.Column():
            image_output = gr.Image(shape=(256, 256))
    gr.Examples(examples, inputs=[image_input])
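    # Re-run detection whenever the detector type, a parameter, or the input image changes.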
    radio.change(
        fn=Detect,
        inputs=[image_input, radio],
        outputs=[kernel, order, direction, image_output],
    )
    kernel.change(
        fn=Detect_wo_layout,
        inputs=[image_input, radio, kernel, order, direction],
        outputs=[image_output],
    )
    order.change(
        fn=Detect_wo_layout,
        inputs=[image_input, radio, kernel, order, direction],
        outputs=[image_output],
    )
    direction.change(
        fn=Detect_wo_layout,
        inputs=[image_input, radio, kernel, order, direction],
        outputs=[image_output],
    )
    image_input.change(
        fn=Detect_wo_layout,
        inputs=[image_input, radio, kernel, order, direction],
        outputs=[image_output],
    )

demo.launch()