import gradio as gr
import torch
from ultralyticsplus import YOLO, render_result

def seal_detection(
    image,
    conf_threshold,
):
    """
    Object detection with a YOLOv8 model, detecting basic Naruto hand seals.

    Args:
        image: Input image
        conf_threshold: Confidence threshold

    Returns:
        Rendered image with the detected seals
    """
    # `yolo_model` is loaded in the __main__ block below and is only used after launch.
    results = yolo_model.predict(image, conf=conf_threshold)
    render = render_result(model=yolo_model, image=image, result=results[0])
    return render

def clear():
    """
    Reset the image input, confidence slider and output image to their defaults.
    """
    image_update = gr.Image.update(value=None)
    conf_update = gr.Slider.update(value=0.5)
    return image_update, conf_update, image_update

with gr.Blocks() as demo:
    gr.Markdown("# Naruto Hand Seal Detection with YOLOv8")
    with gr.Accordion("README", open=False):
        gr.Markdown(
            """
            ### Introduction

            As a data science practitioner and a fan of Japanese manga, I was eager to apply my skills to a project that combined these interests. Drawing on one of my favourite animes from childhood, I decided to develop a computer vision model that detects hand seals from the **Naruto** anime.

            Hand seals are an integral part of the Naruto universe, used by characters to activate powerful techniques. There are twelve basic seals, each named after an animal in the Chinese Zodiac, and different sequences of hand seals are required for different techniques.

            As a fan of the series, I knew that accurately detecting and classifying hand seals would be a difficult but rewarding challenge, and I was excited to tackle it with my machine learning and computer vision skills. One key challenge was the lack of a good dataset of labelled images for training, so I had to build my own: besides capturing images of myself performing the seals, I augmented the dataset with YouTube screenshots of both real people and anime characters performing the seals.

            ### Problem Statement

            The challenge was to develop a model that could accurately identify the hand seal being performed.

            In this project, I leveraged transfer learning from the <a href="https://github.com/ultralytics/ultralytics" target="_blank">YOLOv8</a> model to customize an object detection model specifically for the hand seals. Developed by the Ultralytics team, YOLOv8 is the latest addition to the YOLO family and offers high performance while being easy to train and use.
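
            For reference, fine-tuning a YOLOv8 detector on a custom dataset with the `ultralytics` package looks roughly like the sketch below; the checkpoint and dataset config names here are illustrative, not the exact ones used for this Space.

            ```python
            from ultralytics import YOLO

            # Start from a pretrained YOLOv8 checkpoint and fine-tune it on a custom
            # hand seal dataset (a YOLO-format data YAML listing the 12 seal classes).
            model = YOLO("yolov8n.pt")  # illustrative: pretrained nano checkpoint
            model.train(
                data="hand_seals.yaml",  # hypothetical dataset config
                epochs=100,
                imgsz=640,
            )
            ```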
"""
)
    with gr.Row():
        with gr.Column():
            image = gr.Image(source="upload", type="pil", label="Image Upload", interactive=True)
            slider = gr.Slider(minimum=0.05, maximum=1.0, value=0.5, step=0.05, label="Confidence Threshold")
            with gr.Row():
                clear_form = gr.Button("Reset")
                submit = gr.Button("Predict")
        with gr.Column():
            outputs = gr.Image(type="filepath", label="Output Image", interactive=False)
    gr.Markdown(
        """
        <p style="text-align:center">
        Happy to connect on <a href="https://www.linkedin.com/in/shawn-sing/" target="_blank">LinkedIn</a> or visit my <a href="https://github.com/eeshawn11/" target="_blank">GitHub</a> to check out my other projects.
        </p>
        """
    )
    clear_form.click(fn=clear, inputs=None, outputs=[image, slider, outputs], show_progress=False)
    submit.click(fn=seal_detection, inputs=[image, slider], outputs=outputs)

if __name__ == "__main__":
    # Load the fine-tuned hand seal detector from the Hugging Face Hub.
    yolo_model = YOLO('eeshawn11/naruto_hand_seal_detection')
    yolo_model.overrides['max_det'] = 20  # cap the number of detections per image
    # Use the GPU when available, otherwise fall back to CPU.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    yolo_model.to(device)
    demo.queue(api_open=False, max_size=10)
    demo.launch()