from deepsparse import Pipeline
import time
import gradio as gr
from PIL import Image
import numpy
from annotate import *

markdown = '''
# 🏞 YOLACT Image Segmentation Pipeline with DeepSparse

Image segmentation is the task of assigning a label to each pixel in an image. In semantic segmentation, the label map represents the predicted category for each pixel; instance segmentation additionally distinguishes individual objects of the same category.

![Fruit Segmentation](https://huggingface.co/spaces/neuralmagic/cv-yolact/resolve/main/seg.png)

### What is DeepSparse?
DeepSparse is a sparsity-aware inference runtime offering GPU-class performance on CPUs and APIs to integrate ML into your application. DeepSparse provides sparsified pipelines for computer vision and NLP. The pipelines are similar to Hugging Face pipelines but are faster because the models have been pruned and quantized.

### What is YOLACT?
YOLACT stands for "You Only Look At Coefficients." It was one of the first methods to perform instance segmentation in real time, and it extends the popular YOLO (You Only Look Once) family of models. The YOLACT pipeline enables real-time instance segmentation.

Here is sample code for a YOLACT image segmentation pipeline:
```
from deepsparse import Pipeline

pipeline = Pipeline.create(
    task="yolact",
    model_path="zoo:cv/segmentation/yolact-darknet53/pytorch/dbolya/coco/pruned82_quant-none",
)
inference = pipeline(images=[image])  # a PIL image, numpy array, or path to a local image file
print(inference)
```

### Example image segmentation use case
An example use case for image segmentation is the sorting and packing of fruit. Accurate detection and segmentation can improve quality and lower inspection costs. Such tasks need real-time inference, which is usually achieved by acquiring expensive compute resources such as GPUs. What if the same could be achieved using CPUs? Sparsified and quantized YOLACT models enable you to achieve GPU-class performance on commodity CPUs.
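The latency numbers shown in this demo come from wrapping a single pipeline call in `time.perf_counter`. Here is a minimal sketch of that measurement (the image path is a placeholder; any local image works):
```
import time
from deepsparse import Pipeline

sparse_pipeline = Pipeline.create(
    task="yolact",
    model_path="zoo:cv/segmentation/yolact-darknet53/pytorch/dbolya/coco/pruned82_quant-none",
)

start = time.perf_counter()
sparse_pipeline(images=["fruits.jpg"])  # placeholder path to a local image
latency_ms = (time.perf_counter() - start) * 1000.0
print(f"Sparse latency: {latency_ms:.1f} ms")
```
Swap in the dense model stub to compare the two on the same machine.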
Here's an illustration of the sparse YOLACT model segmenting fruit in real time on a 4-core laptop.

![Fruit Segmentation](https://huggingface.co/spaces/neuralmagic/cv-yolact/resolve/main/results.gif)

### Useful Resources
[Real-time Instance Segmentation with Sparse YOLACT on a Laptop](https://neuralmagic.com/blog/)
'''

task = "yolact"

dense_pipeline = Pipeline.create(
    task=task,
    model_path="zoo:cv/segmentation/yolact-darknet53/pytorch/dbolya/coco/base-none",
    class_names="coco",  # for a custom model, pass a list of the classes it will classify or a path to a JSON file containing them
)

sparse_pipeline = Pipeline.create(
    task=task,
    model_path="zoo:cv/segmentation/yolact-darknet53/pytorch/dbolya/coco/pruned82_quant-none",
    class_names="coco",  # for a custom model, pass a list of the classes it will classify or a path to a JSON file containing them
)


def run_pipeline(image):
    # Time the dense (unoptimized) model.
    dense_start = time.perf_counter()
    dense_output = dense_pipeline(images=[image], confidence_threshold=0.2, nms_threshold=0.5)
    dense_annotation = annotate_image(image=image, prediction=dense_output)
    dense_result = Image.fromarray(dense_annotation)
    dense_end = time.perf_counter()
    dense_duration = (dense_end - dense_start) * 1000.0

    # Time the pruned and quantized (sparse) model.
    sparse_start = time.perf_counter()
    sparse_output = sparse_pipeline(images=[image], confidence_threshold=0.2, nms_threshold=0.5)
    sparse_annotation = annotate_image(image=image, prediction=sparse_output)
    sparse_result = Image.fromarray(sparse_annotation)
    sparse_end = time.perf_counter()
    sparse_duration = (sparse_end - sparse_start) * 1000.0

    return sparse_result, sparse_duration, dense_result, dense_duration


with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown(markdown)
        with gr.Column():
            gr.Markdown("""
            ### 🔥 YOLACT Image Segmentation demo
            Using [dbolya/yolact](https://sparsezoo.neuralmagic.com/models/cv%2Fsegmentation%2Fyolact-darknet53%2Fpytorch%2Fdbolya%2Fcoco%2Fpruned82_quant-none)
            """)
            image = gr.Image()
            btn = gr.Button("Submit")
            gr.Examples([["Fruits.png"]], inputs=[image])
            dense_answers = gr.Image(label="Dense model answer")
            dense_duration = gr.Number(label="Dense Latency (ms):")
            sparse_answers = gr.Image(label="Sparse model answer")
            sparse_duration = gr.Number(label="Sparse Latency (ms):")
            btn.click(
                run_pipeline,
                inputs=[image],
                outputs=[sparse_answers, sparse_duration, dense_answers, dense_duration],
            )

if __name__ == "__main__":
    demo.launch()
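# To run this demo locally, a sketch of the setup (the dependency list is an
# assumption based on the imports above; annotate.py, which provides
# annotate_image, must sit next to this script along with Fruits.png):
#   pip install deepsparse gradio pillow numpy
#   python app.py  # or whatever this file is named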