Spaces:
Runtime error
Runtime error
File size: 887 Bytes
339d133 247cca2 4d78218 339d133 247cca2 03a517d 247cca2 339d133 247cca2 03a517d 247cca2 339d133 247cca2 339d133 247cca2 339d133 247cca2 03a517d 247cca2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 |
import torch
import gradio as gr
from torchvision.transforms import functional as F
from PIL import Image
# Load custom detection weights via torch.hub.
# NOTE: 'ultralytics/yolov8' is not a valid torch.hub repository; the hub
# entrypoint that accepts custom .pt weights lives in 'ultralytics/yolov5',
# which also matches the 'yolov5s.pt' weight file used here.
model = torch.hub.load('ultralytics/yolov5', 'custom', path='yolov5s.pt')
# Define the prediction function used by the Gradio interface.
def predict(image):
    """Run object detection on a single image.

    Args:
        image: input image (PIL.Image from the Gradio Image component).

    Returns:
        tuple: (bboxes, labels) where bboxes is a list of
        [x1, y1, x2, y2, conf, cls] rows from ``results.xyxy[0]`` and
        labels is the class-name string for each detection.
    """
    # Preprocess: PIL image -> CHW float tensor -> add batch dimension.
    image_tensor = F.to_tensor(image)
    image_tensor.unsqueeze_(0)
    # Perform inference with the module-level hub model.
    results = model(image_tensor)
    # Each row of results.xyxy[0] is [x1, y1, x2, y2, confidence, class_idx].
    bboxes = results.xyxy[0].tolist()
    # BUG FIX: the original returned results.names[0] — the name of class 0 —
    # regardless of what was detected. Map each detection's class index
    # (column 5) to its human-readable name instead.
    labels = [results.names[int(row[5])] for row in bboxes]
    return bboxes, labels
# Define the Gradio interface.
# NOTE: the gr.inputs/gr.outputs namespaces and the capture_session flag were
# removed in Gradio 3.x; components are now top-level (gr.Image, gr.JSON).
inputs = gr.Image(type="pil")
# predict returns a (bboxes, labels) tuple, so TWO output components are
# required — the original single Image output mismatched both the arity and
# the type of the return value and would error at request time.
outputs = [gr.JSON(label="bboxes"), gr.JSON(label="labels")]
interface = gr.Interface(fn=predict, inputs=inputs, outputs=outputs)

# Run the interface only when executed as a script (Spaces runs app.py directly).
if __name__ == "__main__":
    interface.launch()