from io import BytesIO

import torch
from fastapi import FastAPI, Response, status, UploadFile
from PIL import Image
from torchvision.models.detection import (FasterRCNN_ResNet50_FPN_V2_Weights,
                                           fasterrcnn_resnet50_fpn_v2)
from torchvision.transforms.v2.functional import pil_to_tensor, to_pil_image
from torchvision.utils import draw_bounding_boxes
app = FastAPI(docs_url='/', title='Test PyTorch COCO Object Detection')

# Step 1: Initialize model with the best available weights
weights = FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT
model = fasterrcnn_resnet50_fpn_v2(weights=weights, box_score_thresh=0.9)
model.eval()

# Step 2: Initialize the inference transforms
preprocess = weights.transforms()


@app.get('/healthcheck')
async def healthcheck():
    return Response(status_code=status.HTTP_200_OK)
@app.post('/detectObjectsFromURL')
async def infer(image: UploadFile):
    # Step 3: Read the uploaded file into a uint8 image tensor.
    # UploadFile.filename is only the client-side name, so the bytes must be
    # read from the upload itself rather than from a path on disk.
    contents = await image.read()
    img = pil_to_tensor(Image.open(BytesIO(contents)).convert('RGB'))
    batch = [preprocess(img)]

    # Step 4: Use the model and visualize the prediction
    with torch.no_grad():
        prediction = model(batch)[0]
    labels = [weights.meta["categories"][i] for i in prediction["labels"]]
    box = draw_bounding_boxes(img, boxes=prediction["boxes"],
                              labels=labels,
                              colors="red",
                              width=4, font_size=30)
    im: Image.Image = to_pil_image(box.detach())
    with BytesIO() as bio:
        im.save(bio, format='PNG')
        return Response(content=bio.getvalue(), media_type='image/png')
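

# --- Usage sketch (not part of the app) -----------------------------------
# A minimal way to run and exercise the service locally, assuming this file
# is saved as app.py, uvicorn is installed, and dog.jpg is any local image
# (both names are just examples):
#
#   uvicorn app:app --host 0.0.0.0 --port 8000
#   curl -F "image=@dog.jpg" http://localhost:8000/detectObjectsFromURL \
#        --output detections.png
#
# The endpoint responds with the image re-encoded as PNG, with red boxes
# drawn around detections scoring above box_score_thresh=0.9.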