import gradio as gr
from transformers import DetrImageProcessor, DetrForObjectDetection
import torch
import supervision as sv
import json
import requests
from PIL import Image
import numpy as np

# image processor from the base DETR checkpoint; detection head fine-tuned for X-ray threat items
image_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("Guy2/AirportSec-150epoch")
# model = DetrForObjectDetection.from_pretrained("taroii/airport-security")

# class ids predicted by the fine-tuned model
id2label = {0: 'dangerous-items', 1: 'Gun', 2: 'Knife', 3: 'Pliers', 4: 'Scissors', 5: 'Wrench'}

def analyze(url):
    # download the image and make sure it has three channels
    image = Image.open(requests.get(url, stream=True).raw)
    image = image.convert('RGB')

    # run DETR inference
    with torch.no_grad():
        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)

    # rescale predictions to the original resolution and keep detections above 0.8 confidence
    image = np.array(image)
    target_sizes = torch.tensor([image.shape[:2]])
    results = image_processor.post_process_object_detection(
        outputs=outputs,
        threshold=0.8,
        target_sizes=target_sizes
    )[0]

    # wrap the raw results in a supervision Detections object and apply NMS
    detections = sv.Detections.from_transformers(transformers_results=results).with_nms(threshold=0.5)

    # collect [box, confidence, class name] per detection
    # (the tuple layout when iterating Detections depends on the supervision version)
    # labels = [str([list(xyxy), confidence, id2label[class_id]]) for xyxy, _, confidence, class_id, _ in detections]
    labels = [[list(xyxy), confidence, id2label[class_id]] for xyxy, _, confidence, class_id, _ in detections]
    print(labels)

    return str([image.shape[:2], labels])
    # json_list = json.dumps(labels)
    # return json_list

# expose the detector as a simple text-in / text-out Gradio app
gr.Interface(fn=analyze, inputs="text", outputs="text").launch()
# gr.Interface(fn=analyze, inputs="text", outputs="image").launch()
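
# A minimal sketch of exercising the detector without the web UI, assuming a
# reachable direct image URL (the URL below is a hypothetical placeholder):
#
#     print(analyze("https://example.com/xray-scan.jpg"))
#
# The returned string has the form
# "[(height, width), [[[x1, y1, x2, y2], confidence, 'class name'], ...]]".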