# Copied megadetector section from https://huggingface.co/spaces/hlydecker/MegaDetector_v5

# %%
# all imports
import gradio as gr
import torch
import torchvision
import numpy as np
from PIL import Image

# %%
# Loads the model through the YOLOv5 GitHub repo, but you need to have the
# MegaDetector weights available locally at this path.
model = torch.hub.load('ultralytics/yolov5', 'custom',
                       "/home/vic/vic_data/dlclive4mega/models/md_v5b.0.0.pt",
                       force_reload=True)

# %%
# Not sure if we need to resize... maybe just use the original image and see?
"""
def yolo(im):  # size=640):
    g = (size / max(im.size))  # gain
    im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS)  # resize
    model = torch.hub.load('ultralytics/yolov5', 'custom',
                           "/home/vic/vic_data/dlclive4mega/models/md_v5b.0.0.pt",
                           force_reload=True)
    results = model(im)  # inference
    results.render()  # updates results.imgs with boxes and labels
    return Image.fromarray(results.imgs[0])
"""


def yolo(im):  # size=640):
    # Reuse the globally loaded model instead of re-downloading the hub repo
    # on every call.
    results = model(im)  # inference
    results.render()  # updates results.imgs with boxes and labels
    return Image.fromarray(results.imgs[0])


# %%
# inputs = [image, chosen_model, size]
# This is where the input and output images are exposed in the interface
# (legacy gr.inputs/gr.outputs API, removed in newer Gradio releases).
inputs = gr.inputs.Image(type="pil", label="Input Image")
outputs = gr.outputs.Image(type="pil", label="Output Image")

# %%
title = "MegaDetector with DeepLabCut pose estimation"
description = "Detect and identify animals, people and vehicles in camera trap images followed by generating poses for humans and animals"
article = "Detect and identify animals, people and vehicles in camera trap images followed by generating poses for humans and animals"
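
# %%
# Optional sketch (not in the copied space): if resizing before inference turns out
# to be useful after all, the commented-out version above has two pitfalls with
# current Pillow -- Image.ANTIALIAS is deprecated (Image.Resampling.LANCZOS is the
# replacement) and Image.resize() expects a tuple, not a generator. A fixed helper
# could look like this; the 640 px default mirrors the commented-out `size` argument
# and is an assumption here.
def resize_keep_aspect(im, size=640):
    g = size / max(im.size)  # gain so the longest side becomes `size`
    new_size = tuple(int(x * g) for x in im.size)
    return im.resize(new_size, Image.Resampling.LANCZOS)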
" # %% #running the actual gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, theme="huggingface").launch(enable_queue=True)