import gradio as gr
import torch
from PIL import Image
import gdown

'''
# Download the model weights (best.pt) from Google Drive
url = "https://drive.google.com/uc?id=1-ZIa4KsSjhup4Pep70uBvI4BjnSUbocX"
output = "best.pt"
gdown.download(url, output, quiet=False)
'''
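# Illustrative check (not in the original app): the torch.hub.load call below expects
# 'best.pt' in the working directory. If the file is not bundled with the Space,
# uncomment the gdown block above, or fail early with a clearer message like this:
# import os
# if not os.path.exists("best.pt"):
#     raise FileNotFoundError("best.pt not found - uncomment the gdown block above to download it")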
# Example images (downloaded at startup so they can be offered as Gradio examples)
torch.hub.download_url_to_file(
    'https://iiif.dl.itc.u-tokyo.ac.jp/iiif/genji/TIFF/A00_6587/01/01_0004.tif/full/1024,/0/default.jpg',
    '『源氏物語』(東京大学総合図書館所蔵).jpg')
torch.hub.download_url_to_file(
    'https://rmda.kulib.kyoto-u.ac.jp/iiif/RB00007030/01/RB00007030_00003_0.ptif/full/1024,/0/default.jpg',
    '『源氏物語』(京都大学所蔵).jpg')
torch.hub.download_url_to_file(
    'https://kotenseki.nijl.ac.jp/api/iiif/100312034/v4/HRSM/HRSM-00396/HRSM-00396-00012.tif/full/1024,/0/default.jpg',
    '『平家物語』(国文学研究資料館提供).jpg')

# Model: load the custom character-detection checkpoint (best.pt) via the local YOLOv5 code
# model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # force_reload=True to update
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', source="local")
def yolo(im, size=1024):
    g = size / max(im.size)  # gain: scale the longest side down/up to `size`
    im = im.resize(tuple(int(x * g) for x in im.size), Image.ANTIALIAS)  # resize with anti-alias (Lanczos) filter
    results = model(im)  # inference
    results.render()  # updates results.imgs with boxes and labels
    return Image.fromarray(results.imgs[0])
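
# Illustrative local usage (not part of the Space itself): run the detector on one of the
# sample images downloaded above and save the annotated result; assumes best.pt and the
# image file are present in the working directory.
# annotated = yolo(Image.open('『源氏物語』(東京大学総合図書館所蔵).jpg'))
# annotated.save('detected.jpg')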
inputs = gr.inputs.Image(type='pil', label="Original Image")
outputs = gr.outputs.Image(type="pil", label="Output Image")

title = "YOLOv5 Character"
description = "YOLOv5 Character Gradio demo for object detection. Upload an image or click one of the example images to use it."
article = "<p style='text-align: center'>YOLOv5 Character is an object detection model trained on the <a href=\"http://codh.rois.ac.jp/char-shape/\">日本古典籍くずし字データセット (Kuzushiji Dataset)</a>.</p>"
examples = [['『源氏物語』(東京大学総合図書館所蔵).jpg'],
            ['『源氏物語』(京都大学所蔵).jpg'],
            ['『平家物語』(国文学研究資料館提供).jpg']]

gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article,
             examples=examples, theme="huggingface").launch(enable_queue=True)  # cache_examples=True,