import gradio as gr
import torch
from PIL import Image
import json

# Example images
torch.hub.download_url_to_file(
    'https://iiif.dl.itc.u-tokyo.ac.jp/iiif/genji/TIFF/A00_6587/01/01_0004.tif/full/1024,/0/default.jpg',
    '『源氏物語』(東京大学総合図書館所蔵).jpg')
torch.hub.download_url_to_file(
    'https://rmda.kulib.kyoto-u.ac.jp/iiif/RB00007030/01/RB00007030_00003_0.ptif/full/1024,/0/default.jpg',
    '『源氏物語』(京都大学所蔵).jpg')
torch.hub.download_url_to_file(
    'https://kotenseki.nijl.ac.jp/api/iiif/100312034/v4/HRSM/HRSM-00396/HRSM-00396-00012.tif/full/1024,/0/default.jpg',
    '『平家物語』(国文学研究資料館提供).jpg')

# Model
# model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # force_reload=True to update
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', source="local")


def yolo(im, size=1024):
    g = size / max(im.size)  # gain
    im = im.resize(tuple(int(x * g) for x in im.size), resample=Image.Resampling.LANCZOS)  # resize
    results = model(im)  # inference
    results.render()  # updates results.imgs with boxes and labels
    df = results.pandas().xyxy[0].to_json(orient="records")
    res = json.loads(df)
    return [Image.fromarray(results.imgs[0]), res]


inputs = gr.inputs.Image(type='pil', label="Original Image")
outputs = [
    gr.outputs.Image(type="pil", label="Output Image"),
    gr.outputs.JSON(label="Output JSON"),
]
title = "YOLOv5 NDL-DocL Datasets"
description = "YOLOv5 NDL-DocL Datasets Gradio demo for object detection. Upload an image or click an example image to run detection."
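# Shape of the "Output JSON" component (a sketch; values are illustrative and the label
# names depend on best.pt): results.pandas().xyxy[0] in yolo() yields one row per
# detection, so each record in the returned list looks like
#     {"xmin": 12.3, "ymin": 45.6, "xmax": 789.0, "ymax": 1011.2,
#      "confidence": 0.87, "class": 0, "name": "<class name from best.pt>"}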

article = "YOLOv5 NDL-DocL Datasets is an object detection model trained on the NDL-DocL Datasets."

examples = [
    ['『源氏物語』(東京大学総合図書館所蔵).jpg'],
    ['『源氏物語』(京都大学所蔵).jpg'],
    ['『平家物語』(国文学研究資料館提供).jpg'],
]

gr.Interface(
    yolo,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
    theme="huggingface",
).launch(enable_queue=True)  # cache_examples=True,