# NOTE(review): the three lines below were Hugging Face file-viewer page
# residue ("gwkrsrch's picture / Update app.py / 81add38") accidentally
# captured with the source; commented out so the file parses.
# gwkrsrch's picture
# Update app.py
# 81add38
"""
Donut
Copyright (c) 2022-present NAVER Corp.
MIT License
"""
import argparse
import gradio as gr
import torch
from donut import DonutModel
def demo_process(input_img):
    """Run Donut inference on a kuzushiji document image.

    Args:
        input_img: Input image (PIL image, as configured by the Gradio
            ``Image(type="pil")`` component).

    Returns:
        str: The model's predicted ``text_sequence`` with all spaces
        removed (kuzushiji transcriptions carry no meaningful spaces).
    """
    # Module-level globals are only read here, so no `global` declaration
    # is needed (the original also declared `task_name`, which is never
    # defined anywhere in this file).
    output = pretrained_model.inference(image=input_img, prompt=task_prompt)["predictions"][0]
    return output["text_sequence"].replace(" ", "")  # temp: strip spaces
# Task prompt token for the kuzushiji-finetuned Donut checkpoint.
# Plain string literal: the original used an f-string with no placeholders.
task_prompt = "<s_kuzushiji>"
# Downloads/loads the fine-tuned weights from the Hugging Face Hub.
pretrained_model = DonutModel.from_pretrained("naver-clova-ix/donut-base-finetuned-kuzushiji")
pretrained_model.eval()  # inference mode: disable dropout / training behavior
# Build and launch the Gradio UI. `inputs`/`outputs` wire the single-image
# input through demo_process to a plain text output.
demo = gr.Interface(
    fn=demo_process,
    # gr.inputs.Image is the deprecated Gradio 2.x namespace (removed in 4.x);
    # the top-level gr.Image component is the supported equivalent.
    inputs=gr.Image(type="pil"),
    outputs="text",
    # Plain string: the original used an f-string with no placeholders.
    title="Donut 🍩 demonstration for Kuzushiji Decoding",
    description="""This is a toy example for decoding kuzushiji (old Japanese cursive characters, γγšγ—ε­—) documents with a single E2E model, `Document Understanding Transformer` (Donut 🍩, ECCV-22). This particular model is fine-tuned on <a href="http://codh.rois.ac.jp/char-shape/">Kuzushiji Dataset</a>. To use it, simply upload a kuzushiji document image or use one of the examples below and click `Submit`. Results will show up in a few seconds.<br>* Note that this demo is running on a small resource environment, `basic CPU plan` (`2 vCPU, 16GiB RAM`).<br>* Demonstrations for other types of documents/tasks are available at https://github.com/clovaai/donut<br>
* More details of Donut are available at <a href="https://arxiv.org/abs/2111.15664">Paper</a>, <a href="https://github.com/clovaai/donut">GitHub</a>, and <a href="https://huggingface.co/docs/transformers/model_doc/donut">Huggingface πŸ€— Implementation Page</a>.<br>
* Kuzushiji Dataset is from <a href="http://codh.rois.ac.jp/char-shape/">Dataset Link</a> (Reference: γ€Žζ—₯ζœ¬ε€ε…Έη±γγšγ—ε­—γƒ‡γƒΌγ‚Ώγ‚»γƒƒγƒˆγ€οΌˆε›½ζ–‡η ”γ»γ‹ζ‰€θ”΅οΌCODH加ε·₯οΌ‰doi:10.20676/00000340).""",
    examples=[["sample1.jpg"], ["sample2.jpg"]],
    cache_examples=False,  # recompute examples on click; avoids stale cache on Spaces
)
demo.launch()