import os

# docTR picks its backend when it is imported, so set the flag first.
os.environ['USE_TORCH'] = '1'

import gradio as gr
from doctr.io import DocumentFile
from doctr.models import from_hub, ocr_predictor

# Recognition model fine-tuned for Tifinagh, pulled from the Hugging Face Hub;
# the detection model keeps its pretrained default weights.
reco_model = from_hub('ayymen/crnn_mobilenet_v3_large_tifinagh')
predictor = ocr_predictor(reco_arch=reco_model, pretrained=True)
title = "Tifinagh OCR"
description = "Upload an image to get the OCR results!"
def ocr(img):
    # docTR reads images from disk, so persist the uploaded PIL image first
    # (convert to RGB in case the upload carries an alpha channel).
    img.convert("RGB").save("out.jpg")
    doc = DocumentFile.from_images("out.jpg")
    output = predictor(doc)

    # Reassemble the recognized words: one text line per detected line,
    # with a blank line between blocks.
    res = ""
    for page in output.pages:
        for block in page.blocks:
            for line in block.lines:
                for word in line.words:
                    res += " " + word.value
                res += "\n"
            res += "\n\n"

    # Write the transcription to a file offered for download
    # ("w" mode already truncates, so no separate clearing step is needed).
    _output_name = "RESULT_OCR.txt"
    with open(_output_name, "w", encoding="utf-8", errors="ignore") as f:
        f.write(res)
        print("Writing into file")
    return res, _output_name
demo = gr.Interface(
    fn=ocr,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Textbox(lines=10, label="Full Text"),
        gr.File(label="Download OCR Results"),
    ],
    title=title,
    description=description,
    examples=[["Examples/1.jpg"], ["Examples/2.jpg"], ["Examples/3.png"]],
)
demo.launch(debug=True)