import pandas as pd
from transformers import pipeline

import PIL
from PIL import Image
from PIL import ImageDraw
import gradio as gr
import torch
import easyocr
import omegaconf
import cv2
import json

from vietocr.vietocr.tool.predictor import Predictor
from vietocr.vietocr.tool.config import Cfg
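
# Pipeline overview: EasyOCR locates text regions, VietOCR reads the
# Vietnamese text in each region, and an extractive QA model maps the
# recognized text plus user-supplied labels to structured fields.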

# Configure VietOCR
config = Cfg.load_config_from_name('vgg_transformer')
# config = Cfg.load_config_from_file('vietocr/config.yml')
# config['weights'] = '/Users/bmd1905/Desktop/pretrain_ocr/vi00_vi01_transformer.pth'

config['cnn']['pretrained'] = True
config['predictor']['beamsearch'] = True
config['device'] = 'cpu'  # use 'mps' on Apple Silicon or 'cuda' if available
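# Note: beam search trades speed for accuracy; on CPU it can dominate
# per-request latency, so consider disabling it if the demo feels slow.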

recognizer = Predictor(config)
model_name = "microsoft/xdoc-base-squad2.0"
nlp = pipeline('question-answering', model=model_name)
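# 'question-answering' pipelines are extractive: each answer below is a span
# copied out of the OCR text, not generated text.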

def query(doc, labels):
    """Ask the QA model one question per comma-separated label and collect the answers."""
    questions = labels.split(", ")
    result = {}
    for question in questions:
        QA_input = {
            'question': question + '?',
            'context': doc,
        }
        res = nlp(QA_input)
        print(res)
        result[question] = res['answer']
    return result
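
# Hypothetical example (values invented for illustration):
#   query('Người nộp: Nguyễn Văn A', 'Người nộp')
#   -> {'Người nộp': 'Nguyễn Văn A'}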
    
def draw_boxes(image, bounds, color='yellow', width=2):
    """Draw EasyOCR's detected boxes on a PIL image; each bound is (bbox, text, prob)."""
    draw = ImageDraw.Draw(image)
    for bound in bounds:
        p0, p1, p2, p3 = bound[0]  # four corner points of the detected box
        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
    return image

def inference(filepath, lang, labels):
    img = cv2.imread(filepath)
    height, width, _ = img.shape  # cv2 images are indexed (height, width, channels)
    reader = easyocr.Reader(lang)
    bounds = reader.readtext(filepath)  # list of (bbox, text, confidence) triples
    texts = ''
    for (bbox, text, prob) in bounds:
        (tl, tr, br, bl) = bbox
        tl = (int(tl[0]), int(tl[1]))
        tr = (int(tr[0]), int(tr[1]))
        br = (int(br[0]), int(br[1]))
        bl = (int(bl[0]), int(bl[1]))

        # Clamp the box to the image bounds: EasyOCR can return coordinates
        # slightly outside the image.
        min_x = max(0, min(tl[0], tr[0], br[0], bl[0]))
        max_x = min(width - 1, max(tl[0], tr[0], br[0], bl[0]))
        min_y = max(0, min(tl[1], tr[1], br[1], bl[1]))
        max_y = min(height - 1, max(tl[1], tr[1], br[1], bl[1]))

        # Crop the region of interest (ROI) and recognize it with VietOCR.
        cropped_image = img[min_y:max_y, min_x:max_x]
        # cv2 loads images as BGR; convert to RGB before building the PIL image.
        cropped_image = Image.fromarray(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB))
        out = recognizer.predict(cropped_image)
        texts = texts + '\t' + out
    result = query(texts, labels)
    jsonText = json.dumps(result, ensure_ascii=False)  # keep Vietnamese characters readable
    im = PIL.Image.open(filepath)
    draw_boxes(im, bounds)
    im.save('result.jpg')
    return ['result.jpg', texts, jsonText]
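
# Direct-call sketch (filenames taken from the examples list below; outputs are
# the annotated image path, the raw recognized text, and a JSON string of fields):
#   inference('giaytiepnhan.jpg', ['vi'], 'Người nộp, Tiếp nhận hồ sơ của')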

title = 'EasyOCR'
description = 'Gradio demo for EasyOCR. EasyOCR supports 80+ languages. To use it, upload your image and choose a language from the dropdown menu, or click one of the examples to load them. Read more at the links below.'
article = "<p style='text-align: center'><a href='https://www.jaided.ai/easyocr/'>Ready-to-use OCR with 80+ supported languages and all popular writing scripts including Latin, Chinese, Arabic, Devanagari, Cyrillic and etc.</a> | <a href='https://github.com/JaidedAI/EasyOCR'>Github Repo</a></p>"
css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
choices = [
    "vi"
]
gr.Interface(
    inference,
    [
        gr.inputs.Image(type='filepath', label='Input'),
        gr.inputs.CheckboxGroup(choices, type="value", default=['vi'], label='language'),
        gr.inputs.Textbox(label='Labels', default='Người nộp, Tiếp nhận hồ sơ của'),
    ],
    [
        gr.outputs.Image(type='pil', label='Output'),
        gr.outputs.Textbox(label='Text'),
        gr.outputs.JSON(label='document'),
    ],
    title=title,
    description=description,
    article=article,
    css=css,
    examples=[
        ['giaytiepnhan.jpg', ['vi'], 'Người nộp, Tiếp nhận hồ sơ của'],
        ['giaytiepnhan2.jpg', ['vi'], 'Mã số hồ sơ, Địa chỉ'],
    ],
    enable_queue=True
    ).launch(debug=True)