File size: 1,934 Bytes
6610027
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89e72c9
6610027
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
import gradio as gr
import openvino as ov
from pathlib import Path
import numpy as np
from PIL import Image
import cv2

from src.config import (
    DICT_DIR,
    IMAGE_TYPES,
    IMAGE_EXAMPLE,
    MODEL_DIR,
    DEVICE,
)

from src.image_processing import recognize

# Load models
# Read the OCR recognition network from disk via OpenVINO Runtime.
# MODEL_DIR comes from src.config; presumably it points at an IR (.xml)
# or ONNX model file — confirm against the config module.
core = ov.Core()
model = core.read_model(model=Path(MODEL_DIR))
print("[INFO] Loaded recognition model")

# Select device (CPU or GPU)
# DEVICE is a config constant (e.g. "CPU" or "GPU") passed straight to
# OpenVINO's device selector.
compiled_model = core.compile_model(model=model, device_name=DEVICE)

# Fetch Information About Input and Output Layers
# The model is assumed to have a single input and a single output tensor.
recognition_input_layer = compiled_model.input(0)
recognition_output_layer = compiled_model.output(0)

print("[INFO] Fetched recognition model")

# In JA model, there should be blank symbol added at index 0 of each charlist.
# The CTC "blank" is prepended so that output index 0 decodes to it.
blank_char = "~"

# Build the decoding alphabet: blank symbol followed by one character per
# line of the dictionary file (whitespace stripped).
with Path(DICT_DIR).open(mode="r", encoding="utf-8") as charlist:
    letters = blank_char + "".join(line.strip() for line in charlist)
print("[INFO] Loaded dictionary")



def do_ocr(inp):
    """Recognize one line of handwritten Japanese text in an image.

    Parameters
    ----------
    inp : numpy.ndarray
        Image supplied by the Gradio ``Image`` component as an
        H x W x 3 array in **RGB** channel order.

    Returns
    -------
    str
        The recognized text line.
    """
    # Gradio provides RGB arrays, so convert with RGB2GRAY. Using
    # BGR2GRAY here would swap the red/blue luminance weights
    # (0.299 vs 0.114) and distort the grayscale input to the model.
    img = cv2.cvtColor(inp, cv2.COLOR_RGB2GRAY)
    recognized_text = recognize(
        img,
        compiled_model,
        recognition_input_layer,
        recognition_output_layer,
        letters,
    )
    # `recognize` returns an iterable of characters/fragments; join them
    # into a single string for the Textbox output.
    return "".join(recognized_text)
  
# Gradio UI wiring: one image input mapped to one text output.
# Renamed from `input`/`output` to avoid shadowing the `input` builtin.
input_image = gr.Image()
output_text = gr.Textbox()

title = "日本語手書き認識"
description = "DEMO by TOKYO TECHIES (注意:画像には1行のテキストしか含まれていません。)"
# Sample images offered under the input widget for one-click demos.
examples = [['data/in_1.png'], ['data/sample_1_1.png']]

gr.Interface(
    fn=do_ocr,
    inputs=input_image,
    outputs=output_text,
    title=title,
    description=description,
    examples=examples,
).launch()