hantech committed on
Commit
99a4b22
β€’
1 Parent(s): 80f1479

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -86
app.py CHANGED
@@ -50,12 +50,12 @@ def inference(filepath, lang):
50
  # crop the region of interest (ROI)
51
  img = Image.open(filepath)
52
  #img = img[y0:y1, x0:x1]
53
- img = img.crop((x1, y1, x3, y3)) # crop the image
54
  img = process_input(img, config['dataset']['image_height'],
55
  config['dataset']['image_min_width'], config['dataset']['image_max_width'])
56
  out = translate(img, model)[0].tolist()
57
  out = vocab.decode(out)
58
- new_bounds.append((bbox, out, prob))
59
  im = PIL.Image.open(filepath)
60
  draw_boxes(im, bounds)
61
  im.save('result.jpg')
@@ -67,94 +67,12 @@ article = "<p style='text-align: center'><a href='https://www.jaided.ai/easyocr/
67
  examples = [['english.png',['en']],['thai.jpg',['th']],['french.jpg',['fr', 'en']],['chinese.jpg',['ch_sim', 'en']],['japanese.jpg',['ja', 'en']],['korean.png',['ko', 'en']],['Hindi.jpeg',['hi', 'en']]]
68
  css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
69
  choices = [
70
- "abq",
71
- "ady",
72
- "af",
73
- "ang",
74
- "ar",
75
- "as",
76
- "ava",
77
- "az",
78
- "be",
79
- "bg",
80
- "bh",
81
- "bho",
82
- "bn",
83
- "bs",
84
- "ch_sim",
85
- "ch_tra",
86
- "che",
87
- "cs",
88
- "cy",
89
- "da",
90
- "dar",
91
- "de",
92
- "en",
93
- "es",
94
- "et",
95
- "fa",
96
- "fr",
97
- "ga",
98
- "gom",
99
- "hi",
100
- "hr",
101
- "hu",
102
- "id",
103
- "inh",
104
- "is",
105
- "it",
106
- "ja",
107
- "kbd",
108
- "kn",
109
- "ko",
110
- "ku",
111
- "la",
112
- "lbe",
113
- "lez",
114
- "lt",
115
- "lv",
116
- "mah",
117
- "mai",
118
- "mi",
119
- "mn",
120
- "mr",
121
- "ms",
122
- "mt",
123
- "ne",
124
- "new",
125
- "nl",
126
- "no",
127
- "oc",
128
- "pi",
129
- "pl",
130
- "pt",
131
- "ro",
132
- "ru",
133
- "rs_cyrillic",
134
- "rs_latin",
135
- "sck",
136
- "sk",
137
- "sl",
138
- "sq",
139
- "sv",
140
- "sw",
141
- "ta",
142
- "tab",
143
- "te",
144
- "th",
145
- "tjk",
146
- "tl",
147
- "tr",
148
- "ug",
149
- "uk",
150
- "ur",
151
- "uz",
152
  "vi"
153
  ]
154
  gr.Interface(
155
  inference,
156
- [gr.inputs.Image(type='filepath', label='Input'),gr.inputs.CheckboxGroup(choices, type="value", default=['en'], label='language')],
157
- [gr.outputs.Image(type='pil', label='Output'), gr.outputs.Dataframe(type='pandas', headers=['text', 'confidence'])],
158
  title=title,
159
  description=description,
160
  article=article,
 
50
  # crop the region of interest (ROI)
51
  img = Image.open(filepath)
52
  #img = img[y0:y1, x0:x1]
53
+ img = img.crop((max(0,x1-5), max(y1-5), min(x3+5,width), min(y3+5, height)) # crop the image
54
  img = process_input(img, config['dataset']['image_height'],
55
  config['dataset']['image_min_width'], config['dataset']['image_max_width'])
56
  out = translate(img, model)[0].tolist()
57
  out = vocab.decode(out)
58
+ new_bounds.append((bbox,text, out, prob))
59
  im = PIL.Image.open(filepath)
60
  draw_boxes(im, bounds)
61
  im.save('result.jpg')
 
67
  examples = [['english.png',['en']],['thai.jpg',['th']],['french.jpg',['fr', 'en']],['chinese.jpg',['ch_sim', 'en']],['japanese.jpg',['ja', 'en']],['korean.png',['ko', 'en']],['Hindi.jpeg',['hi', 'en']]]
68
  css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
69
  choices = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  "vi"
71
  ]
72
  gr.Interface(
73
  inference,
74
+ [gr.inputs.Image(type='filepath', label='Input'),gr.inputs.CheckboxGroup(choices, type="value", default=['vi'], label='language')],
75
+ [gr.outputs.Image(type='pil', label='Output'), gr.outputs.Dataframe(type='pandas', headers=['easyOCR','vietOCR', 'confidence'])],
76
  title=title,
77
  description=description,
78
  article=article,