Omnibus committed
Commit 7e2f99d · 1 parent: d371804

Update app.py

Files changed (1): app.py (+26, -24)
app.py CHANGED
@@ -179,12 +179,12 @@ ocr_id = {
 }


-def blur_im(img,bounds,target_lang,trans_lang):
+def blur_im(img,bounds,target_lang,trans_lang,ocr_sens,font_fac):
    im = cv2.imread(img)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

    for bound in bounds:
-        if bound[2]>=0.3:
+        if bound[2]>=(ocr_sens):
            p0, p1, p2, p3 = bound[0]
            x = int(p0[0])
            y = int(p0[1])
@@ -197,7 +197,7 @@ def blur_im(img,bounds,target_lang,trans_lang):
            pass
    im = Image.fromarray(im)
    for bound in bounds:
-        if bound[2]>=0.3:
+        if bound[2]>=(ocr_sens):
            p0, p1, p2, p3 = bound[0]
            x = int(p0[0])
            y = int(p0[1])
@@ -205,20 +205,17 @@ def blur_im(img,bounds,target_lang,trans_lang):
            h = int(p2[1]) - int(y)
            draw = ImageDraw.Draw(im)
            text = this(bound[1],target_lang,trans_lang)
-
-            font_size=int(int(w)*0.1)
-
+            font_size=int(int(h)*font_fac)
            font = ImageFont.truetype("./fonts/unifont-15.0.01.ttf", int(font_size))
-
            draw.text((x, y),text, font = font, fill=(0,0,0))
        else:
            pass
    return im

-def draw_boxes(image, bounds, width=1):
+def draw_boxes(image, bounds, ocr_sens,width=1):
    draw = ImageDraw.Draw(image)
    for bound in bounds:
-        if bound[2]>=0.3:
+        if bound[2]>=(ocr_sens):
            color = "blue"
        else:
            color = "red"
@@ -226,7 +223,7 @@ def draw_boxes(image, bounds, width=1):
        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
    return image

-def detect(img, target_lang,trans_lang,target_lang2=None):
+def detect(img, target_lang,trans_lang,ocr_sens,font_fac,target_lang2=None):
    if target_lang2 != None and target_lang2 != "":
        lang=f"{lang_id[target_lang]}"
        lang2=f"{lang_id[target_lang2]}"
@@ -234,28 +231,33 @@ def detect(img, target_lang,trans_lang,target_lang2=None):
    else:
        lang=[f"{ocr_id[target_lang]}"]
        pass
-    #img = Image.open(img)

-    #img.thumbnail((1000,1000))
-    #img = np.array(img)
+    img = Image.open(img)
+    img.thumbnail((1000,1000), Image.Resampling.LANCZOS)
+    img.save("tmp.jpg")
+    img1 = np.array(img)
    reader = easyocr.Reader(lang)
-    bounds = reader.readtext(img)
-
-    im = PIL.Image.open(img)
-    im_out=draw_boxes(im, bounds)
-    #im.save('result.jpg')
-
-    blr_out=blur_im(img,bounds,target_lang,trans_lang)
+    bounds = reader.readtext(img1)
+    im = PIL.Image.open("tmp.jpg")
+    im_out=draw_boxes(im, bounds,ocr_sens)
+    blr_out=blur_im("tmp.jpg",bounds,target_lang,trans_lang,ocr_sens,font_fac)
    return im_out,blr_out,pd.DataFrame(bounds),pd.DataFrame(bounds).iloc[:,1:]

with gr.Blocks() as robot:
    with gr.Row():
        with gr.Column():
            im=gr.Image(type="filepath")
+
        with gr.Column():
-            target_lang = gr.Dropdown(label="Detect language:", choices=list(ocr_id.keys()),value="English")
-            #target_lang2 = gr.Dropdown(label="Detect language2", choices=list(lang_id.keys()),value="")
-            trans_lang = gr.Dropdown(label="Translate to:", choices=list(lang_id.keys()),value="Chinese")
+            with gr.Group():
+                with gr.Row():
+                    with gr.Column():
+                        target_lang = gr.Dropdown(label="Detect language:", choices=list(ocr_id.keys()),value="English")
+                        trans_lang = gr.Dropdown(label="Translate to:", choices=list(lang_id.keys()),value="Chinese")
+                    with gr.Column():
+                        ocr_sens=gr.Slider(0.1, 1, step=0.05,value=0.25,label="Detect Min Confidence")
+                        font_fac=gr.Slider(0.1, 1, step =0.1,value=0.4,label="Font Scale")
+                        ocr_space=gr.Slider(1,10, step=1,value=5,label="Future Function")
            go_btn=gr.Button()
    with gr.Row():
        with gr.Column():
@@ -267,6 +269,6 @@ with gr.Blocks() as robot:
            out_txt=gr.Textbox(lines=8)
            data_f=gr.Dataframe()

-    go_btn.click(detect,[im,target_lang,trans_lang],[out_im,trans_im,out_txt,data_f])
+    go_btn.click(detect,[im,target_lang,trans_lang,ocr_sens,font_fac],[out_im,trans_im,out_txt,data_f])
    #go_btn.click(detect,[im,target_lang,target_lang2],[out_im,trans_im,out_txt,data_f])
robot.queue(concurrency_count=10).launch()
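
For context, a minimal sketch (not the committed app.py) of the flow the new ocr_sens and font_fac inputs control: the image is downscaled to fit 1000x1000 before OCR, results below the confidence threshold are skipped, and the overlay font is sized from each box's height times the scale factor. The input path, language code, and helper name are placeholder assumptions, and the translation step (the this(...) call in app.py) is omitted here.

import numpy as np
import easyocr
from PIL import Image, ImageDraw, ImageFont

def overlay_ocr_text(path, ocr_sens=0.25, font_fac=0.4,
                     font_path="./fonts/unifont-15.0.01.ttf"):
    # Downscale in place so OCR never sees more than 1000x1000 pixels,
    # mirroring the preprocessing the updated detect() performs.
    img = Image.open(path).convert("RGB")
    img.thumbnail((1000, 1000), Image.Resampling.LANCZOS)

    # easyocr returns (box, text, confidence) tuples; box is four [x, y] points.
    reader = easyocr.Reader(["en"])   # language list comes from ocr_id in app.py
    bounds = reader.readtext(np.array(img))

    draw = ImageDraw.Draw(img)
    for box, text, conf in bounds:
        if conf < ocr_sens:           # "Detect Min Confidence" slider
            continue
        p0, _, p2, _ = box
        x, y = int(p0[0]), int(p0[1])
        h = int(p2[1]) - y            # box height drives the font size
        font = ImageFont.truetype(font_path, max(1, int(h * font_fac)))  # "Font Scale" slider
        draw.text((x, y), text, font=font, fill=(0, 0, 0))
    return img

# Usage with a placeholder input path:
# overlay_ocr_text("photo.jpg").save("overlay.jpg")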