DeepLearning101 committed
Commit
f29287a
1 Parent(s): e138c49

Update app.py

Files changed (1)
app.py +17 -19
app.py CHANGED
@@ -1,14 +1,12 @@
-import os, io
+import os
 from paddleocr import PaddleOCR, draw_ocr
-from PIL import Image, ImageDraw
+from PIL import Image
 import gradio as gr
 
-
 # 設定 Hugging Face Hub 的 Access Token
 os.environ["HF_TOKEN"] = "TWOCR"
-
-def inference(img_path):
 
+def inference(img_path):
     ocr = PaddleOCR(
         rec_char_dict_path='zhtw_common_dict.txt',
         use_gpu=False,
@@ -30,7 +28,7 @@ def inference(img_path):
     im_show_pil = draw_ocr(image, boxes, txts, scores, font_path="./simfang.ttf")
 
     return im_show_pil, "\n".join(txts)
-
+
 title = "<p style='text-align: center'><a href='https://www.twman.org/AI/CV' target='_blank'>繁體中文醫療診斷書和收據OCR:PaddleOCR</a></p>"
 
 description = """
@@ -39,25 +37,25 @@ description = """
 <p style='text-align: center'><a href="https://github.com/Deep-Learning-101/Computer-Vision-Paper" target='_blank'>https://github.com/Deep-Learning-101/Computer-Vision-Paper</a></p><br>
 """
 
-
 css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
 
 gr.Interface(
-    inference,
-    [gr.inputs.Image(type='filepath', label='圖片上傳')],
+    fn=inference,
+    inputs=gr.Image(type='filepath', label='圖片上傳'),
     outputs=[
-        gr.outputs.Image(type="pil", label="識別結果"),
-        "text"
+        gr.Image(type="pil", label="識別結果"),
+        gr.Textbox(label="識別文本")
+    ],
+    examples=[
+        ["DEMO/einvoice1.png"],
+        ["DEMO/einvoice2.png"],
+        ["DEMO/THSR1.jpg"],
+        ["DEMO/THSR2.jpg"],
+        ["DEMO/IDCARD1.jpg"],
+        ["DEMO/HealthCARD1.jpg"]
     ],
-    examples = [
-        ["DEMO/einvoice1.png"],
-        ["DEMO/einvoice2.png"],
-        ["DEMO/THSR1.jpg"],
-        ["DEMO/THSR2.jpg"],
-        ["DEMO/IDCARD1.jpg"],
-        ["DEMO/HealthCARD1.jpg"]],
     title=title,
     description=description,
     css=css,
     enable_queue=True
-).launch(debug=True)
+).launch(debug=True)
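For context on the API change in this commit: the old code passed positional arguments and used the deprecated gr.inputs / gr.outputs namespaces, while the new code uses keyword arguments (fn=, inputs=) with top-level gr.Image and gr.Textbox components. The snippet below is a minimal, self-contained sketch of that pattern, not the repository's file: the inference body follows the generic PaddleOCR quickstart (default Chinese model, standard result unpacking) and is an assumption, since app.py's full function body and custom-dictionary setup are not shown in this diff.

import gradio as gr
from PIL import Image
from paddleocr import PaddleOCR, draw_ocr

def inference(img_path):
    # Generic PaddleOCR pattern (assumed; the Space's own inference body is not part of this diff).
    ocr = PaddleOCR(use_angle_cls=True, use_gpu=False, lang="ch")
    result = ocr.ocr(img_path, cls=True)[0]   # recent PaddleOCR: one image -> list of [box, (text, score)]
    boxes = [line[0] for line in result]
    txts = [line[1][0] for line in result]
    scores = [line[1][1] for line in result]
    image = Image.open(img_path).convert("RGB")
    drawn = draw_ocr(image, boxes, txts, scores, font_path="./simfang.ttf")  # returns a numpy array
    return Image.fromarray(drawn), "\n".join(txts)

gr.Interface(
    fn=inference,                                       # keyword argument instead of positional
    inputs=gr.Image(type="filepath", label="圖片上傳"),   # top-level component, not gr.inputs.Image
    outputs=[
        gr.Image(type="pil", label="識別結果"),
        gr.Textbox(label="識別文本"),                     # replaces the bare "text" shorthand
    ],
).launch()

In Gradio 3.x the gr.inputs / gr.outputs aliases still work but emit deprecation warnings; in Gradio 4.x they were removed, so the keyword-argument form above is the one that keeps working across versions.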