Goodsea committed
Commit b5e4f25
1 parent: 7c2c0f7

paddleocr package

Files changed (4)
  1. .gitattributes +1 -0
  2. app.py +49 -40
  3. deprem_ocr-1.0.19-py3-none-any.whl +3 -0
  4. requirements.txt +11 -2
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.whl filter=lfs diff=lfs merge=lfs -text
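(This is the attribute line that `git lfs track "*.whl"` appends, so the wheel added later in this commit is stored as a Git LFS object rather than committed directly.)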
app.py CHANGED
@@ -1,17 +1,16 @@
 import gradio as gr
-from easyocr import Reader
-from PIL import Image
-import io
+from deprem_ocr.ocr import DepremOCR
 import json
 import csv
 import openai
 import ast
 import os
+import numpy as np
 from deta import Deta
 
 
-openai.api_key = os.getenv('API_KEY')
-reader = Reader(["tr"])
+openai.api_key = os.getenv("API_KEY")
+depremOCR = DepremOCR()
 
 
 def get_parsed_address(input_img):
@@ -20,14 +19,9 @@ def get_parsed_address(input_img):
     return openai_response(address_full_text)
 
 
-def preprocess_img(inp_image):
-    gray = cv2.cvtColor(inp_image, cv2.COLOR_BGR2GRAY)
-    gray_img = cv2.bitwise_not(gray)
-    return gray_img
-
-
 def get_text(input_img):
-    result = reader.readtext(input_img, detail=0)
+    result = depremOCR.apply_ocr(np.array(input_img))
+    print(result)
     return " ".join(result)
 
 
@@ -45,9 +39,10 @@ def get_json(mahalle, il, sokak, apartman):
     dump = json.dumps(adres, indent=4, ensure_ascii=False)
     return dump
 
+
 def write_db(data_dict):
     # 2) initialize with a project key
-    deta_key = os.getenv('DETA_KEY')
+    deta_key = os.getenv("DETA_KEY")
     deta = Deta(deta_key)
 
     # 3) create and use as many DBs as you want!
@@ -60,16 +55,17 @@ def text_dict(input):
     write_db(eval_result)
 
     return (
-        str(eval_result['city']),
-        str(eval_result['distinct']),
-        str(eval_result['neighbourhood']),
-        str(eval_result['street']),
-        str(eval_result['address']),
-        str(eval_result['tel']),
-        str(eval_result['name_surname']),
-        str(eval_result['no']),
+        str(eval_result["city"]),
+        str(eval_result["distinct"]),
+        str(eval_result["neighbourhood"]),
+        str(eval_result["street"]),
+        str(eval_result["address"]),
+        str(eval_result["tel"]),
+        str(eval_result["name_surname"]),
+        str(eval_result["no"]),
     )
-
+
+
 def openai_response(ocr_input):
     prompt = f"""Tabular Data Extraction You are a highly intelligent and accurate tabular data extractor from
     plain text input and especially from emergency text that carries address information, your inputs can be text
@@ -98,28 +94,31 @@ def openai_response(ocr_input):
     resp = eval(resp.replace("'{", "{").replace("}'", "}"))
     resp["input"] = ocr_input
     dict_keys = [
-        'city',
-        'distinct',
-        'neighbourhood',
-        'street',
-        'no',
-        'tel',
-        'name_surname',
-        'address',
-        'input',
+        "city",
+        "distinct",
+        "neighbourhood",
+        "street",
+        "no",
+        "tel",
+        "name_surname",
+        "address",
+        "input",
     ]
     for key in dict_keys:
         if key not in resp.keys():
-            resp[key] = ''
+            resp[key] = ""
     return resp
 
 
 with gr.Blocks() as demo:
     gr.Markdown(
-    """
+        """
     # Enkaz Bildirme Uygulaması
-    """)
-    gr.Markdown("Bu uygulamada ekran görüntüsü sürükleyip bırakarak AFAD'a enkaz bildirimi yapabilirsiniz. Mesajı metin olarak da girebilirsiniz, tam adresi ayrıştırıp döndürür. API olarak kullanmak isterseniz sayfanın en altında use via api'ya tıklayın.")
+    """
+    )
+    gr.Markdown(
+        "Bu uygulamada ekran görüntüsü sürükleyip bırakarak AFAD'a enkaz bildirimi yapabilirsiniz. Mesajı metin olarak da girebilirsiniz, tam adresi ayrıştırıp döndürür. API olarak kullanmak isterseniz sayfanın en altında use via api'ya tıklayın."
+    )
     with gr.Row():
         img_area = gr.Image(label="Ekran Görüntüsü yükleyin 👇")
         ocr_result = gr.Textbox(label="Metin yükleyin 👇 ")
@@ -140,13 +139,23 @@ with gr.Blocks() as demo:
     with gr.Row():
         no = gr.Textbox(label="Kapı No")
 
-
-    submit_button.click(get_parsed_address, inputs = img_area, outputs = open_api_text, api_name="upload_image")
-
-    ocr_result.change(openai_response, ocr_result, open_api_text, api_name="upload-text")
-
-    open_api_text.change(text_dict, open_api_text, [city, distinct, neighbourhood, street, address, tel, name_surname, no])
+    submit_button.click(
+        get_parsed_address,
+        inputs=img_area,
+        outputs=open_api_text,
+        api_name="upload_image",
+    )
+
+    ocr_result.change(
+        openai_response, ocr_result, open_api_text, api_name="upload-text"
+    )
+
+    open_api_text.change(
+        text_dict,
+        open_api_text,
+        [city, distinct, neighbourhood, street, address, tel, name_surname, no],
+    )
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
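As a reading aid, a minimal sketch of the new OCR path wired in above. The only names taken from the diff are DepremOCR and apply_ocr; the sketch assumes, as the " ".join(result) call implies, that apply_ocr takes a NumPy image array and returns an iterable of recognized text fragments. The image filename is hypothetical.

import numpy as np
from PIL import Image
from deprem_ocr.ocr import DepremOCR

# Construct once, mirroring the module-level depremOCR object in app.py.
ocr = DepremOCR()

# Hypothetical screenshot of a help message; any RGB image works here.
img = Image.open("enkaz_mesaji.png")

# get_text() passes np.array(input_img) to apply_ocr; the return value is
# assumed to be an iterable of text fragments that join into one address string.
fragments = ocr.apply_ocr(np.array(img))
address_full_text = " ".join(fragments)
print(address_full_text)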
deprem_ocr-1.0.19-py3-none-any.whl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cca7baccfbe8c4b0cd1b37fc022d077056dbf082bd2a352b1e8ef6d43a6f9f87
+size 11961345
requirements.txt CHANGED
@@ -1,5 +1,14 @@
+paddlepaddle
+opencv-python
+Pillow
+numpy==1.23.3
+pandas
+imutils
+Cython
+imgaug
+pyclipper
+deprem_ocr-1.0.19-py3-none-any.whl
 openai
 Pillow
-easyocr
 gradio
-deta
+deta
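Note that deprem_ocr-1.0.19-py3-none-any.whl is not a PyPI package name but the wheel committed in this same change, so pip is expected to be run from the repository root when resolving requirements.txt. A small, hedged smoke test for the resulting environment — the package names come from requirements.txt, and paddle is the import name the paddlepaddle distribution installs:

import numpy as np
import paddle  # import name of the paddlepaddle distribution
from deprem_ocr.ocr import DepremOCR

# requirements.txt pins numpy to 1.23.3.
print("numpy", np.__version__)
print("paddle", paddle.__version__)

# Constructing DepremOCR exercises the locally installed wheel
# without touching the Gradio app itself.
depremOCR = DepremOCR()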