Update app.py
app.py CHANGED

@@ -5,7 +5,6 @@ import json
from pathlib import Path
import gradio as gr
from openai import OpenAI
-from PIL import Image

API_KEY = "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"
MODEL = "gpt-5.1"

@@ -17,6 +16,7 @@ def upload_pdf(path):
    return client.files.create(file=open(path, "rb"), purpose="assistants").id


+# ---------------- Prompt (unchanged) ----------------
def prompt():
    return (
        "Extract structured JSON from the attached logistics document. Return ONLY valid JSON.\n"

@@ -77,81 +77,62 @@ def prompt():
    )


-def extract_image(path):
-    """Process image via filepath"""
-    img_bytes = Path(path).read_bytes()
-    ext = Path(path).suffix.replace(".", "").lower()
-    ...
+# ---------------- Extraction ----------------
+def extract(path):
+    suffix = Path(path).suffix.lower()
+
+    if suffix == ".pdf":
+        fid = upload_pdf(path)
+        content = [
+            {"type": "text", "text": prompt()},
+            {"type": "file", "file": {"file_id": fid}}
+        ]
+    else:
+        b64 = base64.b64encode(Path(path).read_bytes()).decode()
+        ext = suffix[1:]
+        content = [
+            {"type": "text", "text": prompt()},
+            {"type": "image_url", "image_url": {"url": f"data:image/{ext};base64,{b64}"}}
+        ]

    r = client.chat.completions.create(
        model=MODEL,
        messages=[{"role": "user", "content": content}]
    )

-    ...
-    return ...
+    text = r.choices[0].message.content
+    return text[text.find("{"): text.rfind("}") + 1]


-def extract_pdf(...):
-    ...
-    content = [
-        {"type": "text", "text": prompt()},
-        {"type": "file", "file": {"file_id": fid}}
-    ]
-
-    r = client.chat.completions.create(
-        model=MODEL,
-        messages=[{"role": "user", "content": content}]
-    )
-
-    t = r.choices[0].message.content
-    return t[t.find("{"): t.rfind("}") + 1]
-
-
-def process(image_path, pdf_file):
-    if image_path:
-        return extract_image(image_path)
-
-    if pdf_file:
-        return extract_pdf(pdf_file)
-
    return "{}"
+def ui(image_input, pdf_input):
+    if image_input:
+        return extract(image_input)
+    if pdf_input:
+        return extract(pdf_input.name)
+    return "{}"


+# ---------------- UI ----------------
+
with gr.Blocks() as demo:
    gr.Markdown("# **Logistics OCR Data Extractor (GPT-5.1)**")

    with gr.Row():
-        ...
+        img = gr.Image(label="Upload Image", type="filepath")
+        pdf = gr.File(label="Upload PDF", file_types=["pdf"])

-    ...
+    out = gr.JSON(label="Extracted JSON")
+    btn = gr.Button("Submit")

-    ...
-        fn=process,
-        inputs=[image_input, pdf_input],
-        outputs=output
-    )
+    btn.click(fn=ui, inputs=[img, pdf], outputs=out)

    gr.Examples(
        examples=[
            ["IMG_0001.jpg", None],
            ["IMG_0002.jpg", None]
        ],
-        inputs=[...],
+        inputs=[img, pdf],
        label="Sample Images"
    )
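
A note on the brace-slicing return (carried over from the old extract_pdf into the new extract): it cuts the model reply down to the outermost JSON object, stripping any markdown fence or surrounding prose, before the string reaches the gr.JSON output. A minimal illustration, with a made-up reply and field name:

    reply = "```json\n{\"shipper\": \"ACME Logistics\"}\n```"   # illustrative model reply only
    json_str = reply[reply.find("{"): reply.rfind("}") + 1]     # same slice as in extract()
    print(json_str)                                             # {"shipper": "ACME Logistics"}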