Spaces:
Sleeping
Sleeping
Upload app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import torch
|
| 3 |
+
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
|
| 4 |
+
from PIL import Image, UnidentifiedImageError
|
| 5 |
+
import time
|
| 6 |
+
import io
|
| 7 |
+
import platform
|
| 8 |
+
import sys
|
| 9 |
+
import cv2
|
| 10 |
+
import numpy as np
|
# --- 1. SYSTEM CONFIG & STATE INITIALIZATION ---
# Wide layout, sidebar collapsed: the app renders as a two-column dashboard
# and never uses the sidebar.
st.set_page_config(page_title="Handwriting Analysis", layout="wide", initial_sidebar_state="collapsed")

# Session-state keys survive Streamlit reruns:
#   debug_log  - list of timestamped diagnostic strings, seeded with a boot banner
#   img_buffer - raw bytes of the uploaded image, or None when nothing is loaded
if 'debug_log' not in st.session_state:
    st.session_state.debug_log = [f"[{time.strftime('%H:%M:%S')}] === SYSTEM BOOT ({platform.system()} | Python {sys.version.split()[0]}) ==="]
if 'img_buffer' not in st.session_state:
    st.session_state.img_buffer = None
def log(msg):
    """Append a timestamped diagnostic entry to the session debug log."""
    stamp = time.strftime('%H:%M:%S')
    st.session_state.debug_log.append(f"[{stamp}] {msg}")
# Custom CSS
# One-shot global style injection: dark theme, hero typography, the styled
# "ingest card" whose real file-uploader is overlaid invisibly on top of it
# (negative margin + near-zero opacity), stat cards, output box, debug
# terminal, and selectbox restyling. The CSS text is passed through verbatim.
st.markdown("""
<style>
@import url('https://fonts.googleapis.com/css2?family=Space+Grotesk:wght@300;500;700&family=Manrope:wght@300;400;600&display=swap');
@import url('https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined:wght,FILL@100..700,0..1&display=swap');

.stApp { background-color: #0c0e12; color: #f6f6fc; font-family: 'Manrope', sans-serif; }
.block-container { padding-top: 1rem !important; padding-bottom: 0rem !important; max-width: 95% !important; }

.hero-title { font-family: 'Space Grotesk'; font-size: clamp(34px, 4vw, 60px); font-weight: 300; line-height: 0.9; margin-bottom: 25px; }
.hero-accent { color: #8ff5ff; font-weight: 700; font-style: italic; text-shadow: 0 0 20px rgba(143, 245, 255, 0.4); }
.strike { text-decoration: line-through; color: #46484d; opacity: 0.4; }

.uploader-wrapper { position: relative; width: 100%; max-width: 400px; margin: 0 auto; }
.ingest-card { height: 250px; background: #171a1f; border: 1px solid rgba(143, 245, 255, 0.1); border-radius: 4px; padding: 20px; text-align: center; display: flex; flex-direction: column; align-items: center; justify-content: center; pointer-events: none; }
.dashed-border { border: 1px dashed rgba(143, 245, 255, 0.15); border-radius: 2px; padding: 15px; margin-bottom: 12px; }
.system-label { position: absolute; font-family: 'Space Grotesk'; font-size: 7px; letter-spacing: 2px; color: rgba(143, 245, 255, 0.2); top: 10px; left: 10px; text-transform: uppercase; }
.cyan-btn { background-color: #8ff5ff; color: #003f43; font-family: 'Space Grotesk'; font-weight: 700; text-transform: uppercase; padding: 8px 25px; border-radius: 2px; font-size: 10px; letter-spacing: 1px; }

div[data-testid="stFileUploader"] { margin-top: -250px !important; height: 250px !important; opacity: 0.01 !important; z-index: 99 !important; cursor: pointer !important; }
div[data-testid="stFileUploader"] section { height: 100% !important; padding: 0 !important; }

.stat-card { background: #000; padding: 15px; border-radius: 2px; text-align: center; }
.stat-val { color: #8ff5ff; font-size: 24px; font-weight: 700; font-family: 'Space Grotesk'; }
.stat-lbl { font-size: 9px; color: #46484d; text-transform: uppercase; letter-spacing: 1.5px; }

.output-box { border-left: 3px solid #8ff5ff; background: #171a1f; padding: 20px; font-family: 'Space Grotesk'; font-size: clamp(16px, 1.5vw, 20px); color: #f6f6fc; margin-top: 15px; line-height: 1.5; min-height: 100px; white-space: pre-wrap; }

.stButton>button { background-color: transparent !important; border: 1px solid #8ff5ff !important; color: #8ff5ff !important; width: 100%; border-radius: 2px; font-family: 'Space Grotesk'; text-transform: uppercase; font-size: 10px; margin-top: 10px; transition: 0.3s; }
.stButton>button:hover { background-color: #8ff5ff !important; color: #000 !important; }

.debug-terminal { font-family: 'Courier New', monospace; font-size: 11px; color: #4CC9F0; background: #0a0a0c; padding: 15px; border: 1px solid #333; height: 200px; overflow-y: auto; margin-top: 50px; }

div[data-baseweb="select"] > div { background-color: #171a1f !important; border: 1px solid rgba(143, 245, 255, 0.2) !important; border-radius: 2px !important; -webkit-font-smoothing: antialiased; }
div[data-baseweb="select"] * { font-family: 'Space Grotesk', sans-serif !important; color: #f6f6fc !important; font-size: 12px !important; }
ul[role="listbox"] { background-color: #171a1f !important; border: 1px solid rgba(143, 245, 255, 0.2) !important; }
li[role="option"] { background-color: transparent !important; }
li[role="option"]:hover { background-color: rgba(143, 245, 255, 0.1) !important; color: #8ff5ff !important; }

[data-testid="stHeader"], footer, .stFileUploader label, [data-testid="stTickBarMin"] { visibility: hidden; display: none; }
</style>
""", unsafe_allow_html=True)
# --- 2. ENGINE LOGIC & OPEN CV SEGMENTATION ---
@st.cache_resource
def load_engine():
    """Load the TrOCR processor and model once per server process.

    Prefers a local fine-tuned checkpoint; falls back to pulling the model
    from the Hugging Face Hub. Cached by Streamlit so the (expensive) load
    happens only on the first run.

    Returns:
        (processor, model, device) on success, or (None, None, None) on
        failure so the caller can degrade gracefully.
    """
    import os

    local_path = "./final_handwriting_model"
    cloud_path = "Hypernova823/ReadAI"

    # Auto-detect environment: Use local if available, otherwise pull from cloud
    if os.path.exists(local_path):
        path = local_path
        log(f"ENGINE STATUS: Found local directory. Loading {path}...")
    else:
        path = cloud_path
        log(f"ENGINE STATUS: Local folder not found. Pulling from cloud: {path}...")

    device = "cuda" if torch.cuda.is_available() else "cpu"
    try:
        proc = TrOCRProcessor.from_pretrained(path)
        model = VisionEncoderDecoderModel.from_pretrained(path).to(device)
        # Inference-only app: switch off dropout / training-mode layers so
        # transcriptions are deterministic.
        model.eval()
        return proc, model, device
    except Exception as e:
        log(f"ENGINE CRASH: {e}")
        return None, None, None
def segment_handwriting(pil_image):
    """Split a full handwritten page into single-line crops with OpenCV.

    Pipeline: grayscale -> blur -> adaptive threshold -> horizontal dilation
    (smears words of one line into a single blob) -> contour bounding boxes.

    Args:
        pil_image: PIL.Image of the whole page (any mode; converted to RGB).

    Returns:
        Top-to-bottom list of padded PIL line crops; falls back to the whole
        page when no plausible text lines are found.
    """
    # Only luminance is used downstream, so convert the RGB array straight to
    # grayscale (COLOR_RGB2GRAY) instead of flipping channels to BGR first —
    # same result, one less full-image copy.
    img_rgb = np.array(pil_image.convert('RGB'))
    gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)

    # Adaptive thresholding handles varying page illumination beautifully
    blur = cv2.GaussianBlur(gray, (7, 7), 0)
    thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 21, 10)

    # Morphological Dilation: Smear text horizontally into thick lines
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (60, 5))
    dilated = cv2.dilate(thresh, kernel, iterations=1)

    # Find contours of those lines
    contours, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Filter noise and extract bounding boxes
    boxes = [cv2.boundingRect(c) for c in contours]
    boxes = [b for b in boxes if b[2] > 50 and b[3] > 15]  # Width > 50, Height > 15
    boxes.sort(key=lambda b: b[1])  # Sort top to bottom

    lines = []
    page_h, page_w = img_rgb.shape[:2]
    for x, y, w, h in boxes:
        # Add a 15px padding margin (TrOCR crashes/hallucinates if text touches edges)
        pad_x, pad_y = 15, 15
        x1 = max(0, x - pad_x)
        y1 = max(0, y - pad_y)
        x2 = min(page_w, x + w + pad_x)
        y2 = min(page_h, y + h + pad_y)

        lines.append(pil_image.crop((x1, y1, x2, y2)))

    return lines if lines else [pil_image]  # Fallback to whole image if no lines found
def render_tts_button(text, voice_choice, speed, volume):
    """Render an HTML button that reads `text` aloud via the browser's Web Speech API.

    Args:
        text: transcription to speak (newlines flattened to spaces).
        voice_choice: UI label ('Aria (Neural)' or other) used to pick a system voice.
        speed: utterance rate (0.5-2.0).
        volume: utterance volume (0.1-1.0).
    """
    import json
    # json.dumps emits a complete, quoted JS string literal and escapes
    # quotes, backslashes and control characters — the previous single-quote
    # replace broke on backslashes and quotes in the transcription. The
    # '</' -> '<\\/' rewrite prevents a literal '</script>' in the text from
    # terminating the inline <script> block early.
    safe_text = json.dumps(text.replace('\n', ' ')).replace('</', '<\\/')
    html_code = f"""
<div style="padding: 2px;">
<button onclick="playTTS()" style="box-sizing: border-box; background-color: transparent; border: 1px solid #8ff5ff; color: #8ff5ff; width: 100%; border-radius: 2px; font-family: 'Space Grotesk', sans-serif; text-transform: uppercase; font-size: 10px; padding: 12px 0; cursor: pointer; transition: 0.3s;" onmouseover="this.style.backgroundColor='#8ff5ff'; this.style.color='#000';" onmouseout="this.style.backgroundColor='transparent'; this.style.color='#8ff5ff';"><span style="vertical-align: middle;">Generate Audio Readout</span></button>
</div>
<script>
var systemVoices = [];
function loadVoices() {{ systemVoices = window.speechSynthesis.getVoices(); }}
window.speechSynthesis.onvoiceschanged = loadVoices; loadVoices();
function playTTS() {{
window.speechSynthesis.cancel();
var msg = new SpeechSynthesisUtterance({safe_text});
msg.rate = {speed}; msg.volume = {volume};
var targetVoice = '{voice_choice}';
var selectedVoice = null;
if (targetVoice === 'Aria (Neural)') {{ selectedVoice = systemVoices.find(v => v.name.toLowerCase().includes('female') || v.name.toLowerCase().includes('zira') || v.name.toLowerCase().includes('aria') || v.name.toLowerCase().includes('samantha')); }}
else {{ selectedVoice = systemVoices.find(v => v.name.toLowerCase().includes('male') || v.name.toLowerCase().includes('david') || v.name.toLowerCase().includes('julian') || v.name.toLowerCase().includes('mark')); }}
if (selectedVoice) {{ msg.voice = selectedVoice; }}
window.speechSynthesis.speak(msg);
}}
</script>
"""
    st.components.v1.html(html_code, height=70)
# --- 3. UI LAYOUT ---
# Hero banner: struck-through misspelling next to the glowing product name.
st.markdown(f"""
<div class="hero-title">
<div class="strike">Handwronging</div>
<div class="hero-accent">Handwriting</div>
</div>
""", unsafe_allow_html=True)

# Two equal columns: image ingest (left) and OCR results (right).
left, right = st.columns([1, 1], gap="large")
with left:
    # Two UI states keyed on session img_buffer: uploader (None) vs preview.
    if st.session_state.img_buffer is None:
        # The styled "ingest card" is cosmetic only (pointer-events: none);
        # the real st.file_uploader is overlaid on top of it via the CSS
        # negative margin + near-zero opacity, so clicks land on the uploader.
        st.markdown('<div class="uploader-wrapper">', unsafe_allow_html=True)
        st.markdown("""
<div class="ingest-card">
<div class="system-label">SYSTEM_READY</div>
<div class="dashed-border"><span class="material-symbols-outlined" style="font-size:28px; color:#8ff5ff; font-variation-settings:'FILL' 1;">add_a_photo</span></div>
<div style="font-family:Space Grotesk; font-weight:500; font-size:16px; margin-bottom:4px; color:#fff;">Initialize Data Input</div>
<div style="color:#46484d; font-size:10px; margin-bottom:15px;">Drag file here or select from directory</div>
<div class="cyan-btn">Browse Local Storage</div>
</div>
""", unsafe_allow_html=True)
        img_file = st.file_uploader(" ", type=["jpg", "png", "jpeg", "jfif", "webp"], label_visibility="collapsed")
        st.markdown('</div>', unsafe_allow_html=True)

        if img_file:
            # Persist the raw bytes in session state so the image survives
            # Streamlit reruns, then rerun immediately to switch UI state.
            raw_bytes = img_file.getvalue()
            if len(raw_bytes) > 0:
                st.session_state.img_buffer = raw_bytes
                st.rerun()
            else:
                st.error("Upload failed: 0 bytes received.")
    else:
        try:
            processed_image = Image.open(io.BytesIO(st.session_state.img_buffer)).convert("RGB")
            st.image(processed_image, width=300, caption="Source Input")
            if st.button("UPLOAD ANOTHER PHOTO"):
                st.session_state.img_buffer = None
                st.rerun()
        except Exception as e:
            # Buffer holds undecodable bytes: show the error and offer a reset
            # that clears the buffer and returns to the uploader state.
            st.error(f"Image Error: {e}")
            if st.button("RESET SYSTEM"):
                st.session_state.img_buffer = None
                st.rerun()
with right:
    if st.session_state.img_buffer is not None:
        proc, model, dev = load_engine()
        if model:
            # We initialize placeholders so we can update the UI line-by-line
            stats_placeholder = st.empty()
            text_placeholder = st.empty()

            # Start Segmentation
            eval_image = Image.open(io.BytesIO(st.session_state.img_buffer)).convert("RGB")
            line_images = segment_handwriting(eval_image)

            # Inference Loop
            full_transcription = ""
            total_confidence = 0
            t_start = time.perf_counter()

            with st.spinner(f"Extracting {len(line_images)} lines..."):
                for idx, line_img in enumerate(line_images):
                    try:
                        pix = proc(line_img, return_tensors="pt").pixel_values.to(dev)
                        out = model.generate(pix, return_dict_in_generate=True, output_scores=True)
                        pred = proc.batch_decode(out.sequences, skip_special_tokens=True)[0]

                        # Per-line confidence: mean of the max token probability
                        # at each generated step, scaled to a percentage.
                        probs = torch.stack(out.scores, dim=1).softmax(-1)
                        total_confidence += float(torch.max(probs, dim=-1).values.mean().cpu().item()) * 100

                        full_transcription += pred + "\n"

                        # Live UI Update
                        text_placeholder.markdown(f'<div class="output-box">{full_transcription}</div>', unsafe_allow_html=True)
                    except Exception as e:
                        # Best-effort: a failed line is logged and skipped so the
                        # rest of the page still transcribes.
                        log(f"Line {idx} failed: {e}")

            total_latency = time.perf_counter() - t_start
            # NOTE(review): failed lines contribute 0 confidence but still count
            # in the denominator, so avg_conf under-reports on partial failures.
            avg_conf = total_confidence / len(line_images) if line_images else 0
            log(f"INFERENCE COMPLETE: {len(line_images)} lines in {total_latency:.2f}s")

            # Stats cards render into the placeholder created above the text,
            # so they appear on top even though they are computed last.
            with stats_placeholder.container():
                s1, s2 = st.columns(2)
                with s1: st.markdown(f'<div class="stat-card"><div class="stat-val">{avg_conf:.1f}%</div><div class="stat-lbl">Avg Confidence</div></div>', unsafe_allow_html=True)
                with s2: st.markdown(f'<div class="stat-card"><div class="stat-val">{total_latency:.2f}s</div><div class="stat-lbl">Total Latency</div></div>', unsafe_allow_html=True)

            # Text-to-speech controls feeding the browser-side TTS button.
            st.markdown('<div style="margin-top:15px; font-size:9px; color:#46484d; text-transform:uppercase; letter-spacing:2px; font-weight:700;">Audio Synthesis (TTS)</div>', unsafe_allow_html=True)
            c1, c2, c3 = st.columns(3)
            with c1: voice = st.selectbox("Vocal Profile", ["Aria (Neural)", "Julian (Natural)"], label_visibility="collapsed")
            with c2: speed = st.slider("Playback Rate", min_value=0.5, max_value=2.0, value=1.0, step=0.1)
            with c3: volume = st.slider("Volume Level", min_value=0.1, max_value=1.0, value=1.0, step=0.1)

            render_tts_button(full_transcription, voice, speed, volume)

            # Optional: Show the segmented lines for debugging/cool factor
            with st.expander(f"View {len(line_images)} Segmented Lines"):
                for i, l_img in enumerate(line_images):
                    st.image(l_img, caption=f"Line {i+1}")
        else:
            st.warning("Model directory not found.")
# --- 4. PERSISTENT DIAGNOSTICS ---
# Render every accumulated debug-log entry inside the scrollable terminal div.
entries = "".join(f"<div>{entry}</div>" for entry in st.session_state.debug_log)
st.markdown(f"<div class='debug-terminal'>{entries}</div>", unsafe_allow_html=True)