NassimeBejaia committed on
Commit
9cdef11
1 Parent(s): ce193bb

Update app.py

Files changed (1)
  1. app.py +23 -25
app.py CHANGED
@@ -302,54 +302,51 @@ def process_with_yolo(img_pil):
 
 
 def display_detected_lines(original_path, output_path):
-
     # Derive the txt_path from the output_path
     txt_path = os.path.join('yolov3/runs/detect/mainlinedection/labels', os.path.basename(output_path).replace(".jpg", ".txt"))
 
     if os.path.exists(txt_path):
         # Load both original and thresholded images
         original_image = Image.open(original_path)
-        thresholded_image = process_image(original_image)  # This is your function that returns a thresholded PIL image
+        thresholded_image = process_image(original_image)
         boxes = get_detected_boxes(txt_path, original_image.width, original_image.height)
-
+
         if not boxes:
             st.warning("No lines detected by YOLOv3.")
             return
 
         # Create a temporary directory to store the detected lines
         with TemporaryDirectory() as temp_dir:
-
-            detected_line_paths = []  # List to store paths of the detected line images
+            detected_line_paths = []  # For storing paths of the thresholded line images for OCR
+            original_line_paths = []  # For storing paths of the original line images for display
 
-            # Extract lines and perform OCR
             for index, box in enumerate(boxes):
                 x_center, y_center, width, height = box
                 x_min = int(x_center - (width / 2))
                 y_min = int(y_center - (height / 2))
                 x_max = int(x_center + (width / 2))
                 y_max = int(y_center + (height / 2))
-
-                # Crop the ORIGINAL image here
-                extracted_line = original_image.crop((x_min, y_min, x_max, y_max))
-
-                # Use BytesIO to convert the image to a streamable format for Streamlit
-                buffer = io.BytesIO()
-                extracted_line.save(buffer, format="JPEG")
-                buffer.seek(0)
-
-                # Display the extracted line image
-                st.image(buffer, use_column_width=True)
-
-                # Save the thresholded line image for OCR
+
+                # Crop the ORIGINAL image and save
+                original_line = original_image.crop((x_min, y_min, x_max, y_max))
+                original_line_path = os.path.join(temp_dir, f"original_line_{index}.jpg")
+                original_line.save(original_line_path)
+                original_line_paths.append(original_line_path)
+
+                # Crop the THRESHOLDED image and save for OCR
+                extracted_line = thresholded_image.crop((x_min, y_min, x_max, y_max))
                 detected_line_path = os.path.join(temp_dir, f"detected_line_{index}.jpg")
-                thresholded_image.crop((x_min, y_min, x_max, y_max)).save(detected_line_path)
-
-                # Perform OCR on this specific detected line (thresholded one)
-                recognized_text = perform_ocr_on_detected_lines([detected_line_path])[0]
+                extracted_line.save(detected_line_path)
+                detected_line_paths.append(detected_line_path)
+
+            # Perform OCR on thresholded lines
+            recognized_texts = perform_ocr_on_detected_lines(detected_line_paths)
 
-                # Display the OCR result
+            # Display the results
+            for original_img_path, text in zip(original_line_paths, recognized_texts):
+                st.image(original_img_path, use_column_width=True)
                 st.markdown(
-                    f"<p style='font-size: 18px; font-weight: bold;'>{recognized_text}</p>",
+                    f"<p style='font-size: 18px; font-weight: bold;'>{text}</p>",
                     unsafe_allow_html=True
                 )
                 # Add a small break for better spacing
@@ -359,6 +356,7 @@ def display_detected_lines(original_path, output_path):
         st.error("Annotation file (.txt) not found!")
 
 
+
 def perform_ocr_on_detected_lines(detected_line_paths):
     """
     Performs OCR on the provided list of detected line image paths.
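
The commit also changes how perform_ocr_on_detected_lines is called: instead of invoking it once per line with a single-element list and taking element [0], all thresholded crops are now passed in one batch and the results are zipped back against the original crops for display. Both call styles rely on the same contract: one recognized string per input path, returned in input order. A minimal sketch of that contract, using pytesseract purely as a placeholder engine (the actual app.py presumably wraps its own OCR model):

import pytesseract
from PIL import Image

def perform_ocr_on_detected_lines(detected_line_paths):
    # Placeholder implementation: the real function in app.py likely uses a
    # different OCR backend; only the list-in / list-out contract matters here.
    recognized_texts = []
    for path in detected_line_paths:
        line_image = Image.open(path)
        recognized_texts.append(pytesseract.image_to_string(line_image).strip())
    return recognized_texts

Because the display loop does zip(original_line_paths, recognized_texts), an implementation that dropped or reordered results would mis-pair images and text, so preserving input order is the key requirement.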
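The cropping loop likewise assumes that get_detected_boxes (not shown in this diff) returns pixel-space (x_center, y_center, width, height) tuples, which is why the image width and height are passed in: YOLOv3's --save-txt labels store normalized coordinates. A hypothetical sketch of such a helper, assuming the standard "class cx cy w h [conf]" label layout:

def get_detected_boxes(txt_path, img_width, img_height):
    # Hypothetical helper; the actual implementation in app.py may differ.
    boxes = []
    with open(txt_path) as f:
        for line in f:
            parts = line.split()
            if len(parts) < 5:
                continue
            # YOLO label format: class cx cy w h (all normalized to [0, 1]),
            # optionally followed by a confidence value.
            _, cx, cy, w, h = (float(p) for p in parts[:5])
            boxes.append((cx * img_width, cy * img_height, w * img_width, h * img_height))
    return boxes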
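For orientation, display_detected_lines is presumably invoked after the YOLOv3 detection step with the path of the uploaded page image and the annotated output image, since txt_path is derived from the output file's basename. An illustrative call (file names are made up, not from this diff):

# Hypothetical call site; paths are illustrative only.
original_path = "uploaded_page.jpg"
output_path = "yolov3/runs/detect/mainlinedection/uploaded_page.jpg"
display_detected_lines(original_path, output_path)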