Vahe committed on
Commit
df5e82e
1 Parent(s): 9c1be93

working changes done

Browse files
Files changed (2) hide show
  1. app.py +11 -12
  2. extractor.py +5 -14
app.py CHANGED
@@ -19,7 +19,7 @@ from extractor import get_card_xy, get_digit
19
  # MODEL_PATH = "runs/train/exp/weights/best.pt"
20
 
21
  def main():
22
- st.title("Card number extractor")
23
 
24
  # Use st.camera to capture images from the user's camera
25
  img_file_buffer = st.camera_input(label='Please, take a photo of a card', key='card')
@@ -54,19 +54,18 @@ def main():
54
  st.image('card_image.jpg', caption=f"{display_text}", use_column_width=True)
55
  else:
56
  # cropped_image = gray[y1:y2, x1:x2]
57
- cropped_image = resized_image[y1:y2, x1:x2]
58
- cropped_image = cv2.resize(cropped_image, (128, 128))
59
- cv2.imwrite('card_number_image.jpg', cropped_image)
60
 
61
- extracted_digit = get_digit(
62
- model_path="card_number_extractor.tflite",
63
- image_path='card_number_image.jpg',
64
- threshold=0.4
65
- )
66
 
67
- display_text = f'Here is the zoomed card number: {extracted_digit}'
68
- # display_text = 'No number so far'
69
- st.image('card_number_image.jpg', caption=f"{display_text}", use_column_width=True)
70
 
71
  image = Image.open('card_image.jpg')
72
  image_resized = image.resize((640, 640))
 
19
  # MODEL_PATH = "runs/train/exp/weights/best.pt"
20
 
21
  def main():
22
+ st.title("Card number detector")
23
 
24
  # Use st.camera to capture images from the user's camera
25
  img_file_buffer = st.camera_input(label='Please, take a photo of a card', key='card')
 
54
  st.image('card_image.jpg', caption=f"{display_text}", use_column_width=True)
55
  else:
56
  # cropped_image = gray[y1:y2, x1:x2]
57
+ # # cropped_image = resized_image[y1:y2, x1:x2]
58
+ # cropped_image = cv2.resize(cropped_image, (128, 128))
59
+ # cv2.imwrite('card_number_image.jpg', cropped_image)
60
 
61
+ # extracted_digit = get_digit(
62
+ # model_path="card_number_extractor.tflite",
63
+ # image_path='card_number_image.jpg',
64
+ # threshold=0.4
65
+ # )
66
 
67
+ # display_text = f'Here is the zoomed card number: {extracted_digit}'
68
+ # st.image('card_number_image.jpg', caption=f"{display_text}", use_column_width=True)
 
69
 
70
  image = Image.open('card_image.jpg')
71
  image_resized = image.resize((640, 640))
extractor.py CHANGED
@@ -2,7 +2,7 @@ import tensorflow as tf
2
  import numpy as np
3
  from PIL import Image
4
  import cv2
5
- import streamlit as st
6
 
7
  def get_card_xy(model_path, image_path):
8
  #model_path = 'odo_detector.tflite'
@@ -12,8 +12,6 @@ def get_card_xy(model_path, image_path):
12
  input_details = interpreter.get_input_details()
13
  output_details = interpreter.get_output_details()
14
 
15
- st.write(f"{input_details}")
16
-
17
  # Obtain the height and width of the corresponding image from the input tensor
18
  image_height = input_details[0]['shape'][2] # 640
19
  image_width = input_details[0]['shape'][3] # 640
@@ -28,8 +26,6 @@ def get_card_xy(model_path, image_path):
28
  image_np = np.moveaxis(image_np, -1, 0)
29
  image_np = image_np[np.newaxis, :]
30
 
31
- st.write(f"{image_np.shape}")
32
-
33
  # inference
34
  interpreter.set_tensor(input_details[0]['index'], image_np)
35
  interpreter.invoke()
@@ -60,15 +56,10 @@ def get_card_xy(model_path, image_path):
60
  else:
61
  pass
62
 
63
- # x1 = int((x_center - width / 2) * image_width)
64
- # y1 = int((y_center - height / 2) * image_height)
65
- # x2 = int((x_center + width / 2) * image_width)
66
- # y2 = int((y_center + height / 2) * image_height)
67
-
68
- x1 = int((x_center - width / 2) * 640)
69
- y1 = int((y_center - height / 2) * 640)
70
- x2 = int((x_center + width / 2) * 640)
71
- y2 = int((y_center + height / 2) * 640)
72
 
73
  # draw.rectangle([x1, y1, x2, y2], outline="red", width=2)
74
  # text = f"Class: {class_name}, Score: {final_score:.2f}"
 
2
  import numpy as np
3
  from PIL import Image
4
  import cv2
5
+ # import streamlit as st
6
 
7
  def get_card_xy(model_path, image_path):
8
  #model_path = 'odo_detector.tflite'
 
12
  input_details = interpreter.get_input_details()
13
  output_details = interpreter.get_output_details()
14
 
 
 
15
  # Obtain the height and width of the corresponding image from the input tensor
16
  image_height = input_details[0]['shape'][2] # 640
17
  image_width = input_details[0]['shape'][3] # 640
 
26
  image_np = np.moveaxis(image_np, -1, 0)
27
  image_np = image_np[np.newaxis, :]
28
 
 
 
29
  # inference
30
  interpreter.set_tensor(input_details[0]['index'], image_np)
31
  interpreter.invoke()
 
56
  else:
57
  pass
58
 
59
+ x1 = int((x_center - width / 2) * image_width)
60
+ y1 = int((y_center - height / 2) * image_height)
61
+ x2 = int((x_center + width / 2) * image_width)
62
+ y2 = int((y_center + height / 2) * image_height)
 
 
 
 
 
63
 
64
  # draw.rectangle([x1, y1, x2, y2], outline="red", width=2)
65
  # text = f"Class: {class_name}, Score: {final_score:.2f}"