Ntchinda-Giscard committed on
Commit
d2d3d7a
1 Parent(s): adc2a9d

add roboflow

Browse files
Files changed (5) hide show
  1. .gitignore +4 -1
  2. app.py +30 -31
  3. requirements.txt +2 -1
  4. test.py +56 -0
  5. utils.py +47 -20
.gitignore CHANGED
@@ -1 +1,4 @@
1
- .venv/
 
 
 
 
1
+ .venv/
2
+
3
+ __pycache__/
4
+ *.jpg
app.py CHANGED
@@ -4,14 +4,11 @@ from fastapi import FastAPI, File, UploadFile, HTTPException
4
  from fastapi.responses import HTMLResponse, JSONResponse
5
  import os
6
  from fastapi.middleware.cors import CORSMiddleware
7
- from utils import detect_licensePlate, licence_dect, lookup_user, lookup_user_metadata, ner_recog, read_text_img, upload_to_s3, vehicle_dect, write_to_upload
8
- from pinecone import Pinecone
9
- from deepface import DeepFace
10
 
11
 
12
- pc = Pinecone(api_key="dc53a991-1d1a-4f03-b718-1ec0df3b0f00")
13
- index = pc.Index("faces-id")
14
-
15
  app = FastAPI()
16
 
17
  app.add_middleware(
@@ -197,9 +194,11 @@ async def upload_files(
197
  with open(back_path, "wb") as back_file:
198
  back_file.write(await back.read())
199
 
200
-
201
-
202
 
 
 
203
 
204
  front_url = upload_to_s3(front_path)
205
  back_url = upload_to_s3(back_path)
@@ -226,29 +225,29 @@ async def upload_files(
226
 
227
 
228
  print(f"[*] --- Serial ---> {serial_number}")
229
- embedding = DeepFace.represent(img_path=face_path, model_name='DeepFace')
230
- embedding_vector = embedding[0]['embedding']
231
- existing_user = lookup_user_metadata(index, embedding_vector, serial_number)
232
-
233
- print(f"[*] --- Existing user ---> {existing_user}")
234
- if (len(existing_user["matches"]) <= 0):
235
- print(f"[*] --- No match found --->")
236
- await index.upsert(
237
- vectors=[
238
- {
239
- "id": str(uuid.uuid4()),
240
- "values" : embedding_vector,
241
- "metadata" : {"name": serial_number}
242
- }
243
- ],
244
- namespace="ns1"
245
- )
246
- elif(len(existing_user["matches"]) > 0):
247
- if (existing_user["matches"][0]["score"] >= 0.79):
248
- pass
249
- # return JSONResponse(content={"message": "This user and id card already exist"}, status_code=200)
250
- elif(existing_user["matches"][0]["score"]):
251
- return HTTPException(content={"message" : "This card belongs to someone else"}, status_code=404)
252
 
253
 
254
 
 
4
  from fastapi.responses import HTMLResponse, JSONResponse
5
  import os
6
  from fastapi.middleware.cors import CORSMiddleware
7
+ from utils import crop_images_from_detections, detect_licensePlate, licence_dect, lookup_user, lookup_user_metadata, ner_recog, read_text_img, upload_to_s3, vehicle_dect, write_to_upload
8
+ # from pinecone import Pinecone
9
+ # from deepface import DeepFace
10
 
11
 
 
 
 
12
  app = FastAPI()
13
 
14
  app.add_middleware(
 
194
  with open(back_path, "wb") as back_file:
195
  back_file.write(await back.read())
196
 
197
+ front_id = crop_images_from_detections(front_path)
198
+ back_id = crop_images_from_detections(back_path)
199
 
200
+ print(f"[*] ---- Text front ----> {front_id}")
201
+ print(f"[*] ---- Text back ----> {back_id}")
202
 
203
  front_url = upload_to_s3(front_path)
204
  back_url = upload_to_s3(back_path)
 
225
 
226
 
227
  print(f"[*] --- Serial ---> {serial_number}")
228
+ # embedding = DeepFace.represent(img_path=face_path, model_name='DeepFace')
229
+ # embedding_vector = embedding[0]['embedding']
230
+ # existing_user = lookup_user_metadata(index, embedding_vector, serial_number)
231
+
232
+ # print(f"[*] --- Existing user ---> {existing_user}")
233
+ # if (len(existing_user["matches"]) <= 0):
234
+ # print(f"[*] --- No match found --->")
235
+ # await index.upsert(
236
+ # vectors=[
237
+ # {
238
+ # "id": str(uuid.uuid4()),
239
+ # "values" : embedding_vector,
240
+ # "metadata" : {"name": serial_number}
241
+ # }
242
+ # ],
243
+ # namespace="ns1"
244
+ # )
245
+ # elif(len(existing_user["matches"]) > 0):
246
+ # if (existing_user["matches"][0]["score"] >= 0.79):
247
+ # pass
248
+ # # return JSONResponse(content={"message": "This user and id card already exist"}, status_code=200)
249
+ # elif(existing_user["matches"][0]["score"]):
250
+ # return HTTPException(content={"message" : "This card belongs to someone else"}, status_code=404)
251
 
252
 
253
 
requirements.txt CHANGED
@@ -11,4 +11,5 @@ python-multipart
11
  pinecone-client
12
  deepface
13
  tf-keras
14
- tensorflow==2.12
 
 
11
  pinecone-client
12
  deepface
13
  tf-keras
14
+ tensorflow==2.12
15
+ roboflow
test.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import tempfile
2
+ # from roboflow import Roboflow
3
+ # from PIL import Image
4
+ # import json
5
+
6
+ # def set_image_dpi(file_path):
7
+
8
+ # im = Image.open(file_path)
9
+
10
+ # length_x, width_y = im.size
11
+
12
+ # factor = min(1, float(1024.0 / length_x))
13
+
14
+ # size = int(factor * length_x), int(factor * width_y)
15
+
16
+ # im_resized = im.resize(size, Image.LANCZOS)
17
+
18
+ # temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
19
+ # i = 0
20
+ # i =+ 1
21
+ # temp_filename = f'enhanced{i}.jpg'
22
+
23
+ # im_resized.save(temp_filename, dpi=(300, 300))
24
+
25
+ # return temp_filename
26
+
27
+ # def crop_images_from_detections(detections):
28
+ # for detection in detections:
29
+ # image_path = detection["image_path"]
30
+ # image = Image.open(image_path)
31
+
32
+ # x1 = detection['x'] - detection['width'] / 2
33
+ # x2 = detection['x'] + detection['width'] / 2
34
+ # y1 = detection['y'] - detection['height'] / 2
35
+ # y2 = detection['y'] + detection['height'] / 2
36
+ # box = (x1, y1, x2, y2)
37
+
38
+ # cropped_image = image.crop(box)
39
+ # path= f"{detection['class']}.jpg"
40
+
41
+ # cropped_image.save(path) # Or save the image using cropped_image.save('path_to_save_image')
42
+ # set_image_dpi(path)
43
+
44
+
45
+
46
+ # rf = Roboflow(api_key="P4usj8uPwcbnflvyJIAB")
47
+
48
+ # project = rf.workspace("ntchindagiscard").project("id_card_annotation")
49
+
50
+
51
+ # model = project.version(1).model
52
+
53
+ # model.predict("2ed1bdb5-5c09-40a0-a39f-9ff6c15380bf-front.jpg", confidence=20, overlap=50).save('prediction.jpg')
54
+ # result = model.predict("2ed1bdb5-5c09-40a0-a39f-9ff6c15380bf-front.jpg", confidence=20, overlap=50)
55
+ # crop_images_from_detections(result)
56
+ # print(result)
utils.py CHANGED
@@ -1,4 +1,5 @@
1
  import os
 
2
  import uuid
3
  from fastapi import File
4
  from paddleocr import PaddleOCR
@@ -11,8 +12,7 @@ from botocore.exceptions import NoCredentialsError
11
  from dotenv import load_dotenv
12
  from colorthief import ColorThief
13
  import logging
14
-
15
- import logging
16
 
17
  # Configure logging
18
  logging.basicConfig(
@@ -32,10 +32,55 @@ nlp_ner = spacy.load("en_pipeline")
32
  detector = YOLO('best.pt')
33
  vehicle = YOLO('yolov8x.pt')
34
 
 
 
 
 
35
  aws_access_key_id=os.getenv('AWS_ACCESS_KEY')
36
  aws_secret_access_key = os.getenv('AWS_SECRET_KEY')
37
  bucket_name='vvims'
38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  # Function to upload a file to S3
40
  def upload_to_s3(
41
  file_path,
@@ -97,24 +142,6 @@ def ner_recog(text:str) -> dict:
97
 
98
  return {"entities": entities}
99
 
100
-
101
- def read_text_img(img_path:str) -> str:
102
- """
103
- Read text from images
104
-
105
- Args:
106
- - img_path: Path to the images in which the text will be extracted
107
-
108
- Returns:
109
- - text: The extracted text
110
- """
111
-
112
- result = ocr_model.ocr(img_path)
113
- text = ''
114
- if result[0]:
115
- for res in result[0]:
116
- text += res[1][0] + ' '
117
- return text
118
  def detect_licensePlate(img: str) -> dict:
119
  image = Image.open(img)
120
  results = vehicle(source=img, cls=['car', 'bus', 'truck', 'motorcycle'], conf=0.7)
 
1
  import os
2
+ import tempfile
3
  import uuid
4
  from fastapi import File
5
  from paddleocr import PaddleOCR
 
12
  from dotenv import load_dotenv
13
  from colorthief import ColorThief
14
  import logging
15
+ from roboflow import Roboflow
 
16
 
17
  # Configure logging
18
  logging.basicConfig(
 
32
  detector = YOLO('best.pt')
33
  vehicle = YOLO('yolov8x.pt')
34
 
35
# Roboflow model used to locate ID-card fields before OCR.
# SECURITY(review): the API key was hard-coded in source; read it from the
# environment, falling back to the old literal so existing deployments keep
# working. Rotate the exposed key and remove the fallback once configured.
rf = Roboflow(api_key=os.getenv("ROBOFLOW_API_KEY", "P4usj8uPwcbnflvyJIAB"))
project = rf.workspace("ntchindagiscard").project("id_card_annotation")
model = project.version(1).model
38
+
39
  aws_access_key_id=os.getenv('AWS_ACCESS_KEY')
40
  aws_secret_access_key = os.getenv('AWS_SECRET_KEY')
41
  bucket_name='vvims'
42
 
43
+
44
def read_text_img(img_path: str) -> str:
    """
    Extract text from an image via PaddleOCR.

    Args:
    - img_path: Path to the image to run OCR on.

    Returns:
    - The recognised text, each fragment followed by a single trailing
      space; an empty string when nothing is detected.
    """
    ocr_result = ocr_model.ocr(img_path)
    first_page = ocr_result[0]
    if not first_page:
        return ''
    # Each OCR line is (box, (text, confidence)); keep only the text.
    return ''.join(line[1][0] + ' ' for line in first_page)
61
+
62
def crop_images_from_detections(image_path):
    """
    Run the Roboflow ID-card detector on an image, crop each detected
    region, OCR the crop, and return the extracted text per field.

    Args:
    - image_path: Path to the image to analyse.

    Returns:
    - dict mapping each detection's class name to the OCR'd text of its
      cropped region. If two detections share a class, the later one wins.
    """
    results = {}
    detections = model.predict(image_path, confidence=20, overlap=50)
    # Open the source image once instead of re-opening it per detection
    # (the original also had a no-op `image_path = image_path` here).
    image = Image.open(image_path)
    for detection in detections:
        # Roboflow reports centre coordinates plus width/height; convert
        # to the (left, upper, right, lower) box that PIL's crop expects.
        x1 = detection['x'] - detection['width'] / 2
        x2 = detection['x'] + detection['width'] / 2
        y1 = detection['y'] - detection['height'] / 2
        y2 = detection['y'] + detection['height'] / 2

        cropped_image = image.crop((x1, y1, x2, y2))
        # NOTE(review): a single scratch file is reused for every crop and
        # across concurrent requests — consider tempfile.NamedTemporaryFile.
        path = "detections.jpg"
        cropped_image.save(path)
        results[detection['class']] = read_text_img(path)
    return results
82
+
83
+
84
  # Function to upload a file to S3
85
  def upload_to_s3(
86
  file_path,
 
142
 
143
  return {"entities": entities}
144
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
  def detect_licensePlate(img: str) -> dict:
146
  image = Image.open(img)
147
  results = vehicle(source=img, cls=['car', 'bus', 'truck', 'motorcycle'], conf=0.7)