pleonard committed
Commit 7d3a73f
Parent: 5b3a456

new features

Files changed (3):
  1. .gitignore +5 -0
  2. __pycache__/app.cpython-38.pyc +0 -0
  3. app.py +118 -42
.gitignore CHANGED
@@ -4,3 +4,8 @@ __pycache__
 /__pycache__
 /__pycache__
 /__pycache__
+/__pycache__
+/test_apple
+/NamedPeople
+/db
+*.pyc
__pycache__/app.cpython-38.pyc CHANGED
Binary files a/__pycache__/app.cpython-38.pyc and b/__pycache__/app.cpython-38.pyc differ
 
app.py CHANGED
@@ -5,6 +5,14 @@ import PIL
 from PIL import Image, ImageDraw, ImageFont
 import time
 
+import pandas as pd
+from operator import itemgetter
+import os
+
+def get_named_people():
+    named_people = next(os.walk('db'))[1]
+    return named_people
+
 
 dbackends = [
     ['Haar Cascade (OpenCV)','opencv'],
@@ -21,57 +29,125 @@ dbackendinfo = 'Detectors with 🌈 require a color image.'
 
 
 with gr.Blocks() as demo:
+    with gr.Tab("Add Named Person"):
 
-    input_image = gr.Image(value="8428_26_SM.jpg")
-    annotated_image = gr.AnnotatedImage()
+        input_image = gr.Image(value="8428_26_SM.jpg")
+        annotated_image = gr.AnnotatedImage()
+        selected_face_info = gr.Textbox(label="Selected Face Info", value="Click on a face above")
+        selected_face_pic = gr.Image(label="Selected Face", value="Click on a face above", height=148)
 
-    #jsontext = gr.Text(label= "deepface extract_faces results")
-    selected_face_info = gr.Textbox(label="Selected Face Info", value="Click on a face above")
-    selected_face_pic = gr.Image(label="Selected Face", value="Click on a face above")
 
+        def findFaces(imgfile,dbackend):
+            start_time = time.time()
+            print(start_time)
 
-
-    def findFaces(imgfile,dbackend):
-        start_time = time.time()
-        print(start_time)
-
-        face_objs = DeepFace.extract_faces(img_path = imgfile, enforce_detection = False, detector_backend = dbackend)
-
-        numberoffaces = len(face_objs)
-        jsontext = ''
-        global faceannotations
-        faceannotations = []
-        for i, face_obj in enumerate(face_objs,1):
-            face_coordinates = (face_obj["facial_area"]["x"],face_obj["facial_area"]["y"], (face_obj["facial_area"]["x"] + face_obj["facial_area"]["w"]),(face_obj["facial_area"]["y"] + face_obj["facial_area"]["h"]))
-            face_confidence = "Face " + str(i) + ": "+ "{:.0%}".format(face_obj["confidence"])
-            face_result=[face_coordinates,face_confidence]
-            faceannotations.append(face_result)
-
-        #jsontext=faceannotations
-        #jsontext=face_objs
-        run_time = str(round((time.time() - start_time),2))
-        results = gr.AnnotatedImage(
-            label= "Detected " + str(numberoffaces) + " faces via " + dbackend + ' in ' + run_time + ' seconds.',
-            value=(imgfile, faceannotations)
+            face_objs = DeepFace.extract_faces(img_path = imgfile, enforce_detection = False, detector_backend = dbackend)
+
+            numberoffaces = len(face_objs)
+            jsontext = ''
+            global faceannotations
+            faceannotations = []
+            for i, face_obj in enumerate(face_objs,1):
+                face_coordinates = (face_obj["facial_area"]["x"],face_obj["facial_area"]["y"], (face_obj["facial_area"]["x"] + face_obj["facial_area"]["w"]),(face_obj["facial_area"]["y"] + face_obj["facial_area"]["h"]))
+                face_confidence = "Face " + str(i) + ": "+ "{:.0%}".format(face_obj["confidence"])
+                face_result=[face_coordinates,face_confidence]
+                faceannotations.append(face_result)
+
+            #jsontext=faceannotations
+            #jsontext=face_objs
+            run_time = str(round((time.time() - start_time),2))
+            results = gr.AnnotatedImage(
+                label= "Detected " + str(numberoffaces) + " faces via " + dbackend + ' in ' + run_time + ' seconds.',
+                value=(imgfile, faceannotations)
+            )
+
+            print(run_time)
+            return(results,numberoffaces,run_time)
+
+        dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',info=dbackendinfo,container=True,value='retinaface')
+        gr.Interface(
+            allow_flagging = "never",
+            fn=findFaces,
+            inputs=[input_image, dbackendchoice],
+            outputs=[annotated_image,selected_face_info,selected_face_pic],
         )
 
-        print(run_time)
-        return(results,numberoffaces,run_time,)
+        def select_section(evt: gr.SelectData):
+            cropped_image = np.array(Image.open(input_image.value['path']))
+            cropped_image = cropped_image[faceannotations[evt.index][0][1]:faceannotations[evt.index][0][3], faceannotations[evt.index][0][0]:faceannotations[evt.index][0][2]]
+            return faceannotations[evt.index], cropped_image
+
+
+        annotated_image.select(select_section, None, [selected_face_info,selected_face_pic])
+
+    with gr.Tab("Find Named Person in All Images"):
+        with gr.Row():
+
+            named_people_dropdown = []
+            for named_person in get_named_people():
+                named_people_dropdown.append(named_person.replace("_"," "))
+            find_list = gr.Dropdown(named_people_dropdown, label="Person", info="Select a Named Person."),
+            find_button = gr.Button(value="Find this person")
+
 
-    dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',info=dbackendinfo,container=True,value='retinaface')
-    gr.Interface(
-        allow_flagging = "never",
-        fn=findFaces,
-        inputs=[input_image, dbackendchoice],
-        outputs=[annotated_image,selected_face_info,selected_face_pic],
-    )
-    def select_section(evt: gr.SelectData):
-        cropped_image = np.array(Image.open(input_image.value['path']))
-        cropped_image = cropped_image[faceannotations[evt.index][0][1]:faceannotations[evt.index][0][3], faceannotations[evt.index][0][0]:faceannotations[evt.index][0][2]]
-        return faceannotations[evt.index], cropped_image
+    with gr.Tab("Identify People in One Image"):
+        oneimageannotations = []
+        def identify_in_one_image(imgfile):
+            oneimageresults = DeepFace.find(img_path=imgfile, db_path="db")
+            oneimageresults = pd.concat(oneimageresults)
+            for i, found_face in oneimageresults.iterrows():
+                face_coordinates = (found_face["source_x"],found_face["source_y"], (found_face["source_x"] + found_face["source_w"]),(found_face["source_y"] + found_face["source_h"]))
+                person = found_face["identity"].split("/")[1].replace("_"," ")
+                face_confidence = "Matched " + person + " {:.0%}".format(found_face["distance"])
+                face_result=[face_coordinates,face_confidence]
+                oneimageannotations.append(face_result)
+
+            results = gr.AnnotatedImage(
+                value=(imgfile, oneimageannotations)
+            )
+            return results, oneimageannotations
 
-    annotated_image.select(select_section, None, [selected_face_info,selected_face_pic])
+        oneimage_input_image = gr.Image()
+
+        found_faces=gr.AnnotatedImage()
+        debug_output = gr.Textbox()
+        gr.Interface(
+            allow_flagging = "never",
+            fn=identify_in_one_image,
+            inputs=oneimage_input_image,
+            outputs=[found_faces, debug_output]
+        )
+
+
+
+
+
+    with gr.Tab("Modify Named Person") as ModifyNamedPersonTab:
+
+
+        def get_named_people_dropdown():
+            named_people_gallery_imgs = []
+            named_people_gallery_captions = []
+            for named_person in get_named_people():
+                #named_person = named_person.replace("_"," ")
+                named_people_gallery_imgs.append("NamedPeople/" + named_person.replace("_","") + ".jpg")
+                named_people_gallery_captions.append(named_person.replace("_"," "))
+            named_people_gallery_all = list(zip(named_people_gallery_imgs, named_people_gallery_captions))
+            images = named_people_gallery_all
 
+            images.sort(key=itemgetter(1))
+            return images
 
+        named_person_gallery = gr.Gallery(
+            label="Named People", elem_id="gallery", object_fit="none", columns=9)
+
+        ModifyNamedPersonTab.select(get_named_people_dropdown, None, named_person_gallery)
+
+        #jsontext = gr.Text(label= "deepface extract_faces results")
+
+
+
+
 
 demo.launch(show_error=True)
 
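For reference, a minimal standalone sketch of the recognition path this commit adds, outside of Gradio. It assumes the db/<Person_Name>/ folder layout that get_named_people() and DeepFace.find(db_path="db") rely on; the identify() helper name and the example image path "group_photo.jpg" are hypothetical, introduced only for illustration.

# Standalone sketch mirroring the commit's get_named_people() and identify_in_one_image() logic.
import os
import pandas as pd
from deepface import DeepFace

def get_named_people(db_path="db"):
    # Each subdirectory of db/ is treated as one named person (e.g. db/Jane_Doe/*.jpg).
    return next(os.walk(db_path))[1]

def identify(img_path, db_path="db"):
    # DeepFace.find returns one DataFrame per detected face; the commit concatenates
    # them and reads the identity, source_x/y/w/h, and distance columns.
    matches = pd.concat(DeepFace.find(img_path=img_path, db_path=db_path))
    annotations = []
    for _, row in matches.iterrows():
        box = (row["source_x"], row["source_y"],
               row["source_x"] + row["source_w"],
               row["source_y"] + row["source_h"])
        person = row["identity"].split("/")[1].replace("_", " ")
        # As in the commit, the raw distance is formatted with "{:.0%}"; lower distance
        # means a closer match, so this label is not a true confidence percentage.
        annotations.append((box, "Matched " + person + " {:.0%}".format(row["distance"])))
    return annotations

if __name__ == "__main__":
    print(get_named_people())           # e.g. ['Jane_Doe', 'John_Smith']
    print(identify("group_photo.jpg"))  # hypothetical test image

The returned (box, label) pairs have the same shape as the annotations the app feeds to gr.AnnotatedImage, so the sketch can be used to check the db/ contents before running the Space.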