kritik committed on
Commit 25c3a48 • 1 Parent(s): 48b5b4a

Update app.py

Files changed (1)
  1. app.py +27 -15
app.py CHANGED
@@ -1,10 +1,12 @@
 import os
 import pathlib
+import glob
 
 import random
 from PIL import Image
+import plotly.graph_objects as go
+
 import torch
-import glob
 import numpy as np
 from basicsr.utils import imwrite
 from gfpgan import GFPGANer
@@ -31,27 +33,28 @@ def enhancer(image):
     cropped_faces, restored_faces, restored_img = restorer.enhance(
         image, has_aligned=False, only_center_face=False, paste_back=True)
 
-    return Image.fromarray(restored_faces[0]), np.array(restored_img)
+    return Image.fromarray(restored_faces[0])
 
-def inference(img1, img2):
+def inference(img1, img2, tolerance):
 
     img1 = face_recognition.face_encodings(img1)[0]
     img2 = face_recognition.face_encodings(img2)[0]
 
-    result = face_recognition.compare_faces([img1], img2)
-
-    if result[0]:
+    confidence = face_recognition.face_distance([img1], img2)[0]
+
+    if confidence <= tolerance:
         result = 'People in the two images are same!'
     else:
         result = 'People in the two images are different!'
-
-    return result
+
+    return 1-confidence, result
 
 def main():
 
     st.set_page_config(page_title='QFace.ai', page_icon='🤖', layout='centered')
-    st.title('QFace.ai')
+    st.title('Qface.ai')
     col1, col2 = st.columns(2)
+    tolerance = 0.6
 
     with st.form('Input Form'):
 
@@ -70,8 +73,8 @@ def main():
 
         with col1:
             img1 = Image.open(img1)
-            eimg1_face, eimg1 = enhancer(img1)
-            image_comparison(img1, eimg1_face,
+            eimg1 = enhancer(img1)
+            image_comparison(img1, eimg1,
                              width=350,
                              label1='Before',
                              label2='After',
@@ -79,16 +82,25 @@
                              make_responsive=False)
         with col2:
             img2 = Image.open(img2)
-            eimg2_face, eimg2 = enhancer(img2)
-            image_comparison(img2, eimg2_face,
+            eimg2 = enhancer(img2)
+            image_comparison(img2, eimg2,
                              width=350,
                              label1='Before',
                              label2='After',
                              show_labels=True,
                              make_responsive=False)
 
-        result = inference(np.array(img1), np.array(img2))
-        st.write(result)
+        confidence, result = inference(np.array(eimg1), np.array(eimg2), tolerance)
+        st.subheader(result)
+
+        fig = go.Figure(go.Indicator(domain = {'x': [0, 1], 'y': [0, 1]},
+                                     value = round(confidence, 3),
+                                     mode = 'gauge+number',
+                                     title = {'text': 'Confidence'},
+                                     delta = {'reference': 0.5},
+                                     gauge = {'axis': {'range': [None, 1]}}))
+
+        st.plotly_chart(fig, use_container_width=True)
 
 
 if __name__ == '__main__':
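
The restorer object that enhancer() calls is constructed outside the hunks shown above. For context, a minimal sketch of a GFPGANer setup compatible with that enhance() call; the weights path, upscale factor, input path, and bg_upsampler=None here are assumptions of the sketch, not taken from this commit:

import numpy as np
from PIL import Image
from gfpgan import GFPGANer

# Assumptions of this sketch: v1.3 weights and the input image are available locally.
restorer = GFPGANer(
    model_path='GFPGANv1.3.pth',   # assumed local weights file
    upscale=2,                     # assumed output scale factor
    arch='clean',                  # architecture matching the v1.3 weights
    channel_multiplier=2,
    bg_upsampler=None)             # no background upsampler, to keep the sketch minimal

image = np.array(Image.open('face.jpg'))   # placeholder input path

# Same call shape as in enhancer(): returns the cropped faces, the restored
# faces, and the whole restored image as numpy arrays.
cropped_faces, restored_faces, restored_img = restorer.enhance(
    image, has_aligned=False, only_center_face=False, paste_back=True)
restored_face = Image.fromarray(restored_faces[0])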
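
The switch from compare_faces to face_distance in inference() keeps the same decision rule: compare_faces is itself a distance check against a tolerance (0.6 by default, the same value now hard-coded in main()), while face_distance exposes the raw distance used here as a confidence score. A minimal standalone sketch of that comparison; the file paths and the no-face guard are assumptions of this example, not part of the commit:

import face_recognition

def compare(path1, path2, tolerance=0.6):
    # face_encodings returns a (possibly empty) list of 128-d encodings,
    # so guard before indexing the first face as the app does.
    enc1 = face_recognition.face_encodings(face_recognition.load_image_file(path1))
    enc2 = face_recognition.face_encodings(face_recognition.load_image_file(path2))
    if not enc1 or not enc2:
        raise ValueError('no face detected in one of the images')

    # Euclidean distance between the two encodings; compare_faces() is
    # effectively face_distance() <= tolerance.
    distance = face_recognition.face_distance([enc1[0]], enc2[0])[0]
    return 1 - distance, distance <= tolerance

# Placeholder paths, for illustration only.
confidence, same_person = compare('person_a.jpg', 'person_b.jpg')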
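
One note on the new gauge: Plotly's Indicator renders the delta readout only when 'delta' is included in mode, so with mode='gauge+number' the delta = {'reference': 0.5} setting has no visible effect. If the delta against the 0.5 reference is wanted, a variant like the following would show it (the value below is a placeholder standing in for round(confidence, 3)):

import plotly.graph_objects as go

fig = go.Figure(go.Indicator(
    domain={'x': [0, 1], 'y': [0, 1]},
    value=0.763,                    # placeholder confidence value
    mode='gauge+number+delta',      # 'delta' must be listed for the reference readout to render
    title={'text': 'Confidence'},
    delta={'reference': 0.5},
    gauge={'axis': {'range': [None, 1]}}))
fig.show()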