Spaces:
Running
Running
harshasurampudi
committed on
Commit
•
4e4a30c
1
Parent(s):
a2a302d
Upload 3 files
Browse files- .gitattributes +1 -0
- Female.png +3 -0
- age.pkl +3 -0
- app.py +29 -5
.gitattributes
CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
35 |
+
Female.png filter=lfs diff=lfs merge=lfs -text
|
Female.png
ADDED
Git LFS Details
|
age.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:96f96f8ff12b7c675847480b186cfa1da984faafc8a0f184db7cfadfe4d9eebc
|
3 |
+
size 87833539
|
app.py
CHANGED
@@ -1,23 +1,47 @@
|
|
1 |
from fastai.vision.all import *
|
2 |
import gradio as gr
|
|
|
|
|
3 |
|
4 |
def label_func(fname):
|
5 |
if int(str(fname)[str(fname).index('_')+1]) == 0:
|
6 |
return "Male"
|
7 |
return "Female"
|
8 |
|
9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
categories = ('Female', 'Male')
|
12 |
|
|
|
|
|
|
|
|
|
|
|
13 |
def classify_image(img):
|
14 |
-
pred, idx, probs =
|
15 |
return dict(zip(categories, map(float, probs)))
|
16 |
|
|
|
|
|
|
|
|
|
|
|
17 |
image = gr.inputs.Image(shape=(192,192))
|
18 |
-
|
19 |
-
|
|
|
|
|
20 |
|
21 |
|
22 |
-
iface = gr.Interface(fn=
|
23 |
iface.launch()
|
|
|
1 |
from fastai.vision.all import *
import gradio as gr
import cv2

# Haar-cascade face detector used by detect_face() below; the XML model
# is expected to ship with the Space under models/.
# NOTE(review): no existence check on the path — if the file is missing,
# CascadeClassifier silently loads empty and detectMultiScale finds nothing.
classifier = cv2.CascadeClassifier('models/haarcascade_frontalface_alt2.xml')
5 |
|
6 |
def label_func(fname):
    """Label a UTKFace-style filename as "Male" or "Female".

    The single character right after the first underscore encodes gender:
    '0' means male, anything else female. Kept at module level because the
    exported learners reference it when unpickled.
    """
    name = str(fname)
    gender_digit = int(name[name.index('_') + 1])
    return "Male" if gender_digit == 0 else "Female"
10 |
|
11 |
+
def get_age(fname):
    """Extract the age from a UTKFace-style path.

    Filenames look like ``<age>_<gender>_....jpg``; the age is the leading
    integer of the final path component. Kept at module level because the
    exported age learner references it when unpickled.

    Fix: the original used ``split('/')[1]``, which raises IndexError for a
    bare filename and picks the wrong component when the path has more than
    one directory level; taking the last component handles any depth.
    """
    return int(str(fname).rsplit('/', 1)[-1].split('_')[0])
13 |
+
|
14 |
+
def detect_face(img):
    """Crop the first face detected in *img* (a numpy image array).

    Uses the module-level Haar-cascade ``classifier``. detectMultiScale
    returns an empty sequence when nothing is found; the original indexed
    ``faces[0]`` unconditionally and raised IndexError in that case. Here
    we fall back to returning the full image so the downstream age model
    still receives an input (best-effort behavior for an interactive demo).
    """
    faces = classifier.detectMultiScale(img)
    if len(faces) == 0:
        # No face found — degrade gracefully instead of crashing the app.
        return img
    x, y, w, h = faces[0]
    return img[y:y + h, x:x + w]
19 |
+
|
20 |
+
# Load the two exported fastai learners from the Space's working directory.
# NOTE(review): load_learner unpickles these files, which requires the
# label functions (label_func / get_age) to already be defined above.
learn_gender = load_learner('gender.pkl')
learn_age = load_learner('age.pkl')
22 |
|
23 |
# Class order must match the vocab ordering of the gender learner,
# since classify_image zips these labels with the probability vector.
categories = ('Female', 'Male')
24 |
|
25 |
+
def predict_age(img):
    """Predict age from *img*: returns (age_as_str, cropped_face_array).

    Crops the face first so the age model sees only the face region.
    """
    face = detect_face(img)
    prediction, _, _ = learn_age.predict(face)
    return str(prediction[0]), face
29 |
+
|
30 |
def classify_image(img):
    """Return a {label: probability} mapping from the gender learner."""
    _, _, probabilities = learn_gender.predict(img)
    return {label: float(p) for label, p in zip(categories, probabilities)}
33 |
|
34 |
+
def process_image(img):
    """Full pipeline for the Gradio interface.

    Returns (gender_probabilities, predicted_age_str, cropped_face) in the
    order expected by the interface's output components.
    """
    gender_probs = classify_image(img)
    predicted_age, cropped_face = predict_age(img)
    return gender_probs, predicted_age, cropped_face
38 |
+
|
39 |
# --- Gradio UI wiring (legacy pre-3.x API: gr.inputs / gr.outputs) ---
# NOTE(review): gr.inputs.Image and the gr.outputs.* classes were removed
# in Gradio 3; if the Space's gradio version is upgraded these must migrate
# to gr.Image / gr.Label / gr.Textbox.
image = gr.inputs.Image(shape=(192,192))
gender_output = gr.outputs.Label()
age_output = gr.outputs.Textbox(label='Predicted Age')
detected_face_output = gr.outputs.Image(type='numpy', label='Detected Face')
# Example images expected alongside app.py in the Space repo.
examples = ['Male.jpg', 'Female.png']


# Output order must match what process_image returns.
iface = gr.Interface(fn=process_image, inputs=image, outputs=[gender_output, age_output, detected_face_output], examples=examples)
iface.launch()