Spaces:
Paused
Paused
add ethnicity detection to select a matching portrait
Browse files
- app.py +27 -9
- requirements.txt +1 -0
app.py
CHANGED
@@ -8,6 +8,7 @@ from contextlib import nullcontext
|
|
8 |
import requests
|
9 |
import functools
|
10 |
import random
|
|
|
11 |
|
12 |
from ldm.models.diffusion.ddim import DDIMSampler
|
13 |
from ldm.models.diffusion.plms import PLMSSampler
|
@@ -28,10 +29,13 @@ model = load_model_from_config(config, ckpt, device=device, verbose=False)
|
|
28 |
model = model.to(device).half()
|
29 |
|
30 |
clip_model, preprocess = clip.load("ViT-L/14", device=device)
|
|
|
31 |
gender_learn = load_learner('gender_model.pkl')
|
32 |
gender_labels = gender_learn.dls.vocab
|
33 |
beard_learn = load_learner('facial_hair_model.pkl')
|
34 |
beard_labels = beard_learn.dls.vocab
|
|
|
|
|
35 |
|
36 |
n_inputs = 5
|
37 |
|
@@ -128,7 +132,11 @@ def is_female(img):
|
|
128 |
|
129 |
def has_beard(img):
|
130 |
pred,pred_idx,probs = beard_learn.predict(img)
|
131 |
-
return float(probs[
|
|
|
|
|
|
|
|
|
132 |
|
133 |
|
134 |
import gradio
|
@@ -136,15 +144,24 @@ import gradio
|
|
136 |
def boutsify(person):
|
137 |
portrait_path = "bouts_m1.jpg"
|
138 |
female_detected = is_female(person)
|
|
|
139 |
|
140 |
-
if
|
141 |
-
print("
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
148 |
|
149 |
person_image = Image.fromarray(person)
|
150 |
|
@@ -160,6 +177,7 @@ def boutsify(person):
|
|
160 |
3.0, 1, random.randrange(0, 10000), 50,
|
161 |
]
|
162 |
|
|
|
163 |
return run_image_mixer(inputs)
|
164 |
|
165 |
gradio_interface = gradio.Interface(
|
|
|
8 |
import requests
|
9 |
import functools
|
10 |
import random
|
11 |
+
import timm
|
12 |
|
13 |
from ldm.models.diffusion.ddim import DDIMSampler
|
14 |
from ldm.models.diffusion.plms import PLMSSampler
|
|
|
29 |
model = model.to(device).half()
|
30 |
|
31 |
clip_model, preprocess = clip.load("ViT-L/14", device=device)
|
32 |
+
|
33 |
gender_learn = load_learner('gender_model.pkl')
|
34 |
gender_labels = gender_learn.dls.vocab
|
35 |
beard_learn = load_learner('facial_hair_model.pkl')
|
36 |
beard_labels = beard_learn.dls.vocab
|
37 |
+
ethnic_learn = load_learner('ethnic_model.pkl')
|
38 |
+
ethnic_labels = ethnic_learn.dls.vocab
|
39 |
|
40 |
n_inputs = 5
|
41 |
|
|
|
132 |
|
def has_beard(img):
    """Return True when the facial-hair classifier favors the beard class.

    Parameters
    ----------
    img : input accepted by fastai ``Learner.predict`` (e.g. a NumPy array
        or PIL image of a face).

    Returns
    -------
    bool
        True if the probability at class index 1 exceeds the probability
        at class index 0.
        NOTE(review): this assumes index 1 is the "beard" class in
        ``beard_labels`` (``beard_learn.dls.vocab``) — confirm, since a
        different vocab ordering silently inverts the result.
    """
    # Only the per-class probabilities are needed; the decoded label and
    # argmax index returned by predict() are discarded.
    _, _, probs = beard_learn.predict(img)
    return float(probs[1]) > float(probs[0])
|
136 |
+
|
def ethnicity(img):
    """Return the ethnicity label predicted by the ``ethnic_learn`` model.

    Parameters
    ----------
    img : input accepted by fastai ``Learner.predict`` (e.g. a NumPy array
        or PIL image of a face).

    Returns
    -------
    The decoded class label (first element of ``Learner.predict``'s
    3-tuple), drawn from ``ethnic_labels`` — the caller compares it
    against the string "Black".
    """
    # predict() returns (decoded_label, argmax_index, probabilities);
    # only the decoded label is used here.
    pred, _, _ = ethnic_learn.predict(img)
    return pred
|
140 |
|
141 |
|
142 |
import gradio
|
|
|
144 |
def boutsify(person):
|
145 |
portrait_path = "bouts_m1.jpg"
|
146 |
female_detected = is_female(person)
|
147 |
+
ethnicity_prediction = ethnicity(person)
|
148 |
|
149 |
+
if ethnicity_prediction == "Black":
|
150 |
+
print("Colored person")
|
151 |
+
if female_detected:
|
152 |
+
print("This is a female portrait")
|
153 |
+
portrait_path = "bouts_fc1.jpg"
|
154 |
+
else:
|
155 |
+
portrait_path = "bouts_mc1.jpg"
|
156 |
+
else:
|
157 |
+
if female_detected:
|
158 |
+
print("This is a female portrait")
|
159 |
+
portrait_path = "bouts_f1.jpg"
|
160 |
+
else:
|
161 |
+
print("This is a male portrait, checking facial hair")
|
162 |
+
if has_beard(person):
|
163 |
+
print("The person has a beard")
|
164 |
+
portrait_path = "bouts_mb1.jpg"
|
165 |
|
166 |
person_image = Image.fromarray(person)
|
167 |
|
|
|
177 |
3.0, 1, random.randrange(0, 10000), 50,
|
178 |
]
|
179 |
|
180 |
+
#return person
|
181 |
return run_image_mixer(inputs)
|
182 |
|
183 |
gradio_interface = gradio.Interface(
|
requirements.txt
CHANGED
@@ -20,6 +20,7 @@ fire==0.4.0
|
|
20 |
diffusers==0.3.0
|
21 |
datasets[vision]==2.4.0
|
22 |
fastai==2.7.11
|
|
|
23 |
-e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
24 |
-e git+https://github.com/openai/CLIP.git@main#egg=clip
|
25 |
-e git+https://github.com/justinpinkney/nomi.git@e9ded23b7e2269cc64d39683e1bf3c0319f552ab#egg=nomi
|
|
|
20 |
diffusers==0.3.0
|
21 |
datasets[vision]==2.4.0
|
22 |
fastai==2.7.11
|
23 |
+
timm==0.6.13
|
24 |
-e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers
|
25 |
-e git+https://github.com/openai/CLIP.git@main#egg=clip
|
26 |
-e git+https://github.com/justinpinkney/nomi.git@e9ded23b7e2269cc64d39683e1bf3c0319f552ab#egg=nomi
|