Spaces: Running on A10G
rynmurdock committed · commit a36ac2f · 1 parent: b27e8d4
Update app.py

app.py CHANGED
@@ -52,6 +52,7 @@ pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=to
 pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
 
 pipe.to(device=DEVICE)
+pipe.set_progress_bar_config(disable=True)
 
 @spaces.GPU
 def compile_em():
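The only functional change in this hunk is silencing diffusers' per-step progress bar. For context, a minimal sketch of the pipeline setup being modified; the checkpoint id and fp16 dtype here are assumptions, not taken from the commit (the hunk header truncates the real `torch_dtype`):

```python
# Sketch only: "stabilityai/sdxl-turbo" and float16 are assumed.
# The last line is the one this hunk adds.
import torch
from diffusers import AutoencoderTiny, DiffusionPipeline, EulerDiscreteScheduler

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

pipe = DiffusionPipeline.from_pretrained('stabilityai/sdxl-turbo',
                                         torch_dtype=torch.float16)
pipe.vae = AutoencoderTiny.from_pretrained('madebyollin/taesdxl',
                                           torch_dtype=torch.float16)
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config,
                                                    timestep_spacing='trailing')
pipe.to(device=DEVICE)
pipe.set_progress_bar_config(disable=True)  # stop tqdm bars flooding Space logs
```

Disabling the bar matters in a Space because every generation otherwise writes a tqdm progress line to the container logs.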
@@ -111,11 +112,11 @@ compile_em()
 @spaces.GPU
 def generate(prompt, in_embs=None,):
     if prompt != '':
-        print(prompt)
+        # # print(prompt)
         in_embs = in_embs / in_embs.abs().max() * .15 if in_embs != None else None
         in_embs = .9 * in_embs.to('cuda') + .5 * autoencoder.embed(prompt).to('cuda') if in_embs != None else autoencoder.embed(prompt).to('cuda')
     else:
-        print('From embeds.')
+        # print('From embeds.')
         in_embs = in_embs / in_embs.abs().max() * .15
         text = autoencoder.generate_from_latent(in_embs.to('cuda').to(dtype=torch.bfloat16), temperature=.3, top_p=.99, min_new_tokens=5)
         in_embs = autoencoder.embed(text).to('cuda')
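The hunk above only comments out `generate()`'s prints; the surrounding logic rescales any incoming latent so its largest component is 0.15, then blends it 0.9/0.5 with the prompt embedding. A sketch of that step, using `logging` in place of the silenced prints (an alternative this commit does not take; `autoencoder` is the app's text autoencoder):

```python
import logging
import torch

logger = logging.getLogger('app')  # raise to DEBUG to restore the old traces

def blend_with_prompt(in_embs, prompt, autoencoder):
    # mirrors the prompt != '' branch of generate()
    logger.debug('prompt: %s', prompt)             # was: print(prompt)
    if in_embs is None:
        return autoencoder.embed(prompt).to('cuda')
    in_embs = in_embs / in_embs.abs().max() * .15  # cap largest |component| at 0.15
    return .9 * in_embs.to('cuda') + .5 * autoencoder.embed(prompt).to('cuda')
```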
@@ -175,7 +176,7 @@ def get_coeff(embs_local, ys):
     # this ends up adding a rating but losing an embedding, it seems.
     # let's take off a rating if so to continue without indexing errors.
     if len(ys) > len(embs_local):
-        print('ys are longer than embs; popping latest rating')
+        # print('ys are longer than embs; popping latest rating')
         ys.pop(-1)
 
     # also add the latest 0 and the latest 1
@@ -194,7 +195,7 @@ def get_coeff(embs_local, ys):
     feature_embs = np.array(torch.cat([embs_local[i].to('cpu') for i in indices]).to('cpu'))
     scaler = preprocessing.StandardScaler().fit(feature_embs)
     feature_embs = scaler.transform(feature_embs)
-    print(len(feature_embs), len(ys))
+    # print(len(feature_embs), len(ys))
 
     lin_class = SVC(max_iter=50000, kernel='linear', class_weight='balanced', C=.1).fit(feature_embs, np.array([ys[i] for i in indices]))
     coef_ = torch.tensor(lin_class.coef_, dtype=torch.double)
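The print silenced here sits inside the preference fit: rated embeddings are standardized, a linear SVM is trained on the like/dislike labels, and its weight vector is read off as a steering direction. A self-contained sketch mirroring the calls visible in the hunk:

```python
import numpy as np
import torch
from sklearn import preprocessing
from sklearn.svm import SVC

def preference_direction(feature_embs: np.ndarray, ys: list) -> torch.Tensor:
    # standardize features so the SVM margin is not dominated by scale
    scaler = preprocessing.StandardScaler().fit(feature_embs)
    feature_embs = scaler.transform(feature_embs)
    lin_class = SVC(max_iter=50000, kernel='linear',
                    class_weight='balanced', C=.1).fit(feature_embs, np.array(ys))
    # with a linear kernel, coef_ is the normal of the separating hyperplane
    return torch.tensor(lin_class.coef_, dtype=torch.double)
```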
@@ -222,16 +223,16 @@ def next_image(embs, img_embs, ys, calibrate_prompts):
 
     with torch.no_grad():
         if len(calibrate_prompts) > 0:
-            print('######### Calibrating with sample prompts #########')
+            # print('######### Calibrating with sample prompts #########')
             prompt = calibrate_prompts.pop(0)
-            print(prompt)
+            # print(prompt)
             image, img_emb = predict(prompt)
             im_emb = autoencoder.embed(prompt)
             embs.append(im_emb)
             img_embs.append(img_emb)
             return image, embs, img_embs, ys, calibrate_prompts, prompt
         else:
-            print('######### Roaming #########')
+            # print('######### Roaming #########')
 
             pos_indices = [i for i in range(len(embs)) if ys[i] == 1]
             neg_indices = [i for i in range(len(embs)) if ys[i] == 0]
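Context for the two silenced banners: `next_image()` first drains a queue of seed prompts (calibrating), then switches to roaming, where past ratings are split into positive and negative index sets feeding the SVM fit above. A stripped-down sketch of that control flow; the seed prompts and embeddings are placeholders:

```python
import torch

calibrate_prompts = ['a photo of a dog', 'an oil painting of the sea']  # placeholders
ys = [1, 0]                                   # 1 = liked, 0 = disliked
embs = [torch.zeros(1, 8), torch.ones(1, 8)]  # stand-in embeddings, one per image

if len(calibrate_prompts) > 0:
    prompt = calibrate_prompts.pop(0)  # calibrating: consume seeds in order
else:
    # roaming: partition rated embeddings for the preference fit
    pos_indices = [i for i in range(len(embs)) if ys[i] == 1]
    neg_indices = [i for i in range(len(embs)) if ys[i] == 0]
```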
@@ -251,7 +252,7 @@ def next_image(embs, img_embs, ys, calibrate_prompts):
 
             prompt = '' if not glob_idx % 3 == 0 else rng_prompt
             prompt, _ = generate(prompt, in_embs=im_s)
-            print(prompt)
+            # print(prompt)
             im_emb = autoencoder.embed(prompt)
             embs.append(im_emb)
 
@@ -298,7 +299,7 @@ def choose(img, choice, embs, img_embs, ys, calibrate_prompts):
         choice = 0
 
     if img is None:
-        print('NSFW -- choice is disliked')
+        # print('NSFW -- choice is disliked')
         choice = 0
 
     ys.append(choice)
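Finally, the NSFW print lives in `choose()`'s fallback: when the safety checker blanks the image, the rating is forced to a dislike so filtered outputs never count as positives. A sketch of that guard:

```python
def record_choice(img, choice, ys):
    # safety checker returned no image: score the round as a dislike
    if img is None:
        choice = 0
    ys.append(choice)
    return ys
```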