Update app.py
app.py CHANGED
@@ -234,22 +234,24 @@ def gen_fn(method, seed, psi):
     z = RandomState(int(seed) + 2 ** 31).randn(1, 512) if method == 1 else np.random.randn(1, 512)
     w = model.get_w(z.astype(dtype=np.float32), psi)
     img_out = model.get_img(w)
-    return img_out,
+    return img_out, w, img_out
 
 
 def encode_img_fn(img):
+    if img is None:
+        return "please upload a image", None, None, None, None
     imgs = model.detect(img, 0.2, 0.03)
     if len(imgs) == 0:
         return "failed to detect waifu", None, None, None, None
     w = model.encode_img(imgs[0])
     img_out = model.get_img(w)
-    return "success", imgs[0], img_out,
+    return "success", imgs[0], img_out, w, img_out
 
 
 def gen_video_fn(w1, w2, frame):
-    if w1 is None or w2 is None
+    if w1 is None or w2 is None:
         return None
-    model.gen_video(
+    model.gen_video(w1, w2, "video.mp4",
                     int(frame))
     return "video.mp4"
 
@@ -260,7 +262,7 @@ if __name__ == '__main__':
     app = gr.Blocks()
     with app:
         gr.Markdown("# full-body anime\n\n"
-                    "the model is not
+                    "the model is not well, just use for fun.")
         with gr.Tabs():
             with gr.TabItem("generate image"):
                 with gr.Row():
@@ -270,11 +272,11 @@ if __name__ == '__main__':
                         gen_input2 = gr.Number(value=1, label="seed")
                         gen_input3 = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.6, label="truncation psi")
                         with gr.Group():
-                            gen_submit = gr.Button("
+                            gen_submit = gr.Button("Generate")
                     with gr.Column():
                         gen_output1 = gr.Image()
-                        select_img_input_w1 = gr.
-                        select_img_input_img1 = gr.
+                        select_img_input_w1 = gr.Variable()
+                        select_img_input_img1 = gr.Variable()
 
             with gr.TabItem("encode image"):
                 with gr.Row():
@@ -288,8 +290,8 @@ if __name__ == '__main__':
                         with gr.Row():
                             encode_img_output2 = gr.Image(label="detected")
                             encode_img_output3 = gr.Image(label="encoded")
-                            select_img_input_w2 = gr.
-                            select_img_input_img2 = gr.
+                            select_img_input_w2 = gr.Variable()
+                            select_img_input_img2 = gr.Variable()
 
             with gr.TabItem("generate video"):
                 with gr.Row():
@@ -304,7 +306,7 @@ if __name__ == '__main__':
                         with gr.Group():
                             select_img1_button = gr.Button("select")
                             select_img1_output_img = gr.Image(label="image 1")
-                            select_img1_output_w = gr.
+                            select_img1_output_w = gr.Variable()
                     with gr.Column():
                         gr.Markdown("please select image 2")
                         select_img2_dropdown = gr.Radio(label="source",
@@ -313,10 +315,10 @@ if __name__ == '__main__':
                         with gr.Group():
                             select_img2_button = gr.Button("select")
                             select_img2_output_img = gr.Image(label="image 2")
-                            select_img2_output_w = gr.
-                        generate_video_frame = gr.Slider(minimum=10, maximum=30, step=1, label="frame", value=
+                            select_img2_output_w = gr.Variable()
+                        generate_video_frame = gr.Slider(minimum=10, maximum=30, step=1, label="frame", value=15)
                         with gr.Group():
-                            generate_video_button = gr.Button("
+                            generate_video_button = gr.Button("Generate")
                     with gr.Column():
                         generate_video_output = gr.Video()
 
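Note: the commit extends the return tuples of gen_fn and encode_img_fn with the latent w and adds hidden gr.Variable() holders, but the .click() wiring that routes those extra outputs into the holders lies outside the changed hunks. A minimal sketch of how that wiring would typically look in a Gradio Blocks app is below; component names not visible in the diff (gen_input1, encode_img_input, encode_img_submit, encode_img_output1) are assumptions for illustration, not taken from this commit.

# Hypothetical wiring sketch (not part of this commit).
# gen_fn now returns (img_out, w, img_out): the displayed image plus the latent
# and image stashed in Variables so the "generate video" tab can reuse them.
gen_submit.click(gen_fn,
                 inputs=[gen_input1, gen_input2, gen_input3],   # gen_input1 assumed (method selector)
                 outputs=[gen_output1, select_img_input_w1, select_img_input_img1])

# encode_img_fn now returns (status, detected, encoded, w, encoded), matching
# the two extra Variable outputs added in the "encode image" tab.
encode_img_submit.click(encode_img_fn,                          # encode_img_submit / encode_img_input / encode_img_output1 assumed
                        inputs=[encode_img_input],
                        outputs=[encode_img_output1, encode_img_output2, encode_img_output3,
                                 select_img_input_w2, select_img_input_img2])

# gen_video_fn interpolates between the two selected latents; the new None
# guard simply leaves the Video component empty if either latent is missing.
generate_video_button.click(gen_video_fn,
                            inputs=[select_img1_output_w, select_img2_output_w, generate_video_frame],
                            outputs=[generate_video_output])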