Spaces:
Runtime error
Runtime error
dreamdrop-art
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -123,6 +123,10 @@ class Prodia:
|
|
123 |
response = self._get(f"{self.base}/sdxl/samplers")
|
124 |
return response.json()
|
125 |
|
|
|
|
|
|
|
|
|
126 |
def _post(self, url, params):
|
127 |
headers = {
|
128 |
**self.headers,
|
@@ -335,8 +339,25 @@ def xl_img2img(input_image, denoising, prompt, negative_prompt, model, steps, sa
|
|
335 |
|
336 |
|
337 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
338 |
|
339 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
340 |
|
341 |
css = """
|
342 |
#generate {
|
@@ -624,6 +645,15 @@ with gr.Blocks(css=css) as demo:
|
|
624 |
|
625 |
hf_text_button.click(hf_inference, inputs=[hf_prompt, hf_negative_prompt, hf_model, hf_steps, sampler, hf_cfg_scale, hf_width, hf_height,
|
626 |
hf_seed], outputs=hf_image_output, concurrency_limit=64)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
627 |
with gr.Tab("Prompt Generator"):
|
628 |
gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
|
629 |
with open("ideas.txt", "r") as f:
|
|
|
123 |
response = self._get(f"{self.base}/sdxl/samplers")
|
124 |
return response.json()
|
125 |
|
126 |
+
def face_swap(self):
|
127 |
+
response = self._get(f"{self.base}/faceswap")
|
128 |
+
return response.json()
|
129 |
+
|
130 |
def _post(self, url, params):
|
131 |
headers = {
|
132 |
**self.headers,
|
|
|
339 |
|
340 |
|
341 |
|
342 |
+
def face_swaper(source, target, progress=gr.Progress()):
|
343 |
+
progress(0, desc="Starting")
|
344 |
+
time.sleep(1.5)
|
345 |
+
progress(0.10, desc="Uploading source image")
|
346 |
+
time.sleep(1)
|
347 |
+
progress(0.15, desc="Uploading target image")
|
348 |
+
time.sleep(1)
|
349 |
+
progress(0.25, desc="Swapping")
|
350 |
|
351 |
+
result = prodia_client.face_swap({
|
352 |
+
"sourceUrl": source,
|
353 |
+
"targetUrl": target,
|
354 |
+
})
|
355 |
+
|
356 |
+
progress(0.75, desc="Opening image")
|
357 |
+
job = prodia_client.wait(result)
|
358 |
+
progress(0.99, desc="Sending image")
|
359 |
+
time.sleep(0.5)
|
360 |
+
return [job["imageUrl"]], job["imageUrl"]
|
361 |
|
362 |
css = """
|
363 |
#generate {
|
|
|
645 |
|
646 |
hf_text_button.click(hf_inference, inputs=[hf_prompt, hf_negative_prompt, hf_model, hf_steps, sampler, hf_cfg_scale, hf_width, hf_height,
|
647 |
hf_seed], outputs=hf_image_output, concurrency_limit=64)
|
648 |
+
with gr.Tab("Face Swap"):
|
649 |
+
with gr.Row():
|
650 |
+
source_image = gr.File(label="Source image", type="filepath", file_types="image", file_count="single", interactive=True)
|
651 |
+
target_image = gr.File(label="Target image", type="filepath", file_types="image", file_count="single", interactive=True)
|
652 |
+
swap_button = gr.Button(value="Swap")
|
653 |
+
with gr.Row():
|
654 |
+
swaped_result = gr.Gallery(show_label=False, rows2, allow_preview=True, preview=True)
|
655 |
+
swaped_past = gr.Textbox(visiable=False, interactive=False)
|
656 |
+
swap_button.click(face_swaper, inputs=[source_image, target_image], outputs=[swaped_result, swaped_past])
|
657 |
with gr.Tab("Prompt Generator"):
|
658 |
gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
|
659 |
with open("ideas.txt", "r") as f:
|