import gradio as gr
import os
import sys
from pathlib import Path
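
# Text-to-image models selectable from the dropdown; each entry maps a display
# name to a Hugging Face model repo id.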
models = [
    {"name": "Stable Diffusion 2", "url": "stabilityai/stable-diffusion-2-1"},
    {"name": "Stable Diffusion 2.1 Base", "url": "stabilityai/stable-diffusion-2-1-base"},
    {"name": "XL-Refiner-1.0", "url": "stabilityai/stable-diffusion-xl-refiner-1.0"},
    {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"},
    {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"},
    {"name": "Robo Diffusion 2 Base", "url": "nousr/robo-diffusion-2-base"},
    {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"},
    {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"},
]
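
# Model selected at startup (updated by the dropdown callback below).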
current_model = models[0]
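
# Prompt-extension generator, loaded from the daspartho/prompt-extend Space.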
text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")
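
# Pre-load an inference wrapper for every model so generation requests can be
# dispatched by dropdown index.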
models2 = []
for model in models:
    model_url = f"models/{model['url']}"
    loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
    models2.append(loaded_model)
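
# Expand a short prompt idea into a richer prompt using the prompt-extend Space.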
def text_it(inputs, text_gen=text_gen):
    return text_gen(inputs)
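
# Record the dropdown selection and update the output labels to match.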
def set_model(current_model_index):
    global current_model
    current_model = models[current_model_index]
    # The dropdown change is wired to all six image outputs, so return one
    # label update per output.
    return [gr.update(label=current_model["name"])] * 6
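
# Send a prompt to the model chosen in the dropdown (passed as an index).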
def send_it(inputs, model_choice):
    proc = models2[model_choice]
    return proc(inputs)
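
# UI: a prompt-idea box and model dropdown on top, then six image slots, each
# paired with the generated prompt that produced it.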
with gr.Blocks() as myface:
    gr.HTML(
        # Page layout CSS; the container selector here is a generic choice.
        "<style>.gradio-container { display: flex; gap: var(--layout-gap); width: var(--size-full); }</style>"
    )
    with gr.Row():
        with gr.Row():
            input_text = gr.Textbox(label="Prompt idea", placeholder="", lines=1)
            # Model selection dropdown
            model_name1 = gr.Dropdown(
                label="Choose Model",
                choices=[m["name"] for m in models],
                type="index",
                value=current_model["name"],
                interactive=True,
            )
        with gr.Row():
            see_prompts = gr.Button("Generate Prompts")
            run = gr.Button("Generate Images", variant="primary")
    with gr.Row():
        output1 = gr.Image(label="")
        output2 = gr.Image(label="")
        output3 = gr.Image(label="")
    with gr.Row():
        magic1 = gr.Textbox(label="Generated Prompt", lines=2)
        magic2 = gr.Textbox(label="Generated Prompt", lines=2)
        magic3 = gr.Textbox(label="Generated Prompt", lines=2)
    with gr.Row():
        output4 = gr.Image(label="")
        output5 = gr.Image(label="")
        output6 = gr.Image(label="")
    with gr.Row():
        magic4 = gr.Textbox(label="Generated Prompt", lines=2)
        magic5 = gr.Textbox(label="Generated Prompt", lines=2)
        magic6 = gr.Textbox(label="Generated Prompt", lines=2)
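
    # Event wiring: the dropdown change relabels the six outputs; each button
    # click fans out the prompts/images to all six slots.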
    model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3, output4, output5, output6])
    run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
    run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
    run.click(send_it, inputs=[magic3, model_name1], outputs=[output3])
    run.click(send_it, inputs=[magic4, model_name1], outputs=[output4])
    run.click(send_it, inputs=[magic5, model_name1], outputs=[output5])
    run.click(send_it, inputs=[magic6, model_name1], outputs=[output6])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic3])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic4])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic5])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic6])
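
# Queue incoming requests so concurrent users can share the loaded models.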
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=True, max_threads=400)
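
# To try the app locally (assuming this file is saved as app.py and a Gradio 3.x
# release is installed, since `concurrency_count` was removed in Gradio 4):
#   python app.py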