import gradio as gr
import os
import sys
from pathlib import Path
models = [
    "Yntec/OpenLexica",
    "Yntec/MapleSyrup",
]
current_model = models[0]
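
# Prompt-extension helper: the MagicPrompt Space is loaded as a callable that
# takes a short idea and returns an expanded Stable Diffusion prompt.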
text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
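
# Each entry wraps a hosted model as a callable Interface; calling it with a
# prompt string returns the generated image output (see send_it1 below).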
models2 = [
    gr.Interface.load(f"models/{models[0]}", live=True, preprocess=False),
    gr.Interface.load(f"models/{models[1]}", live=True, preprocess=False),
]
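
# Extend the user's idea into a longer prompt via the MagicPrompt Space.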
def text_it1(inputs, text_gen1=text_gen1):
    go_t1 = text_gen1(inputs)
    return go_t1
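
# Look up the model by its dropdown index and relabel the output image accordingly.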
def set_model(current_model):
    current_model = models[current_model]
    return gr.update(label=f"{current_model}")
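
# Generate an image by calling the loaded Interface for the chosen model.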
def send_it1(inputs, model_choice):
    proc1 = models2[model_choice]
    output1 = proc1(inputs)
    return output1
css=""""""

with gr.Blocks(css=css) as myface:
    gr.HTML("""
<div style="text-align: center; max-width: 1200px; margin: 0 auto;">
<div>
<style>
h1 {
font-size: 6em;
color: #ffffff;
margin-top: 30px;
margin-bottom: 30px;
text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
}
h3 {
color: #ffaa66 !important;
}
h4 {
color: #ffffff !important;
}
.gradio-container {
color: #ffaa66 !important;
background-color: #660099 !important;
font-family: 'IBM Plex Sans', sans-serif !important;
}
</style>
<body>
<div class="center"><h1>ToyWorld XL 401</h1>
</div>
</body>
</div>
<p style="margin-bottom: 10px; color: #ffaa66;">
<h3>Top SDXL models and 401 SD1.5 models for your enjoyment!</h3></p>
<p style="margin-bottom: 10px; font-size: 98%">
<br><h4>The first time you load a model it takes 200 seconds.</h4>
<br><h4>After that, each image takes 20 seconds to generate!</h4></p>
</div>
""")
    with gr.Row():
        with gr.Column(scale=100):
            # Model selection dropdown; type="index" passes the chosen list index to callbacks
            model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column(scale=100):
            magic1 = gr.Textbox(label="Your Prompt", lines=4)
            gr.HTML("""<style> .gr-button {
color: white !important;
border-color: #ffffff !important;
background: #000000 !important;
}</style>""")
            run = gr.Button("Generate Image")
    with gr.Row():
        with gr.Column(style="width=800px"):
            output1 = gr.Image(label=f"{current_model}")
    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2)
            use_short = gr.Button("Use Short Prompt")
            see_prompts = gr.Button("Extend Idea")
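
    # Pass the short prompt text through unchanged so it can be copied into the prompt box.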
    def short_prompt(inputs):
        return inputs
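
    # Wire up the UI events: selecting a model relabels the output image,
    # "Generate Image" runs the selected model on the prompt, and the two
    # prompt buttons copy or extend the idea text into the main prompt box.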
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)