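"""Gradio Space that turns one prompt into three images.

The raw prompt is expanded by the daspartho/prompt-extend Space into three
generated prompts, and each of those is sent to the Stable Diffusion
checkpoint selected in the dropdown.
"""
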
import gradio as gr
import os
import sys
from pathlib import Path

# Named checkpoints shown with a friendly label in the dropdown.
models = [
    {"name": "Stable Diffusion 1.4", "url": "CompVis/stable-diffusion-v1-4"},
    {"name": "Stable Diffusion 1.5", "url": "runwayml/stable-diffusion-v1-5"},
]
# Additional checkpoints, listed in the dropdown under their repo ids.
more_model_ids = [
    "claudfuen/photorealistic-fuen-v1",
    "andite/anything-v4.0",
    "naclbit/trinart_stable_diffusion_v2",
    "nitrosocke/Arcane-Diffusion",
    "nitrosocke/archer-diffusion",
    "nitrosocke/elden-ring-diffusion",
    "nitrosocke/redshift-diffusion",
    "nitrosocke/spider-verse-diffusion",
    "nitrosocke/mo-di-diffusion",
    "nitrosocke/classic-anim-diffusion",
    "dreamlike-art/dreamlike-photoreal-1.0",
    "dreamlike-art/dreamlike-photoreal-2.0",
    "wavymulder/wavyfusion",
    "wavymulder/Analog-Diffusion",
    "prompthero/midjourney-v4-diffusion",
    "prompthero/openjourney",
    "dallinmackay/Van-Gogh-diffusion",
    "hakurei/waifu-diffusion",
    "DGSpitzer/Cyberpunk-Anime-Diffusion",
    "Fictiverse/Stable_Diffusion_BalloonArt_Model",
    "dallinmackay/Tron-Legacy-diffusion",
    "AstraliteHeart/pony-diffusion",
    "nousr/robo-diffusion",
    "Linaqruf/anything-v3",
    "Omnibus/maximum_diffusion_fast",
]
models += [{"name": repo_id, "url": repo_id} for repo_id in more_model_ids]
current_model = models[0]

# Prompt-extension helper, loaded from an existing Hugging Face Space.
text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")

# Load an inference interface for every model in the registry.
models2 = []
for model in models:
    model_url = f"models/{model['url']}"
    loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
    models2.append(loaded_model)


def text_it(inputs, text_gen=text_gen):
    # Expand the user's prompt with the prompt-extend Space.
    return text_gen(inputs)


def set_model(current_model_index):
    # Remember the dropdown selection and clear the three image outputs.
    global current_model
    current_model = models[current_model_index]
    return gr.update(value=None), gr.update(value=None), gr.update(value=None)


def send_it(inputs, model_choice):
    # Generate an image with the model picked in the dropdown (passed by index).
    proc = models2[model_choice]
    return proc(inputs)


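# UI: prompt box and model dropdown, a row of buttons, three image outputs,
# and three textboxes holding the generated prompts.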
with gr.Blocks() as myface:
    gr.HTML(
        """<style>
        /* Page-wide styling. */
        body {width: 500px; position: absolute; background-color: #000000; height: 100%;
              margin-left: 2px; margin-right: 2px; font-weight: 800; font-size: 24px; padding: 10px;}
        </style>"""
    )
    with gr.Row():
        input_text = gr.Textbox(label=" ", placeholder="PROMPT HERE", lines=4)
        # Model selection dropdown
        model_name1 = gr.Dropdown(
            label=" ",
            choices=[m["name"] for m in models],
            type="index",
            value=current_model["name"],
            interactive=True,
        )
    with gr.Row():
        see_prompts = gr.Button("Generate Prompts")
        run = gr.Button("Generate Images", variant="primary")
        
    with gr.Row():
        output1 = gr.Image(label="")
        output2 = gr.Image(label="")
        output3 = gr.Image(label="")
    with gr.Row():          
        magic1 = gr.Textbox(label="Generated Prompt", lines=2)
        magic2 = gr.Textbox(label="Generated Prompt", lines=2)
        magic3 = gr.Textbox(label="Generated Prompt", lines=2)
   
    # Changing the model clears the three image outputs.
    model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3])

    # Generate one image per auto-generated prompt with the selected model.
    run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
    run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
    run.click(send_it, inputs=[magic3, model_name1], outputs=[output3])

    # Expand the raw prompt into the three prompt textboxes.
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])
    see_prompts.click(text_it, inputs=[input_text], outputs=[magic3])
   

myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)