Nickhilearla135095 Omnibus committed
Commit 13adbb5
0 Parent(s):

Duplicate from Omnibus/maximum_diffusion


Co-authored-by: Bot <Omnibus@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +246 -0
  4. requirements.txt +1 -0
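
Note: this commit records a Space duplication ("Duplicate from Omnibus/maximum_diffusion"). For reference, the same duplication can also be done programmatically; the sketch below is a minimal example, assuming a recent huggingface_hub release that provides the duplicate_space helper (the helper call and the target repo id are illustrations, not part of this commit):

    from huggingface_hub import duplicate_space

    # Copy the source Space into your own namespace (hypothetical target id);
    # requires a write-access token, e.g. from `huggingface-cli login`.
    duplicate_space("Omnibus/maximum_diffusion", to_id="your-username/maximum_diffusion")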
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Maximum Diffusion
+ emoji: 🛕
+ colorFrom: red
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.15.0
+ app_file: app.py
+ pinned: true
+ duplicated_from: Omnibus/maximum_diffusion
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,246 @@
+ import gradio as gr
+ import os
+ import sys
+ from pathlib import Path
+ import time
+ import accelerate
+
+
+
+ models = [
+     "",
+     "runwayml/stable-diffusion-v1-5",
+     "CompVis/stable-diffusion-v1-4",
+     "claudfuen/photorealistic-fuen-v1",
+     "andite/anything-v4.0",
+     "naclbit/trinart_stable_diffusion_v2",
+     "nitrosocke/Arcane-Diffusion",
+     "nitrosocke/archer-diffusion",
+     "nitrosocke/elden-ring-diffusion",
+     "nitrosocke/redshift-diffusion",
+     "nitrosocke/spider-verse-diffusion",
+     "nitrosocke/mo-di-diffusion",
+     "nitrosocke/classic-anim-diffusion",
+     "dreamlike-art/dreamlike-photoreal-1.0",
+     "dreamlike-art/dreamlike-photoreal-2.0",
+     "wavymulder/wavyfusion",
+     "wavymulder/Analog-Diffusion",
+     "prompthero/midjourney-v4-diffusion",
+     "prompthero/openjourney",
+     "dallinmackay/Van-Gogh-diffusion",
+     "hakurei/waifu-diffusion",
+     "DGSpitzer/Cyberpunk-Anime-Diffusion",
+     "Fictiverse/Stable_Diffusion_BalloonArt_Model",
+     "dallinmackay/Tron-Legacy-diffusion",
+     "AstraliteHeart/pony-diffusion",
+     "nousr/robo-diffusion",
+     "Linaqruf/anything-v3",
+     "Omnibus/maximum_diffusion_fast",
+     "",
+
+ ]
+ model_1=models[1]
+ model_2=models[2]
+ model_3=models[3]
+ model_4=models[4]
+
+ model_5=models[5]
+ model_6=models[6]
+ model_7=models[9]
+ model_8=models[13]
+
+ model_9=models[14]
+ model_10=models[15]
+ model_11=models[16]
+ model_12=models[17]
+
+ text_gen=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link",live=True, preprocess=True)
+
+ proc1=gr.Interface.load(f"models/{model_1}",live=False,preprocess=True, postprocess=False)
+ proc2=gr.Interface.load(f"models/{model_2}",live=False,preprocess=True, postprocess=False)
+ proc3=gr.Interface.load(f"models/{model_3}",live=False,preprocess=True, postprocess=False)
+ proc4=gr.Interface.load(f"models/{model_4}",live=False,preprocess=True, postprocess=False)
+
+ proc5=gr.Interface.load(f"models/{model_5}",live=False,preprocess=True, postprocess=False)
+ proc6=gr.Interface.load(f"models/{model_6}",live=False,preprocess=True, postprocess=False)
+ proc7=gr.Interface.load(f"models/{model_7}",live=False,preprocess=True, postprocess=False)
+ proc8=gr.Interface.load(f"models/{model_8}",live=False,preprocess=True, postprocess=False)
+
+ proc9=gr.Interface.load(f"models/{model_9}",live=False,preprocess=True, postprocess=False)
+ proc10=gr.Interface.load(f"models/{model_10}",live=False,preprocess=True, postprocess=False)
+ proc11=gr.Interface.load(f"models/{model_11}",live=False,preprocess=True, postprocess=False)
+ proc12=gr.Interface.load(f"models/{model_12}",live=False,preprocess=True, postprocess=False)
+
+
+ '''
+ proc1=gr.Interface.load("models/nitrosocke/Arcane-Diffusion", live=False, preprocess=True, postprocess=False)
+ proc2=gr.Interface.load("models/naclbit/trinart_stable_diffusion_v2", live=False, preprocess=True, postprocess=False)
+ proc3=gr.Interface.load("models/nitrosocke/redshift-diffusion", live=False, preprocess=True, postprocess=False)
+ proc4=gr.Interface.load("models/runwayml/stable-diffusion-v1-5", live=False, preprocess=True, postprocess=False)
+
+ proc5=gr.Interface.load("models/claudfuen/photorealistic-fuen-v1", live=False, preprocess=True, postprocess=False)
+ proc6=gr.Interface.load("models/CompVis/stable-diffusion-v1-4", live=False, preprocess=True, postprocess=False)
+ proc7=gr.Interface.load("models/Linaqruf/anything-v3.0", live=False, preprocess=True, postprocess=False)
+ proc8=gr.Interface.load("models/andite/anything-v4.0", live=False, preprocess=True, postprocess=False)
+
+ proc9=gr.Interface.load("models/dreamlike-art/dreamlike-photoreal-1.0", live=False, preprocess=True, postprocess=False)
+ proc10=gr.Interface.load("models/prompthero/openjourney", live=False, preprocess=True, postprocess=False)
+ proc11=gr.Interface.load("models/prompthero/midjourney-v4-diffusion", live=False, preprocess=True, postprocess=False)
+ proc12=gr.Interface.load("models/wavymulder/Analog-Diffusion", live=False, preprocess=True, postprocess=False)
+ '''
+
+ def get_prompts(prompt_text):
+     return text_gen(prompt_text)
+ def send_it1(inputs,proc1=proc1):
+     output1=proc1(inputs)
+     return(output1)
+ def send_it2(inputs,proc2=proc2):
+     output2=proc2(inputs)
+     return(output2)
+ def send_it3(inputs,proc3=proc3):
+     output3=proc3(inputs)
+     return(output3)
+ def send_it4(inputs,proc4=proc4):
+     output4=proc4(inputs)
+     return(output4)
+
+ def send_it5(inputs,proc5=proc5):
+     output5=proc5(inputs)
+     return(output5)
+ def send_it6(inputs,proc6=proc6):
+     output6=proc6(inputs)
+     return(output6)
+ def send_it7(inputs,proc7=proc7):
+     output7=proc7(inputs)
+     return(output7)
+ def send_it8(inputs,proc8=proc8):
+     output8=proc8(inputs)
+     return(output8)
+
+ def send_it9(inputs,proc9=proc9):
+     output9=proc9(inputs)
+     return(output9)
+ def send_it10(inputs,proc10=proc10):
+     output10=proc10(inputs)
+     return(output10)
+ def send_it11(inputs,proc11=proc11):
+     output11=proc11(inputs)
+     return(output11)
+ def send_it12(inputs,proc12=proc12):
+     output12=proc12(inputs)
+     return(output12)
+
+ def main():
+     with gr.Blocks(batch=False) as myface:
+         with gr.Row():
+             with gr.Tab("Title"):
+                 gr.HTML("""<title>Maximum Diffusion</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;">
+                 <div style="display: inline-flex;align-items: center;gap: 0.8rem;font-size: 1.75rem;margin-bottom: 10px;line-height: 1em;">
+                 <h1 style="font-weight: 900;font-size:30; margin-top: 9px;">Maximum Diffusion</h1></div>
+                 <br><br><p style="margin-bottom: 1px;font-size: 15;font-weight: 100;line-height: 1.5em;">
+                 Text to Image Model Comparison Space - CPU</p>
+                 <br><p style="margin-bottom: 0px;font-size: 15;font-weight: 100;line-height: 1.5em;">
+                 This space becomes quickly encumbered when even 1 model of 12 fails to load, or error out</p>
+                 <p style="margin-bottom: 0px;font-size: 15;font-weight: 100;line-height: 1.5em;">
+
+                 Any tips on how to "time-out" Gradio.Interface models are welcome in the "Community" button above<br>
+                 <br></div>""")
+             with gr.Tab("Description"):
+                 gr.HTML("""<div style="text-align:center;">
+                 <h4>Enter your Prompt into the "Short Prompt" box and click "Magic Prompt" to load a prettified version of your prompt<br>
+                 When you are satisfied with the prompt that is in the "Text to Image" box, click "Launch" to load the Models.<br><br>
+                 Images load faster with a simpler prompt.<br>
+                 Most images should load within 2 minutes.<br>
+                 Some models become stuck on certain prompts, and refreshing the page seems to fix it.<br><br>
+                 Not responsible for content, use at your own risk.
+                 </h4></div>""")
+             with gr.Tab("DIY"):
+                 gr.HTML("""<div style="text-align:Left;">
+                 <h4>Copy/Paste this code in your app.py file<br><br>
+                 import gradio as gr<br>
+                 max_d=gr.Interface.load("spaces/Omnibus/maximum_diffusion")<br>
+                 max_d.launch()<br>
+                 </h4></div>""")
+             with gr.Tab("Credits"):
+                 with gr.Row():
+                     gr.Column()
+                     with gr.Column(style="text-align:left;"):
+                         gr.HTML("""
+                         <div style="vertical-align:center">
+                         <br>
+                         <p>I learned everything I know from:
+                         <p><a href="https://huggingface.co/spaces/anzorq/finetuned_diffusion">Finetuned Diffusion</a></p>
+                         <p><a href="https://huggingface.co/spaces/Gustavosta/MagicPrompt-Stable-Diffusion">Magic Prompt Stable Diffusion</a></p>
+                         <p><a href="https://huggingface.co/spaces/huggingface-projects/magic-diffusion">Magic Diffusion</a></p>
+                         <p>Models by <a href="https://huggingface.co/Gustavosta">@Gustavosta</a>, <a href="https://twitter.com/haruu1367">@haruu1367</a>, <a href="https://twitter.com/DGSpitzer">@Helixngc7293</a>, <a href="https://twitter.com/dal_mack">@dal_mack</a>, <a href="https://twitter.com/prompthero">@prompthero</a> and others.</p>
+                         </div>
+                         """)
+                     gr.Column()
+             with gr.Tab("Tools"):
+                 with gr.Tab("View"):
+                     with gr.Row():
+                         with gr.Column(style="width=50%, height=70%"):
+                             gr.Pil(label="Crop")
+                         with gr.Column(style="width=50%, height=70%"):
+                             gr.Pil(label="Crop")
+                 with gr.Tab("Draw"):
+                     with gr.Row():
+                         with gr.Column(style="width=50%, height=70%"):
+                             gr.Pil(label="Crop")
+                         with gr.Column(style="width=50%, height=70%"):
+                             gr.Pil(label="Draw")
+                             gr.ImagePaint(label="Draw")
+                 with gr.Tab("Text"):
+                     with gr.Row():
+                         with gr.Column(scale=50):
+                             gr.Textbox(label="", lines=8, interactive=True)
+                         with gr.Column(scale=50):
+                             gr.Textbox(label="", lines=8, interactive=True)
+                 with gr.Tab("Color Picker"):
+                     with gr.Row():
+                         with gr.Column(scale=50):
+                             gr.ColorPicker(label="Color", interactive=True)
+                         with gr.Column(scale=50):
+                             gr.ImagePaint(label="Draw", interactive=True)
+
+         with gr.Row():
+             input_text=gr.Textbox(label="Short Prompt")
+             see_prompts=gr.Button("Magic Prompt")
+         with gr.Row():
+             prompt=gr.Textbox(label="Text to Image")
+             run=gr.Button("Launch")
+         with gr.Row():
+             output1=gr.Image(label=(f"{model_1}"))
+             output2=gr.Image(label=(f"{model_2}"))
+             output3=gr.Image(label=(f"{model_3}"))
+             output4=gr.Image(label=(f"{model_4}"))
+         with gr.Row():
+             output5=gr.Image(label=(f"{model_5}"))
+             output6=gr.Image(label=(f"{model_6}"))
+             output7=gr.Image(label=(f"{model_7}"))
+             output8=gr.Image(label=(f"{model_8}"))
+         with gr.Row():
+             output9=gr.Image(label=(f"{model_9}"))
+             output10=gr.Image(label=(f"{model_10}"))
+             output11=gr.Image(label=(f"{model_11}"))
+             output12=gr.Image(label=(f"{model_12}"))
+
+         see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt])
+
+         run.click(send_it1, inputs=[prompt], outputs=[output1])
+         run.click(send_it2, inputs=[prompt], outputs=[output2])
+         run.click(send_it3, inputs=[prompt], outputs=[output3])
+         run.click(send_it4, inputs=[prompt], outputs=[output4])
+         run.click(send_it5, inputs=[prompt], outputs=[output5])
+         run.click(send_it6, inputs=[prompt], outputs=[output6])
+         run.click(send_it7, inputs=[prompt], outputs=[output7])
+         run.click(send_it8, inputs=[prompt], outputs=[output8])
+         run.click(send_it9, inputs=[prompt], outputs=[output9])
+         run.click(send_it10, inputs=[prompt], outputs=[output10])
+         run.click(send_it11, inputs=[prompt], outputs=[output11])
+         run.click(send_it12, inputs=[prompt], outputs=[output12])
+
+     myface.queue(concurrency_count=600,status_update_rate=1)
+     myface.launch(enable_queue=True,inline=True,max_threads=600)
+ if __name__ == "__main__":
+     main()
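
app.py follows a single pattern throughout: each hosted model is wrapped with gr.Interface.load("models/<repo>"), and the wrapper is then called like an ordinary function inside a click handler. Below is a minimal sketch of that pattern with one model, assuming the Gradio 3.x API this Space targets (sdk_version 3.15.0; gr.Interface.load was later superseded by gr.load). It is an illustration, not part of the commit:

    import gradio as gr

    # Wrap one hosted model; calling the wrapper sends the prompt to the
    # Hugging Face Inference API and returns the generated image.
    proc = gr.Interface.load("models/runwayml/stable-diffusion-v1-5",
                             live=False, preprocess=True, postprocess=False)

    def send_it(prompt):
        return proc(prompt)

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="Text to Image")
        run = gr.Button("Launch")
        out = gr.Image(label="runwayml/stable-diffusion-v1-5")
        run.click(send_it, inputs=[prompt], outputs=[out])

    demo.launch()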
requirements.txt ADDED
@@ -0,0 +1 @@
+ accelerate