mindtube Omnibus committed on
Commit 69aee09 • 0 Parent(s):

Duplicate from Omnibus/maximum_multiplier_places

Co-authored-by: Bot <Omnibus@users.noreply.huggingface.co>

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +283 -0
  4. index.html +16 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Maximum Multiplier
+ emoji: 🛕🛕
+ colorFrom: green
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.15.0
+ app_file: app.py
+ pinned: true
+ duplicated_from: Omnibus/maximum_multiplier_places
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,283 @@
+ import gradio as gr
+ import os
+ import sys
+ from pathlib import Path
+
+ models = [
+     "johnslegers/epic-diffusion-v1.1",
+     "andite/anything-v4.0",
+     "runwayml/stable-diffusion-v1-5",
+     "claudfuen/photorealistic-fuen-v1",
+     "naclbit/trinart_stable_diffusion_v2",
+     "nitrosocke/Arcane-Diffusion",
+     "nitrosocke/archer-diffusion",
+     "nitrosocke/elden-ring-diffusion",
+     "nitrosocke/redshift-diffusion",
+     "nitrosocke/spider-verse-diffusion",
+     "nitrosocke/mo-di-diffusion",
+     "nitrosocke/classic-anim-diffusion",
+     "dreamlike-art/dreamlike-diffusion-1.0",
+     "dreamlike-art/dreamlike-photoreal-2.0",
+     "wavymulder/wavyfusion",
+     "wavymulder/Analog-Diffusion",
+     "prompthero/midjourney-v4-diffusion",
+     "prompthero/openjourney",
+     "dallinmackay/Van-Gogh-diffusion",
+     "hakurei/waifu-diffusion",
+     "DGSpitzer/Cyberpunk-Anime-Diffusion",
+     "Fictiverse/Stable_Diffusion_BalloonArt_Model",
+     "dallinmackay/Tron-Legacy-diffusion",
+     "AstraliteHeart/pony-diffusion",
+     "nousr/robo-diffusion",
+     "CompVis/stable-diffusion-v1-4",
+
+ ]
+ current_model = models[0]
+
+ text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
+ text_gen2=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
+ text_gen3=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
+ text_gen4=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
+ text_gen5=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
+ text_gen6=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
+ text_gen7=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
+ text_gen8=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
+
+ models2=[
+     gr.Interface.load(f"models/{models[0]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[1]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[2]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[3]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[4]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[5]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[6]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[7]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[8]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[9]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[10]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[11]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[12]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[13]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[14]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[15]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[16]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[17]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[18]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[19]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[20]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[21]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[22]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[23]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[24]}",live=True,preprocess=True),
+     gr.Interface.load(f"models/{models[25]}",live=True,preprocess=True),
+
+ ]
+
+
+ def text_it1(inputs,text_gen1=text_gen1):
+     go_t1=text_gen1(inputs)
+     return(go_t1)
+ def text_it2(inputs,text_gen2=text_gen2):
+     go_t2=text_gen2(inputs)
+     return(go_t2)
+ def text_it3(inputs,text_gen3=text_gen3):
+     go_t3=text_gen3(inputs)
+     return(go_t3)
+ def text_it4(inputs,text_gen4=text_gen4):
+     go_t4=text_gen4(inputs)
+     return(go_t4)
+ def text_it5(inputs,text_gen5=text_gen5):
+     go_t5=text_gen5(inputs)
+     return(go_t5)
+ def text_it6(inputs,text_gen6=text_gen6):
+     go_t6=text_gen6(inputs)
+     return(go_t6)
+ def text_it7(inputs,text_gen7=text_gen7):
+     go_t7=text_gen7(inputs)
+     return(go_t7)
+ def text_it8(inputs,text_gen8=text_gen8):
+     go_t8=text_gen8(inputs)
+     return(go_t8)
+
+
+
+ def set_model(current_model):
+     current_model = models[current_model]
+     return gr.update(label=(f"{current_model}")),gr.update(label=(f"{current_model}")),gr.update(label=(f"{current_model}")),gr.update(label=(f"{current_model}")),gr.update(label=(f"{current_model}")),gr.update(label=(f"{current_model}")),gr.update(label=(f"{current_model}")),gr.update(label=(f"{current_model}")),
+
+
+ def send_it1(inputs, model_choice):
+     proc1=models2[model_choice]
+     output1=proc1(inputs)
+     return(output1)
+ def send_it2(inputs, model_choice):
+     proc2=models2[model_choice]
+     output2=proc2(inputs)
+     return(output2)
+ def send_it3(inputs, model_choice):
+     proc3=models2[model_choice]
+     output3=proc3(inputs)
+     return(output3)
+ def send_it4(inputs, model_choice):
+     proc4=models2[model_choice]
+     output4=proc4(inputs)
+     return(output4)
+ def send_it5(inputs, model_choice):
+     proc5=models2[model_choice]
+     output5=proc5(inputs)
+     return(output5)
+ def send_it6(inputs, model_choice):
+     proc6=models2[model_choice]
+     output6=proc6(inputs)
+     return(output6)
+ def send_it7(inputs, model_choice):
+     proc7=models2[model_choice]
+     output7=proc7(inputs)
+     return(output7)
+ def send_it8(inputs, model_choice):
+     proc8=models2[model_choice]
+     output8=proc8(inputs)
+     return(output8)
+ css=""""""
+
+
+ with gr.Blocks(css=css) as myface:
+     gr.HTML("""<!DOCTYPE html>
+ <html lang="en">
+ <head>
+ <meta charset="utf-8" />
+ <meta name="twitter:card" content="player"/>
+ <meta name="twitter:site" content=""/>
+ <meta name="twitter:player" content="https://omnibus-maximum-multiplier-places.hf.space"/>
+ <meta name="twitter:player:stream" content="https://omnibus-maximum-multiplier-places.hf.space"/>
+ <meta name="twitter:player:width" content="100%"/>
+ <meta name="twitter:player:height" content="600"/>
+ <meta property="og:title" content="Embedded Live Viewer"/>
+ <meta property="og:description" content="Tweet Genie - A Huggingface Space"/>
+ <meta property="og:image" content="https://cdn.glitch.global/80dbe92e-ce75-44af-84d5-74a2e21e9e55/omnicard.png?v=1676772531627"/>
+ <!--<meta http-equiv="refresh" content="0; url=https://huggingface.co/spaces/corbt/tweet-genie">-->
+
+ </head>
+
+ </html>
+     """)
+     with gr.Row():
+         with gr.Tab("Title"):
+             gr.HTML(""" <title>Maximum Multiplier</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;">
+ <h1>Everything</h1>
+ <br><br><h4>It just does a lot of things at the same time</h4>
+
+ """)
+
+         with gr.Tab("Description"):
+             gr.HTML("""<div style="text-align:center;">
+ <h4>As many Text-to-Image Models as I can fit here</h4><br>
+ <h4>Suggest more up in the "Community" button</h4>
+
+ </div>""")
+
+         with gr.Tab("Tools"):
+             with gr.Tab("View"):
+                 with gr.Row():
+                     with gr.Column(style="width=50%, height=70%"):
+                         gr.Pil(label="Crop")
+                     with gr.Column(style="width=50%, height=70%"):
+                         gr.Pil(label="Crop")
+
+
+             with gr.Tab("Draw"):
+                 with gr.Column(style="width=50%, height=70%"):
+                     gr.Pil(label="Crop")
+                 with gr.Column(style="width=50%, height=70%"):
+                     gr.Pil(label="Draw")
+
+
+                 gr.ImagePaint(label="Draw")
+
+             with gr.Tab("Text"):
+                 with gr.Row():
+
+                     with gr.Column(scale=50):
+                         gr.Textbox(label="", lines=8, interactive=True)
+
+
+                     with gr.Column(scale=50):
+                         gr.Textbox(label="", lines=8, interactive=True)
+
+             with gr.Tab("Color Picker"):
+                 with gr.Row():
+
+                     with gr.Column(scale=50):
+                         gr.ColorPicker(label="Color", interactive=True)
+
+
+                     with gr.Column(scale=50):
+                         gr.ImagePaint(label="Draw", interactive=True)
+
+     with gr.Row():
+         with gr.Column():
+             input_text=gr.Textbox(label="Short Prompt",lines=2)
+             #Model selection dropdown
+             model_name1 = gr.Dropdown(show_label=False, choices=[m for m in models], type="index", value=current_model, interactive=True)
+         with gr.Column():
+             use_short=gr.Button("Use Short Prompt")
+             see_prompts=gr.Button("Generate Magic Prompts")
+             run=gr.Button("Launch")
+     with gr.Tab("Main"):
+         with gr.Row():
+             output1=gr.Image(label=(f"{current_model}"))
+             output2=gr.Image(label=(f"{current_model}"))
+             output3=gr.Image(label=(f"{current_model}"))
+             output4=gr.Image(label=(f"{current_model}"))
+         with gr.Row():
+             magic1=gr.Textbox(lines=4)
+             magic2=gr.Textbox(lines=4)
+             magic3=gr.Textbox(lines=4)
+             magic4=gr.Textbox(lines=4)
+         with gr.Row():
+             output5=gr.Image(label=(f"{current_model}"))
+             output6=gr.Image(label=(f"{current_model}"))
+             output7=gr.Image(label=(f"{current_model}"))
+             output8=gr.Image(label=(f"{current_model}"))
+         with gr.Row():
+             magic5=gr.Textbox(lines=4)
+             magic6=gr.Textbox(lines=4)
+             magic7=gr.Textbox(lines=4)
+             magic8=gr.Textbox(lines=4)
+
+     def short_prompt(inputs):
+         return(inputs)
+
+     model_name1.change(set_model,inputs=model_name1,outputs=[output1,output2,output3,output4,output5,output6,output7,output8])
+
+     run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
+     run.click(send_it2, inputs=[magic2, model_name1], outputs=[output2])
+     run.click(send_it3, inputs=[magic3, model_name1], outputs=[output3])
+     run.click(send_it4, inputs=[magic4, model_name1], outputs=[output4])
+     run.click(send_it5, inputs=[magic5, model_name1], outputs=[output5])
+     run.click(send_it6, inputs=[magic6, model_name1], outputs=[output6])
+     run.click(send_it7, inputs=[magic7, model_name1], outputs=[output7])
+     run.click(send_it8, inputs=[magic8, model_name1], outputs=[output8])
+
+     use_short.click(short_prompt,inputs=[input_text],outputs=magic1)
+     use_short.click(short_prompt,inputs=[input_text],outputs=magic2)
+     use_short.click(short_prompt,inputs=[input_text],outputs=magic3)
+     use_short.click(short_prompt,inputs=[input_text],outputs=magic4)
+     use_short.click(short_prompt,inputs=[input_text],outputs=magic5)
+     use_short.click(short_prompt,inputs=[input_text],outputs=magic6)
+     use_short.click(short_prompt,inputs=[input_text],outputs=magic7)
+     use_short.click(short_prompt,inputs=[input_text],outputs=magic8)
+
+     see_prompts.click(text_it1,inputs=[input_text],outputs=magic1)
+     see_prompts.click(text_it2,inputs=[input_text],outputs=magic2)
+     see_prompts.click(text_it3,inputs=[input_text],outputs=magic3)
+     see_prompts.click(text_it4,inputs=[input_text],outputs=magic4)
+     see_prompts.click(text_it5,inputs=[input_text],outputs=magic5)
+     see_prompts.click(text_it6,inputs=[input_text],outputs=magic6)
+     see_prompts.click(text_it7,inputs=[input_text],outputs=magic7)
+     see_prompts.click(text_it8,inputs=[input_text],outputs=magic8)
+
+
+
+ myface.queue(concurrency_count=200)
+ myface.launch(inline=True, show_api=False, max_threads=400)
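
The app above repeats one wiring pattern eight times: load a hosted model with gr.Interface.load, pick it by dropdown index, and route a prompt box to an image output. A condensed sketch of that single unit, assuming the same Gradio 3.15 API pinned in README.md; the two model ids are a subset of the list in the diff, and generate, model_pick, and sketch are illustrative names, not part of this commit:

    import gradio as gr

    # Two of the 26 model repos wired up in app.py (illustrative subset).
    models = [
        "johnslegers/epic-diffusion-v1.1",
        "runwayml/stable-diffusion-v1-5",
    ]

    # One hosted-inference interface per model, mirroring the models2 list above.
    loaded = [gr.Interface.load(f"models/{m}", live=True, preprocess=True) for m in models]

    def generate(prompt, model_index):
        # The dropdown below uses type="index", so model_index arrives as an int.
        return loaded[model_index](prompt)

    with gr.Blocks() as sketch:
        prompt = gr.Textbox(label="Short Prompt", lines=2)
        model_pick = gr.Dropdown(choices=models, type="index", value=models[0], interactive=True)
        go = gr.Button("Launch")
        out = gr.Image(label=models[0])
        go.click(generate, inputs=[prompt, model_pick], outputs=[out])

    sketch.launch()

app.py multiplies this unit across eight image outputs and adds eight MagicPrompt interfaces that expand the short prompt before generation.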
index.html ADDED
@@ -0,0 +1,16 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+ <meta charset="utf-8" />
+ <meta name="twitter:card" content="player"/>
+ <meta name="twitter:site" content=""/>
+ <meta name="twitter:player" content="https://omnibus-maximum-multiplier-places.hf.space"/>
+ <meta name="twitter:player:stream" content="https://omnibus-maximum-multiplier-places.hf.space"/>
+ <meta name="twitter:player:width" content="100%"/>
+ <meta name="twitter:player:height" content="600"/>
+ <meta property="og:title" content="Embedded Live Viewer"/>
+ <meta property="og:description" content="Tweet Genie - A Huggingface Space"/>
+ <meta property="og:image" content="https://cdn.glitch.global/80dbe92e-ce75-44af-84d5-74a2e21e9e55/omnicard.png?v=1676772531627"/>
+ <!--<meta http-equiv="refresh" content="0; url=https://huggingface.co/spaces/corbt/tweet-genie">-->
+ </head>
+ </html>