tombio committed on
Commit
cd7d65e
1 Parent(s): 480ed39

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -126
app.py CHANGED
@@ -116,130 +116,16 @@ def run(*args):
116
 
117
 
118
  import gradio as gr
119
- from functools import partial
120
- from itertools import chain
121
 
122
- def change_visible(txt1, im1, val):
123
- outputs = {}
124
- if val == "Image":
125
- outputs[im1] = gr.update(visible=True)
126
- outputs[txt1] = gr.update(visible=False)
127
- elif val == "Text/URL":
128
- outputs[im1] = gr.update(visible=False)
129
- outputs[txt1] = gr.update(visible=True)
130
- elif val == "Nothing":
131
- outputs[im1] = gr.update(visible=False)
132
- outputs[txt1] = gr.update(visible=False)
133
- return outputs
134
-
135
-
136
- with gr.Blocks(title="Image Mixer", css=".gr-box {border-color: #8136e2}") as demo:
137
-
138
- gr.Markdown("")
139
- gr.Markdown(
140
- """
141
- # Image Mixer
142
-
143
- _Created by [Justin Pinkney](https://www.justinpinkney.com) at [Lambda Labs](https://lambdalabs.com/)_
144
-
145
- To skip the queue you can try it on <a href="https://cloud.lambdalabs.com/demos/lambda/image-mixer-demo" style="display:inline-block;position: relative;"><img style="margin-top: 0;margin-bottom: 0;margin-left: .25em;" src="https://img.shields.io/badge/-Lambda%20Cloud-blueviolet"></a>
146
-
147
- ### __Provide one or more images to be mixed together by a fine-tuned Stable Diffusion model (see tips and advice below👇).__
148
-
149
- ![banner-large.jpeg](https://s3.amazonaws.com/moonup/production/uploads/1674039767068-62bd5f951e22ec84279820e8.jpeg)
150
-
151
- """)
152
-
153
- btns = []
154
- txts = []
155
- ims = []
156
- strengths = []
157
-
158
- with gr.Row():
159
- for i in range(n_inputs):
160
- with gr.Box():
161
- with gr.Column():
162
- btn1 = gr.Radio(
163
- choices=["Image", "Text/URL", "Nothing"],
164
- label=f"Input {i} type",
165
- interactive=True,
166
- value="Nothing",
167
- )
168
- txt1 = gr.Textbox(label="Text or Image URL", visible=False, interactive=True)
169
- im1 = gr.Image(label="Image", interactive=True, visible=False, type="pil")
170
- strength = gr.Slider(label="Strength", minimum=0, maximum=5, step=0.05, value=1, interactive=True)
171
-
172
- fn = partial(change_visible, txt1, im1)
173
- btn1.change(fn=fn, inputs=[btn1], outputs=[txt1, im1], queue=False)
174
-
175
- btns.append(btn1)
176
- txts.append(txt1)
177
- ims.append(im1)
178
- strengths.append(strength)
179
- with gr.Row():
180
- cfg_scale = gr.Slider(label="CFG scale", value=3, minimum=1, maximum=10, step=0.5)
181
- n_samples = gr.Slider(label="Num samples", value=1, minimum=1, maximum=1, step=1)
182
- seed = gr.Slider(label="Seed", value=0, minimum=0, maximum=10000, step=1)
183
- steps = gr.Slider(label="Steps", value=30, minimum=10, maximum=100, step=5)
184
-
185
- with gr.Row():
186
- submit = gr.Button("Generate")
187
- output = gr.Gallery().style(grid=[1,2], height="640px")
188
-
189
- inps = list(chain(btns, txts, ims, strengths))
190
- inps.extend([cfg_scale,n_samples,seed, steps,])
191
- submit.click(fn=run, inputs=inps, outputs=[output])
192
-
193
- ex = gr.Examples([
194
- [
195
- "Image", "Image", "Text/URL", "Nothing", "Nothing",
196
- "","","central symmetric figure detailed artwork","","",
197
- "gainsborough.jpeg","blonder.jpeg","blonder.jpeg","blonder.jpeg","blonder.jpeg",
198
- 1,1.35,1.4,1,1,
199
- 3.0, 1, 0, 30,
200
- ],
201
- [
202
- "Image", "Image", "Text/URL", "Image", "Nothing",
203
- "","","flowers","","",
204
- "ex2-1.jpeg","ex2-2.jpeg","blonder.jpeg","ex2-3.jpeg","blonder.jpeg",
205
- 1,1,1.5,1.25,1,
206
- 3.0, 1, 0, 30,
207
- ],
208
- [
209
- "Image", "Image", "Image", "Nothing", "Nothing",
210
- "","","","","",
211
- "ex1-1.jpeg","ex1-2.jpeg","ex1-3.jpeg","blonder.jpeg","blonder.jpeg",
212
- 1.1,1,1.4,1,1,
213
- 3.0, 1, 0, 30,
214
- ],
215
- ],
216
- fn=run, inputs=inps, outputs=[output], cache_examples=True)
217
-
218
- gr.Markdown(
219
- """
220
-
221
- ## Tips
222
-
223
- - You can provide between 1 and 5 inputs, these can either be an uploaded image a text prompt or a url to an image file.
224
- - The order of the inputs shouldn't matter, any images will be centre cropped before use.
225
- - Each input has an individual strength parameter which controls how big an influence it has on the output.
226
- - The model was not trained using text and can not interpret complex text prompts.
227
- - Using only text prompts doesn't work well, make sure there is at least one image or URL to an image.
228
- - The parameters on the bottom row such as cfg scale do the same as for a normal Stable Diffusion model.
229
- - Balancing the different inputs requires tweaking of the strengths, I suggest getting the right balance for a small number of samples and with few steps until you're
230
- happy with the result then increase the steps for better quality.
231
- - Outputs are 640x640 by default.
232
- - If you want to run locally see the instruction on the [Model Card](https://huggingface.co/lambdalabs/image-mixer).
233
-
234
- ## How does this work?
235
-
236
- This model is based on the [Stable Diffusion Image Variations model](https://huggingface.co/lambdalabs/sd-image-variations-diffusers)
237
- but it has been fined tuned to take multiple CLIP image embeddings. During training, up to 5 random crops were taken from the training images and
238
- the CLIP image embeddings were computed, these were then concatenated and used as the conditioning for the model. At inference time we can combine the image
239
- embeddings from multiple images to mix their concepts (and we can also use the text encoder to add text concepts too).
240
-
241
- The model was trained on a subset of LAION Improved Aesthetics at a resolution of 640x640 and was trained using 8xA100 GPUs on [Lambda GPU Cloud](https://lambdalabs.com/service/gpu-cloud).
242
-
243
- """)
244
-
245
- demo.launch()
 
116
 
117
 
118
  import gradio as gr
 
 
119
 
120
def my_inference_function(bouts, person):
    """Build the demo's output message from the two text inputs.

    Args:
        bouts: Text from the first input box.
        person: Text from the second input box.

    Returns:
        The greeting string shown in the Gradio output component.
    """
    return f"Hello {bouts} turned into {person}!"
123
# Build and launch the demo UI.
# Fixes vs. the committed version:
#  - the module is imported as `gr` (`import gradio as gr` above), so the bare
#    name `gradio` would raise NameError; use `gr.Interface`.
#  - `article= iO Digital"` was missing its opening quote (SyntaxError);
#    the footer text is now a proper string literal.
gradio_interface = gr.Interface(
    fn=my_inference_function,
    inputs=["text", "text"],
    outputs="text",
    title="Boutsify images",
    description="Turn portraits into a painting in the style of Flemish master Dirck Bouts",
    article="iO Digital",
)
gradio_interface.launch()