# tiki-64/app.py
import gradio as gr
import torch
from diffusers import DDPMPipeline, DDIMPipeline, PNDMPipeline
MODEL_ID = "verkaDerkaDerk/tiki-based-128"
MODEL_ID = "verkaDerkaDerk/tiki-64"
PIPELINE = DDPMPipeline.from_pretrained(MODEL_ID)
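
# Optional sketch (not in the original Space, which runs on CPU): move the pipeline to
# a GPU when one is available to cut the 80s-120s per-image generation time.
#if torch.cuda.is_available():
#    PIPELINE = PIPELINE.to("cuda")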
#############################################################################
def tiki(batch_size=1, seed=0):
    generator = torch.manual_seed(seed)
    return PIPELINE(generator=generator, batch_size=batch_size)["sample"]
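
# Usage sketch outside the UI (assumption; not part of the app wiring):
#   image = tiki(batch_size=1, seed=917832826)[0]
#   image.save("tiki.png")
# Note: the "sample" key matches the diffusers release this Space was built against;
# newer diffusers versions expose the generated PIL images as `.images` instead.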
def imagine4():
    return tiki(batch_size=4)

def imagine():
    return tiki()[0]
#############################################################################
def fancy():
    # try some https://huggingface.co/spaces/stabilityai/stable-diffusion/blob/main/app.py trix
    css = '''
        .output_image {height: 40rem !important; width: 100% !important; }
        .object-contain {height: 256px !important; width: 256px !important; }
        #gallery img { height: 256px !important; width: 256px !important; }
        /* .center { text-align: center; } */
    '''
    block = gr.Blocks(css=css)

    with block:
        gr.HTML('''
<pre>
This is an unconditional diffusion model trained on around 500
miscellaneous tiki images from around the web.

It was trained for around 4k epochs with a loss around 1.5%.
The 64x64 version (used here) took around 12s/epoch.

Despite the loss staying about the same, the visual quality
continued to improve. Occasionally, the generated tikis
require some suspension of disbelief.

More training is planned for the near future, but the current model is
good enough to provide some limited play value.

Image generation is slow, from 80s - 120s per image.
When running 4 images concurrently it takes 4-5 minutes total.
Despite the long wait time, it's more fun to generate 4 at a time.

Different "tiki" values will give different tiki images, if you can
imagine.
</pre>

<p class="center">
<center whatever="i know, i know...">
<img src="https://freeimghost.net/images/2022/08/23/tiki-600e.md.png" height="256">
</center>
</p>
        ''')
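
        # Context for the timings above (assumption): DDPMPipeline runs its full
        # denoising schedule (1000 steps by default) for every image, and this Space
        # samples on CPU, hence the roughly 80s-120s per 64x64 image.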
        with gr.Group():
            with gr.Box():
                with gr.Row():
                    btn = gr.Button("Generate image")

            gallery = gr.Gallery(
                label="Generated images", show_label=False, elem_id="gallery"
            ).style(grid=[4], height="256")
            #btn.click(imagine4, inputs=None, outputs=gallery)

            maximum = 4294967296
            # default seed for the "tiki" slider; earlier picks kept for reference
            #seed = torch.randint(maximum, [1])[0].item()
            #seed = 2607725669 # lulz
            seed = 917832826

            with gr.Row(elem_id="tiki-options"):
                batch_size = gr.Slider(label="image count", minimum=1, maximum=4, value=1, step=1)
                seed = gr.Slider(label="tiki", minimum=0, maximum=maximum, value=seed, step=1)

            # slider values are passed positionally as tiki(batch_size, seed)
            btn.click(tiki, inputs=[batch_size, seed], outputs=gallery)
        gr.HTML('''
            <p>Trained with <a href="https://github.com/huggingface/diffusers">huggingface/diffusers</a>.</p>
        ''')

    #block.queue(max_size=40).launch()
    block.queue().launch()
#############################################################################
def plain():
    # trix from https://tmabraham.github.io/blog/gradio_hf_spaces_tutorial
    title = "Tiki Diffusion Model"
    description = '''
Diffusion model trained on random tiki images.

FIXME:
- runs very slowly, 120s - 240s per image
- image is weirdly stretched
'''
    # gr.Interface expects `article` as an HTML/markdown string, not a component
    article = '''
        <p>Trained with <a href="https://github.com/huggingface/diffusers">diffusers</a>.</p>
    '''
    # ValueError: The parameter `examples` must either be a string directory or a list
    # (if there is only 1 input component) or (more generally) a nested list, where each
    # sublist represents a set of inputs.
    examples = ['tiki-600e.png']

    interpretation = 'default' # no idea...
    enable_queue = True

    # https://github.com/gradio-app/gradio/issues/287
    css = '''
        .output_image {height: 40rem !important; width: 100% !important; }
        .object-contain {height: 256px !important; width: 256px !important; }
    '''
    # css = ".output-image, .input-image, .image-preview {height: 600px !important}"

    inputs = None
    outputs = "pil"
    gr.Interface(
        fn=imagine,
        inputs=inputs,
        outputs=outputs,
        title=title,
        description=description,
        article=article,
        css=css,
        #examples=examples,
        interpretation=interpretation,
        enable_queue=enable_queue
    ).launch()
#############################################################################
def main():
    # the Blocks-based UI is the one currently in use; plain() is kept as a fallback
    if True:
        return fancy()
    plain()

main()
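
# Local usage sketch (assumption; Hugging Face Spaces simply executes this file):
#   pip install torch diffusers gradio
#   python app.py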
# EOF
#############################################################################