# Gradio demo: text-to-image generation with the aMUSEd 512px pipeline, running on CPU.
import random
import gc
import torch
import gradio as gr
from diffusers import AmusedPipeline
from accelerate import Accelerator

# Prepare the pipeline for CPU-only execution in float32.
accelerator = Accelerator(cpu=True)
pipe = accelerator.prepare(
    AmusedPipeline.from_pretrained(
        "amused/amused-512",
        variant=None,
        torch_dtype=torch.float32,
        use_safetensors=True,
    )
)
pipe.vqvae.to(torch.float32)
pipe.to("cpu")

def plex(prompt, guod, fifth, twice, nut):
    # guod: guidance scale, fifth: images per prompt, twice: inference steps, nut: manual seed (0 = random)
    gc.collect()
    apol = []
    if nut == 0:
        # Draw a random seed that is a multiple of 32 (the seed slider's step).
        nm = random.randint(1, 2147483616)
        while nm % 32 != 0:
            nm = random.randint(1, 2147483616)
    else:
        nm = nut
    generator = torch.Generator(device="cpu").manual_seed(nm)
    image = pipe(
        prompt=prompt,
        guidance_scale=guod,
        num_inference_steps=twice,
        num_images_per_prompt=fifth,
        generator=generator,
    )
    for imze in image.images:
        apol.append(imze)
    return apol

iface = gr.Interface(
    fn=plex,
    inputs=[
        gr.Textbox(label="prompt"),
        gr.Slider(label="guidance scale", minimum=1, step=1, maximum=10, value=4),
        gr.Slider(label="num images", minimum=1, step=1, maximum=4, value=1),
        gr.Slider(label="num inference steps", minimum=1, step=1, maximum=20, value=12),
        gr.Slider(label="manual seed (leave 0 for random)", minimum=0, step=32, maximum=2147483616, value=0),
    ],
    outputs=gr.Gallery(label="out", columns=2),
    description="Running on cpu, very slow! by JoPmt.",
)
iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=1)