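# Gradio demo: text-to-image generation with Stable Cascade (prior + decoder pipelines).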
import gradio as gr
import torch
from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline

# Run on the GPU; the xformers attention and CUDA cache calls below assume CUDA is available.
device = 'cuda' #if torch.cuda.is_available() else 'cpu'
torch.cuda.empty_cache()
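# Load the prior in bfloat16 and the decoder in float16; xformers attention and
# CPU offload (decoder) keep peak GPU memory down.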
prior = StableCascadePriorPipeline.from_pretrained("stabilityai/stable-cascade-prior", variant="bf16", torch_dtype=torch.bfloat16)
prior.enable_xformers_memory_efficient_attention()
prior = prior.to(device)
decoder = StableCascadeDecoderPipeline.from_pretrained("stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.float16)
decoder.enable_xformers_memory_efficient_attention()
decoder.enable_model_cpu_offload()
torch.cuda.empty_cache()
                    
def genie(Prompt, negative_prompt, height, width, scale, steps, d_steps, seed):
    """Two-stage generation: the prior produces image embeddings, the decoder renders them."""
    # Seed 0 means "random"; otherwise seed the generator for reproducible output.
    generator = torch.Generator(device=device)
    if seed != 0:
        generator = generator.manual_seed(seed)
    torch.cuda.empty_cache()
    prior_output = prior(
        prompt=Prompt,
        height=height,
        width=width,
        negative_prompt=negative_prompt,
        guidance_scale=scale,
        num_images_per_prompt=1,
        num_inference_steps=steps,
        generator=generator)
    image = decoder(
        image_embeddings=prior_output.image_embeddings.to(torch.float16),
        prompt=Prompt,
        negative_prompt=negative_prompt,
        guidance_scale=0.0,
        output_type="pil",
        num_inference_steps=d_steps,
        generator=generator).images[0]
    return image
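
# Example (illustrative values): genie() can also be called directly, without the web UI:
#   img = genie("a watercolor fox", "blurry", 1024, 1024, 4.0, 20, 10, 42)
#   img.save("fox.png")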
    
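# Wire the inputs and output into a Gradio Interface and launch the demo.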
gr.Interface(fn=genie,
             inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
                     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit.'),
                     gr.Slider(512, 1536, 1024, step=128, label='Height'),
                     gr.Slider(512, 1536, 1024, step=128, label='Width'),
                     gr.Slider(.5, maximum=10, value=3, step=.25, label='Guidance Scale'),
                     gr.Slider(10, maximum=40, value=20, step=5, label='Number of Prior Iterations'),
                     gr.Slider(5, maximum=20, value=10, step=5, label='Number of Decoder Iterations'),
                     gr.Slider(minimum=0, maximum=9999999999999999, step=1, randomize=True, label='Seed: 0 is Random')],
             outputs=gr.Image(label='Generated Image'),
             title="Manju Dream Booth V2.2 with Stable-Cascade - GPU",
             description="<br><br><b>Warning:</b> This Demo is capable of producing NSFW content.",
             article="If You Enjoyed this Demo and would like to Donate, you can send any amount to any of these Wallets. <br><br>SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>PayPal: https://www.paypal.me/ManjushriBodhisattva <br>ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 <br>DOGE: D9QdVPtcU1EFH8jDC8jhU9uBcSTqUiA8h6<br><br>Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True)