File size: 5,883 Bytes
85a91b8
1083e8b
85a91b8
 
63f2987
85a91b8
4ac004d
 
85a91b8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78078c6
85a91b8
 
cd08edc
bebd114
85a91b8
 
 
 
 
 
 
 
 
 
 
18989dd
85a91b8
 
 
 
 
 
 
 
18989dd
85a91b8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ac0f3d9
85a91b8
636ac67
 
85a91b8
 
d992931
85a91b8
 
 
 
4ea0fd4
 
63c08a9
1813644
63c08a9
1813644
f09747c
4ea0fd4
f09747c
4ea0fd4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85a91b8
43da0e2
18989dd
85a91b8
 
 
 
 
 
 
 
 
 
 
 
ae46b9c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
import gradio as gr
import torch
from PIL import Image

# Fine-tuned Stable Diffusion checkpoint on the Hugging Face Hub.
model_id = 'Randolph/hadenjax-dreams'
prefix = ''  # empty: the "auto prefix" checkbox below is effectively unused
suffix = 'by hadenjax'  # appended to every prompt to trigger the fine-tuned style

# DPM-Solver multistep scheduler loaded from the model repo; shared by both pipelines.
scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")

# Text-to-image pipeline: half precision on GPU, full precision on CPU.
pipe = StableDiffusionPipeline.from_pretrained(
  model_id,
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
  scheduler=scheduler)

# Image-to-image pipeline built from the same weights and scheduler.
pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
  model_id,
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
  scheduler=scheduler)

# Move both pipelines to the GPU when one is available.
if torch.cuda.is_available():
  pipe = pipe.to("cuda")
  pipe_i2i = pipe_i2i.to("cuda")

def error_str(error, title="Error"):
    """Render an error as a small Markdown panel; empty string when there is no error."""
    if not error:
        return ""
    return f"""#### {title}
            {error}"""

def inference(prompt, guidance, steps, width=800, height=800, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
  """Gradio handler: generate an image from `prompt` (img2img when `img` is given).

  Returns a `(image_or_None, error_markdown)` pair so the UI can display either
  the result or a formatted error instead of letting an exception escape.
  """
  # Bug fix: the original hard-coded torch.Generator('cuda'), which raises on
  # CPU-only hosts whenever a non-zero seed is requested. Pick the device the
  # same way the pipelines were placed at module load.
  device = "cuda" if torch.cuda.is_available() else "cpu"
  generator = torch.Generator(device).manual_seed(seed) if seed != 0 else None
  prompt = f"{prefix} {prompt} {suffix}" if auto_prefix else f"{prompt} {suffix}"
  # Always discourage photorealistic output so results stay in the artist's style.
  neg_prompt = f"{neg_prompt}, photo, DSLR, photorealistic"

  try:
    if img is not None:
      return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
    else:
      return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
  except Exception as e:
    # Surface the failure to the UI's Markdown error box rather than crashing.
    return None, error_str(e)

def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
  """Run the text-to-image pipeline and return the first generated PIL image."""
  options = dict(
      negative_prompt=neg_prompt,
      num_inference_steps=int(steps),  # sliders deliver floats; the pipeline wants an int
      guidance_scale=guidance,
      width=width,
      height=height,
      generator=generator,
  )
  return pipe(prompt, **options).images[0]

def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
    """Scale `img` to fit within (width, height) keeping aspect ratio, then run img2img.

    Returns the first generated PIL image.
    """
    scale = min(height / img.height, width / img.width)
    new_size = (int(img.width * scale), int(img.height * scale))
    resized = img.resize(new_size, Image.LANCZOS)
    output = pipe_i2i(
        prompt,
        negative_prompt=neg_prompt,
        init_image=resized,
        num_inference_steps=int(steps),  # sliders deliver floats; the pipeline wants an int
        strength=strength,
        guidance_scale=guidance,
        width=width,
        height=height,
        generator=generator,
    )
    return output.images[0]

# Inline CSS for the memorial header, tabs, and gallery sizing.
css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
"""
# Build the Gradio UI: memorial header, output/prompt column, options column.
with gr.Blocks(css=css) as demo:
    gr.HTML(
        f"""<div class="main-div">
              <div>
                <h1>RIP Haden Jack Nimmer</h1>
                <h2>February 2nd, 1990 - May 31st, 2014</h2>
              </div>
              <p>
               <a href="https://huggingface.co/Randolph/hadenjax-dreams">HadenJax Dreams</a> is a memorial to my late brother, Haden "Jax" Jack Nimmer. It illustrates what you request in his artistic style.<br>
              </p>
              Running on {"<b>GPU 🔥</b>" if torch.cuda.is_available() else f"<b>CPU 🥶</b>. For faster inference it is recommended to <b>upgrade to GPU in <a href='https://huggingface.co/spaces/Randolph/hadenjax-dreams/settings'>Settings</a></b>"}<br><br>
            </div>
        """
    )
    with gr.Row(equal_height=True):
        # Left column: generated image, error panel, prompt box, generate button.
        with gr.Column(scale=50):
            with gr.Group():
                image_out = gr.Image(height=800)
                error_output = gr.Markdown()  # filled from error_str() on failure
            with gr.Row():
                # NOTE(review): .style(...) is the gradio 3.x component API — verify
                # against the pinned gradio version before upgrading.
                prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder="(graphic novel)(webcomic of)(smudgy the whale)(parts-unknown)").style(container=False)
            with gr.Row():
                generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
        # Right column: generation options and the optional img2img input.
        with gr.Column(scale=50):
            with gr.Tab("Options"):
                with gr.Group():
                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
                    # prefix is '' at module level, so this checkbox is falsy and hidden.
                    auto_prefix = gr.Checkbox(label="Unused", value=prefix, visible=prefix)
                with gr.Row():
                    guidance = gr.Slider(label="Guidance scale", value=10, maximum=15)
                with gr.Row():
                    steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
                with gr.Row():
                    width = gr.Slider(label="Width", value=400, minimum=64, maximum=1024, step=8)
                with gr.Row():
                    height = gr.Slider(label="Height", value=400, minimum=64, maximum=1024, step=8)
                with gr.Row():
                    seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
            with gr.Tab("Image to image"):
                with gr.Group():
                    # Providing an image here routes inference() through img_to_img().
                    image = gr.Image(label="Image", height=600, tool="editor", type="pil")
                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)

    # Keep the prompt placeholder in sync when the (hidden) prefix toggle changes.
    auto_prefix.change(lambda x: gr.update(placeholder=f"[Your prompt] {suffix}"), inputs=auto_prefix, outputs=prompt, queue=False)
    # Both Enter-in-textbox and the button trigger the same inference handler.
    inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix]
    outputs = [image_out, error_output]
    prompt.submit(inference, inputs=inputs, outputs=outputs)
    generate.click(inference, inputs=inputs, outputs=outputs)

    gr.HTML("""
    <div style="border-top: 1px solid #303030;">
      <br>
      <p>This space was created using <a href="https://huggingface.co/spaces/anzorq/sd-space-creator">SD Space Creator</a>.</p>
    </div>
    """)

# Serialize requests (one at a time) and start the app.
# NOTE(review): queue(concurrency_count=...) is gradio 3.x; gradio 4 renamed this.
demo.queue(concurrency_count=1)
demo.launch()