File size: 1,385 Bytes
62f266f
 
 
 
 
 
 
 
 
cdd743a
 
 
 
 
 
62f266f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
from diffusers import StableDiffusionPipeline
from compel import Compel
import gradio
import torch

# Model / runtime configuration for the texture-generation pipeline.
model_id = "dream-textures/texture-diffusion"
device = "cuda"
dtype = torch.float16

# Load the pipeline in half precision on the GPU when one is present;
# otherwise fall back to the default (full-precision, CPU) load.
cuda_available = torch.cuda.is_available()
if cuda_available:
  pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
  pipe = pipe.to(device)
else:
  pipe = StableDiffusionPipeline.from_pretrained(model_id)

# Compel converts weighted/long prompts into embeddings; truncation is
# disabled so prompts beyond the tokenizer limit are kept.
compel_proc = Compel(
  tokenizer=pipe.tokenizer,
  text_encoder=pipe.text_encoder,
  truncate_long_prompts=False,
)

def predict(
  prompt: str,
  generator: int,
  num_inference_steps: int,
  strength: float,
  guidance_scale: float,
):
  """Generate a single texture image for *prompt*.

  Args:
    prompt: Text prompt (Compel syntax supported, e.g. weighting).
    generator: Integer RNG seed taken from the UI slider.
    num_inference_steps: Number of denoising steps.
    strength: Forwarded to the pipeline as-is.
    guidance_scale: Classifier-free guidance scale.

  Returns:
    The first generated PIL image, or None if the pipeline produced none.
  """
  # Seed a dedicated Generator instead of torch.manual_seed(), which would
  # clobber the global (CPU and CUDA) RNG state as a side effect.
  rng = torch.Generator().manual_seed(int(generator))
  prompt_embeds = compel_proc(prompt)

  # NOTE(review): `strength` is an img2img parameter; a plain text-to-image
  # StableDiffusionPipeline may not accept it — confirm against the
  # installed diffusers version. Kept to preserve the existing call.
  results = pipe(
    prompt_embeds=prompt_embeds,
    generator=rng,
    guidance_scale=float(guidance_scale),
    num_inference_steps=int(num_inference_steps),  # sliders may hand back floats
    output_type="pil",
    strength=float(strength),
  )

  # Guard against an empty result list rather than indexing blindly.
  if results.images:
    return results.images[0]
  return None

# Build the web UI: one input widget per predict() parameter, in order.
prompt_input = gradio.Textbox("pbr brick wall")                  # prompt
seed_input = gradio.Slider(0, 2147483647, 2159232, step=1)       # generator
steps_input = gradio.Slider(2, 15, 4, step=1)                    # num_inference_steps
strength_input = gradio.Slider(0.0, 1.0, 0.5, step=0.01)         # strength
guidance_input = gradio.Slider(0.0, 5.0, 0.2, step=0.01)         # guidance_scale

app = gradio.Interface(
  fn=predict,
  inputs=[prompt_input, seed_input, steps_input, strength_input, guidance_input],
  outputs=gradio.Image(type="pil"),
)
app.launch()