hysts (HF staff) committed
Commit 7890caa
Parent: b1ae048

Update for ZeroGPU

Files changed (1)
  1. app.py  +6 -8
app.py CHANGED

@@ -1,4 +1,5 @@
 import gradio as gr
+import spaces
 import torch
 from torchvision import transforms
 from SDXL.diff_pipe import StableDiffusionXLDiffImg2ImgPipeline
@@ -9,7 +10,7 @@ device = "cuda"
 
 base = StableDiffusionXLDiffImg2ImgPipeline.from_pretrained(
     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
-)
+).to(device)
 
 refiner = StableDiffusionXLDiffImg2ImgPipeline.from_pretrained(
     "stabilityai/stable-diffusion-xl-refiner-1.0",
@@ -18,7 +19,7 @@ refiner = StableDiffusionXLDiffImg2ImgPipeline.from_pretrained(
     torch_dtype=torch.float16,
     use_safetensors=True,
     variant="fp16",
-)
+).to(device)
 
 base.scheduler = DPMSolverMultistepScheduler.from_config(base.scheduler.config)
 refiner.scheduler = DPMSolverMultistepScheduler.from_config(base.scheduler.config)
@@ -42,24 +43,21 @@ def preprocess_map(map):
     return map
 
 
+@spaces.GPU
 def inference(image, map, gs, prompt, negative_prompt):
     validate_inputs(image, map)
     image = preprocess_image(image)
     map = preprocess_map(map)
-    base_cuda = base.to(device)
-    edited_images = base_cuda(prompt=prompt, original_image=image, image=image, strength=1, guidance_scale=gs,
+    edited_images = base(prompt=prompt, original_image=image, image=image, strength=1, guidance_scale=gs,
                               num_images_per_prompt=1,
                               negative_prompt=negative_prompt,
                               map=map,
                               num_inference_steps=NUM_INFERENCE_STEPS, denoising_end=0.8, output_type="latent").images
-    base_cuda=None
-    refiner_cuda = refiner.to(device)
-    edited_images = refiner_cuda(prompt=prompt, original_image=image, image=edited_images, strength=1, guidance_scale=7.5,
+    edited_images = refiner(prompt=prompt, original_image=image, image=edited_images, strength=1, guidance_scale=7.5,
                               num_images_per_prompt=1,
                               negative_prompt=negative_prompt,
                               map=map,
                               num_inference_steps=NUM_INFERENCE_STEPS, denoising_start=0.8).images[0]
-    refiner_cuda=None
     return edited_images
 
 
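For reference, the pattern this commit adopts is: create the pipelines and move them to CUDA at module level, then mark the GPU-bound function with the @spaces.GPU decorator from the spaces package so ZeroGPU attaches a GPU only for the duration of the call. A minimal sketch of that pattern is shown below. It is illustrative only: it assumes a stock diffusers DiffusionPipeline and a plain text-to-image function, not this Space's custom StableDiffusionXLDiffImg2ImgPipeline and map-guided inference.

# Minimal ZeroGPU sketch (assumptions: stock diffusers pipeline and a simple
# text-to-image interface; not this Space's actual app.py).
import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline

device = "cuda"

# Under ZeroGPU the pipeline can be moved to CUDA at import time; a GPU is
# only attached while a @spaces.GPU-decorated function is running.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to(device)


@spaces.GPU  # request a GPU for this call, release it when the call returns
def generate(prompt: str):
    return pipe(prompt=prompt, num_inference_steps=25).images[0]


demo = gr.Interface(fn=generate, inputs="text", outputs="image")
demo.launch()

This is also why the diff removes the per-call base.to(device) / refiner.to(device) shuffling and the base_cuda=None / refiner_cuda=None resets: once the decorator manages GPU allocation, the pipelines can simply stay on device.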