hysts (HF staff) committed
Commit: 28ed681
1 parent: c193464
Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -4,7 +4,7 @@ from PIL import Image
 from diffusers import AutoPipelineForText2Image, DDIMScheduler
 from transformers import CLIPVisionModelWithProjection
 import numpy as np
-import spaces
+import spaces
 
 
 image_encoder = CLIPVisionModelWithProjection.from_pretrained(
@@ -25,7 +25,7 @@ pipeline.set_ip_adapter_scale([0.7, 0.5])
 
 pipeline.enable_model_cpu_offload()
 
-@spaces.ZeroGPU
+@spaces.GPU
 def transform_image(face_image):
     generator = torch.Generator(device="cpu").manual_seed(0)
 
@@ -39,7 +39,7 @@ def transform_image(face_image):
         raise ValueError("Unsupported image format")
 
     # Load the style image from the local path
-    style_image_path = "/content/soyjak2.jpeg"
+    style_image_path = "examples/soyjak2.jpg"
     style_image = Image.open(style_image_path)
 
     # Perform the transformation
@@ -63,4 +63,4 @@ demo = gr.Interface(
 )
 
 demo.queue(max_size=20)  # Configures the queue with a maximum size of 20
-demo.launch()
+demo.launch()
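
The substantive fix is the decorator name: the spaces package that powers ZeroGPU Spaces exposes spaces.GPU, and spaces.ZeroGPU is not an attribute of that package, so the old decorator would fail as soon as the module was imported. A minimal sketch of the corrected pattern, assuming it runs on a Space where the spaces package is installed (the describe_device helper is hypothetical, not part of this commit):

import spaces
import torch

@spaces.GPU  # on ZeroGPU, a GPU is attached only while the decorated call runs
def describe_device() -> str:
    # CUDA should be visible inside the decorated call; outside it may not be.
    return "cuda" if torch.cuda.is_available() else "cpu"

The decorator also accepts a duration hint, e.g. @spaces.GPU(duration=120), for calls that need the device longer than the default slot.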
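
The other change swaps a Colab-specific absolute path (/content/soyjak2.jpeg) for a repo-relative one (examples/soyjak2.jpg), which resolves correctly only when the process starts in the repo root. A slightly more defensive sketch, assuming the image ships in an examples/ directory next to app.py (load_style_image is a hypothetical helper, not part of the commit):

from pathlib import Path
from PIL import Image

# Resolve assets relative to this file so the app works regardless of
# the current working directory (an assumption layered on the commit).
ASSETS_DIR = Path(__file__).parent / "examples"

def load_style_image(name: str = "soyjak2.jpg") -> Image.Image:
    return Image.open(ASSETS_DIR / name)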