Spaces: briaai (Running on Zero)

mokady committed
Commit 96e106c
Parent: 54da31f

Update app.py

Files changed (1): app.py (+18, -18)
app.py CHANGED
@@ -26,26 +26,26 @@ scheduler = EulerAncestralDiscreteScheduler(
 pipe = StableDiffusionXLPipeline.from_pretrained(model_id, torch_dtype=torch.float16,scheduler=scheduler).to("cuda")
 pipe.force_zeros_for_empty_prompt = False
 
-print("Optimizing BRIA-2.3 - this could take a while")
-t=time.time()
-pipe.unet = torch.compile(
-    pipe.unet, mode="reduce-overhead", fullgraph=True # 600 secs compilation
-)
-with torch.no_grad():
-    outputs = pipe(
-        prompt="an apple",
-        num_inference_steps=30,
-    )
+# print("Optimizing BRIA-2.3 - this could take a while")
+# t=time.time()
+# pipe.unet = torch.compile(
+#     pipe.unet, mode="reduce-overhead", fullgraph=True # 600 secs compilation
+# )
+# with torch.no_grad():
+#     outputs = pipe(
+#         prompt="an apple",
+#         num_inference_steps=30,
+#     )
 
-# This will avoid future compilations on different shapes
-unet_compiled = torch._dynamo.run(pipe.unet)
-unet_compiled.config=pipe.unet.config
-unet_compiled.add_embedding = Dummy()
-unet_compiled.add_embedding.linear_1 = Dummy()
-unet_compiled.add_embedding.linear_1.in_features = pipe.unet.add_embedding.linear_1.in_features
-pipe.unet = unet_compiled
+# # This will avoid future compilations on different shapes
+# unet_compiled = torch._dynamo.run(pipe.unet)
+# unet_compiled.config=pipe.unet.config
+# unet_compiled.add_embedding = Dummy()
+# unet_compiled.add_embedding.linear_1 = Dummy()
+# unet_compiled.add_embedding.linear_1.in_features = pipe.unet.add_embedding.linear_1.in_features
+# pipe.unet = unet_compiled
 
-print(f"Optimizing finished successfully after {time.time()-t} secs")
+# print(f"Optimizing finished successfully after {time.time()-t} secs")
 
 @spaces.GPU(enable_queue=True)
 def infer(prompt,negative_prompt,seed,resolution):
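For context, the block this commit comments out implemented a warm-up pattern for torch.compile: compile the UNet, run one throwaway generation to trigger the expensive compilation up front (about 600 seconds, per the inline comment), then wrap the result with torch._dynamo.run so later calls skip Dynamo's guard checks and never recompile for new input shapes. Because that wrapper is a plain callable, the code re-attaches the attributes StableDiffusionXLPipeline reads off the UNet (config and add_embedding.linear_1.in_features) via Dummy stubs. Below is a minimal self-contained sketch of the same pattern; the Dummy class body, the model_id value, and the scheduler construction are assumptions, since they sit outside this hunk:

import time

import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline


class Dummy:
    # Bare attribute holder; stands in for the Dummy class defined
    # elsewhere in app.py (its definition is outside this hunk).
    pass


model_id = "briaai/BRIA-2.3"  # assumed; the actual assignment is outside this hunk
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(
    model_id, subfolder="scheduler"  # assumed; the original passes explicit args
)
pipe = StableDiffusionXLPipeline.from_pretrained(
    model_id, torch_dtype=torch.float16, scheduler=scheduler
).to("cuda")
pipe.force_zeros_for_empty_prompt = False

t = time.time()

# Compile the UNet. "reduce-overhead" enables CUDA graphs; fullgraph=True
# fails loudly instead of silently falling back on graph breaks.
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

# One throwaway generation triggers the long compilation at startup
# instead of on the first user request.
with torch.no_grad():
    pipe(prompt="an apple", num_inference_steps=30)

# torch._dynamo.run returns a callable that only uses already-compiled
# graphs and never re-enters the compiler, so new inputs won't cause a
# recompile (they must match a shape that was already traced).
unet_compiled = torch._dynamo.run(pipe.unet)

# The wrapper doesn't proxy module attributes, so restore the ones the
# SDXL pipeline touches during inference.
unet_compiled.config = pipe.unet.config
unet_compiled.add_embedding = Dummy()
unet_compiled.add_embedding.linear_1 = Dummy()
unet_compiled.add_embedding.linear_1.in_features = (
    pipe.unet.add_embedding.linear_1.in_features
)
pipe.unet = unet_compiled

print(f"Optimizing finished successfully after {time.time() - t} secs")

The commit disables all of this rather than deleting it, trading the compiled UNet's lower per-step overhead for a Space that starts without the multi-minute compile and runs the UNet in eager mode.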