LPX55 committed (verified)
Commit b71375b · Parent(s): 2b517e0

Update app_v3.py

Files changed (1): app_v3.py (+4, -1)
app_v3.py CHANGED
@@ -47,9 +47,12 @@ pipe = FluxControlNetPipeline.from_pretrained(
 )
 pipe.to("cuda")
 
-pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 # For FLUX models, compiling VAE decode can also be beneficial if needed, though UNet is primary.
 # pipe.vae.decode = torch.compile(pipe.vae.decode, mode="reduce-overhead", fullgraph=True) # Uncomment if VAE compile helps
+try:
+    pipe.vae.decode = torch.compile(pipe.vae.decode, mode="reduce-overhead", fullgraph=True)
+except Exception as e:
+    print(f"Compile failed: {e}")
 
 # 2. Memory Efficient Attention (xFormers): Reduces memory usage and improves speed
 # Requires xformers library installation. Beneficial even with high VRAM.
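For readability outside the diff view, below is a minimal, self-contained sketch of the guarded-optimization pattern this commit moves toward. Only the try/except around the VAE decode compile is taken from the diff; the checkpoint IDs ("black-forest-labs/FLUX.1-dev", "InstantX/FLUX.1-dev-Controlnet-Canny") and the guarded enable_xformers_memory_efficient_attention() call are illustrative assumptions based on the surrounding comments, not code from app_v3.py.

import torch
from diffusers import FluxControlNetModel, FluxControlNetPipeline

# Placeholder checkpoints, assumed for illustration (not read from app_v3.py).
controlnet = FluxControlNetModel.from_pretrained(
    "InstantX/FLUX.1-dev-Controlnet-Canny", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", controlnet=controlnet, torch_dtype=torch.bfloat16
)
pipe.to("cuda")

# 1. torch.compile on VAE decode, wrapped in try/except (as in this commit) so a
#    missing or incompatible compile backend falls back to eager mode instead of
#    crashing the app at startup.
try:
    pipe.vae.decode = torch.compile(pipe.vae.decode, mode="reduce-overhead", fullgraph=True)
except Exception as e:
    print(f"Compile failed: {e}")

# 2. Memory-efficient attention (xFormers), guarded the same way since it needs
#    the optional xformers package (assumed here; not part of this diff).
try:
    pipe.enable_xformers_memory_efficient_attention()
except Exception as e:
    print(f"xFormers unavailable: {e}")

Note that removing the pipe.unet compile line is consistent with FLUX pipelines exposing a transformer rather than a unet component; if compiling the main model is still wanted, pipe.transformer would presumably be the analogous target.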