kadirnar committed on
Commit 2a84d59
1 Parent(s): 88d03e7

Update app.py

Files changed (1)
  1. app.py +5 -12
app.py CHANGED
@@ -8,7 +8,7 @@ import numpy as np
 from PIL import Image
 import spaces
 import torch
-from diffusers import StableDiffusionXLPipeline, DPMSolverSinglestepScheduler, AutoencoderKL
+from diffusers import StableDiffusion3Pipeline, DPMSolverSinglestepScheduler, AutoencoderKL
 
 DESCRIPTION = """# Stable Diffusion 3"""
 if not torch.cuda.is_available():
@@ -24,14 +24,8 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
 if torch.cuda.is_available():
-    pipe = StableDiffusionXLPipeline.from_single_file(
-        "https://huggingface.co/kadirnar/Black-Hole/blob/main/tachyon.safetensors",
-        torch_dtype=torch.float16,
-        use_safetensors=True,
-        add_watermarker=False,
-        variant="fp16",
-        vae=vae,
-    )
+    pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium", torch_dtype=torch.float16)
+
     if ENABLE_CPU_OFFLOAD:
         pipe.enable_model_cpu_offload()
     else:
@@ -63,9 +57,9 @@ def generate(
     seed: int = 0,
     width: int = 1024,
     height: int = 1024,
-    guidance_scale: float = 3,
+    guidance_scale: float = 7,
     randomize_seed: bool = False,
-    num_inference_steps=5,
+    num_inference_steps=30,
     NUM_IMAGES_PER_PROMPT=1,
     use_resolution_binning: bool = True,
     progress=gr.Progress(track_tqdm=True),
@@ -73,7 +67,6 @@ def generate(
     pipe.to(device)
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator().manual_seed(seed)
-    sampling_schedule = [999, 845, 730, 587, 443, 310, 193, 116, 53, 13, 0]
     #pipe.scheduler = DPMSolverSinglestepScheduler(use_karras_sigmas=True).from_config(pipe.scheduler.config)
     #pipe.scheduler = DPMSolverMultistepScheduler(algorithm_type="sde-dpmsolver++").from_config(pipe.scheduler.config)
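
For reference, a minimal standalone sketch of driving the pipeline as reconfigured by this commit. The model id ("stabilityai/stable-diffusion-3-medium"), dtype, guidance_scale=7, and num_inference_steps=30 come from the diff above; the prompt, seed, and output path are illustrative placeholders and not part of the commit.

# Minimal sketch, not part of app.py: exercise the SD3 pipeline as configured above.
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium",  # repo id used in the diff
    torch_dtype=torch.float16,
)
pipe.to("cuda")

generator = torch.Generator().manual_seed(0)  # fixed seed; illustrative
image = pipe(
    prompt="a photo of an astronaut riding a horse",  # illustrative prompt
    guidance_scale=7,        # new default from this commit
    num_inference_steps=30,  # new default from this commit
    generator=generator,
).images[0]
image.save("sd3_output.png")  # illustrative output path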