hostin committed
Commit
d9d976a
1 Parent(s): 87caf77

Update app.py

Files changed (1)
  1. app.py +11 -5
app.py CHANGED
@@ -20,16 +20,24 @@ def generate_and_display_images(model_selection, scenery, style, height, width,
         return "Invalid seed value. Seed must be an integer."
     torch.manual_seed(seed)
 
+    # Check if CUDA is available and set the appropriate dtype
+    if torch.cuda.is_available():
+        device = "cuda"
+        dtype = torch.float16
+    else:
+        device = "cpu"
+        dtype = torch.float32
+
     prompt = f"Scenery: {scenery}; Style: {style}"
 
     generated_images = []
     if model_selection == "dreamlike-art/dreamlike-photoreal-2.0":
-        model = StableDiffusionPipeline.from_pretrained(model_selection, torch_dtype=torch.float16).to("cpu")
+        model = StableDiffusionPipeline.from_pretrained(model_selection, torch_dtype=dtype).to(device)
         for _ in range(num_images):
             image = model(prompt=prompt, num_inference_steps=n_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, height=height, width=width).images[0]
             generated_images.append(image)
     else:
-        base = DiffusionPipeline.from_pretrained(model_selection, torch_dtype=torch.float16, use_auth_token=True).to("cpu")
+        base = DiffusionPipeline.from_pretrained(model_selection, torch_dtype=dtype, use_auth_token=True).to(device)
         for _ in range(num_images):
             if "refiner" in model_selection:
                 image_latent = base(prompt=prompt, num_inference_steps=n_steps, denoising_end=high_noise_frac, output_type="latent").images
@@ -37,7 +45,7 @@ def generate_and_display_images(model_selection, scenery, style, height, width,
             else:
                 image = base(prompt=prompt, num_inference_steps=n_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, height=height, width=width).images[0]
             generated_images.append(image)
-
+
     # Save images and return file paths for Gradio display
     file_paths = []
     for i, image in enumerate(generated_images):
@@ -48,8 +56,6 @@ def generate_and_display_images(model_selection, scenery, style, height, width,
 
     return file_paths
 
-
-
 # Define Gradio interface
 iface = gr.Interface(
     fn=generate_and_display_images,
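
For reference, a minimal standalone sketch of the device/dtype selection pattern this commit introduces, using the dreamlike-art/dreamlike-photoreal-2.0 checkpoint named in the diff; the prompt text, step count, and output file name below are illustrative and not taken from app.py:

import torch
from diffusers import StableDiffusionPipeline

# Same policy as the updated app.py: float16 on CUDA GPUs,
# float32 on CPU (half precision is poorly supported on CPU).
if torch.cuda.is_available():
    device = "cuda"
    dtype = torch.float16
else:
    device = "cpu"
    dtype = torch.float32

pipe = StableDiffusionPipeline.from_pretrained(
    "dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=dtype
).to(device)

# Illustrative prompt in the "Scenery: ...; Style: ..." format the app builds
image = pipe(
    prompt="Scenery: alpine lake at sunrise; Style: photorealistic",
    num_inference_steps=25,
).images[0]
image.save("example.png")

Selecting the dtype alongside the device avoids the crash that the old code could hit when a float16 pipeline is run on CPU.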