cocktailpeanut committed
Commit 49901fa
1 Parent(s): 8fbd147
Files changed (2)
  1. app.py +21 -33
  2. requirements.txt +3 -2
app.py CHANGED
@@ -1,13 +1,12 @@
 import gradio as gr
 import torch
+import devicetorch
 from diffusers import StableDiffusionXLPipeline, StableDiffusionPipeline, LCMScheduler
 from diffusers.schedulers import TCDScheduler
 
 import spaces
 from PIL import Image
 
-SAFETY_CHECKER = True
-
 checkpoints = {
     "2-Step": ["pcm_{}_smallcfg_2step_converted.safetensors", 2, 0.0],
     "4-Step": ["pcm_{}_smallcfg_4step_converted.safetensors", 4, 0.0],
@@ -26,37 +25,26 @@ checkpoints = {
 
 loaded = None
 
-if torch.cuda.is_available():
-    pipe_sdxl = StableDiffusionXLPipeline.from_pretrained(
-        "stabilityai/stable-diffusion-xl-base-1.0",
-        torch_dtype=torch.float16,
-        variant="fp16",
-    ).to("cuda")
-    pipe_sd15 = StableDiffusionPipeline.from_pretrained(
-        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
-    ).to("cuda")
-
-if SAFETY_CHECKER:
-    from safety_checker import StableDiffusionSafetyChecker
-    from transformers import CLIPFeatureExtractor
-
-    safety_checker = StableDiffusionSafetyChecker.from_pretrained(
-        "CompVis/stable-diffusion-safety-checker"
-    ).to("cuda")
-    feature_extractor = CLIPFeatureExtractor.from_pretrained(
-        "openai/clip-vit-base-patch32"
-    )
-
-    def check_nsfw_images(
-        images: list[Image.Image],
-    ) -> tuple[list[Image.Image], list[bool]]:
-        safety_checker_input = feature_extractor(images, return_tensors="pt").to("cuda")
-        has_nsfw_concepts = safety_checker(
-            images=[images], clip_input=safety_checker_input.pixel_values.to("cuda")
-        )
-
-        return images, has_nsfw_concepts
-
+device = devicetorch.get(torch)
+
+#if torch.cuda.is_available():
+#    pipe_sdxl = StableDiffusionXLPipeline.from_pretrained(
+#        "stabilityai/stable-diffusion-xl-base-1.0",
+#        torch_dtype=torch.float16,
+#        variant="fp16",
+#    ).to("cuda")
+#    pipe_sd15 = StableDiffusionPipeline.from_pretrained(
+#        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
+#    ).to("cuda")
+
+pipe_sdxl = StableDiffusionXLPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    torch_dtype=torch.float16,
+    variant="fp16",
+).to(device)
+pipe_sd15 = StableDiffusionPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16"
+).to(device)
 
 @spaces.GPU(enable_queue=True)
 def generate_image(
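In app.py, the core change is replacing the hard-coded `"cuda"` target with `devicetorch.get(torch)` and moving both pipelines with `.to(device)`, so the Space is no longer tied to an NVIDIA GPU. As a rough sketch only (an approximation built from stock PyTorch checks, not devicetorch's actual implementation), the selection presumably behaves like this:

```python
import torch


def pick_device() -> str:
    # Prefer NVIDIA CUDA, then Apple Silicon (MPS), then plain CPU.
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"


device = pick_device()
# app.py then calls StableDiffusionXLPipeline.from_pretrained(...).to(device)
# instead of .to("cuda").
```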
requirements.txt CHANGED
@@ -3,6 +3,7 @@ datasets
 transformers
 accelerate
 peft
-xformers
+#xformers
 gradio==4.32.2
-spaces
+#spaces
+devicetorch
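requirements.txt comments out xformers and spaces and adds devicetorch. If xformers does happen to be installed on a CUDA machine, memory-efficient attention could still be enabled opportunistically; a hedged sketch (not part of this commit, and assuming the `pipe_sdxl` / `pipe_sd15` objects from app.py are in scope):

```python
# Optional: enable xformers attention only when the package is actually installed.
try:
    import xformers  # noqa: F401

    pipe_sdxl.enable_xformers_memory_efficient_attention()
    pipe_sd15.enable_xformers_memory_efficient_attention()
except ImportError:
    pass  # fall back to diffusers' default attention implementation
```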