IDKiro committed on
Commit
6994be4
1 Parent(s): 51f05bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -40
app.py CHANGED
@@ -7,21 +7,25 @@ import PIL.Image
7
  import torch
8
 
9
  from diffusers import StableDiffusionPipeline, AutoencoderKL, AutoencoderTiny
10
- from peft import PeftModel
11
 
12
  device = "cuda"
13
  weight_type = torch.float16
14
 
15
- pipe = StableDiffusionPipeline.from_pretrained("IDKiro/sdxs-512-dreamshaper", torch_dtype=weight_type)
16
  pipe.unet = PeftModel.from_pretrained(pipe.unet, "IDKiro/sdxs-512-dreamshaper-anime")
17
  pipe.to(torch_device=device, torch_dtype=weight_type)
18
 
19
- vae_tiny = AutoencoderTiny.from_pretrained("IDKiro/sdxs-512-dreamshaper", subfolder="vae")
 
 
20
  vae_tiny.to(device, dtype=weight_type)
21
 
22
- vae_large = AutoencoderKL.from_pretrained("IDKiro/sdxs-512-dreamshaper", subfolder="vae_large")
 
 
23
  vae_tiny.to(device, dtype=weight_type)
24
 
 
25
  def pil_image_to_data_url(img, format="PNG"):
26
  buffered = BytesIO()
27
  img.save(buffered, format=format)
@@ -34,7 +38,7 @@ def run(
34
  prompt: str,
35
  device_type="GPU",
36
  vae_type=None,
37
- param_dtype='torch.float16',
38
  ) -> PIL.Image.Image:
39
  if vae_type == "tiny vae":
40
  pipe.vae = vae_tiny
@@ -42,12 +46,15 @@ def run(
42
  pipe.vae = vae_large
43
 
44
  if device_type == "CPU":
45
- device = "cpu"
46
- param_dtype = 'torch.float32'
47
  else:
48
  device = "cuda"
49
-
50
- pipe.to(torch_device=device, torch_dtype=torch.float16 if param_dtype == 'torch.float16' else torch.float32)
 
 
 
51
 
52
  result = pipe(
53
  prompt=prompt,
@@ -62,7 +69,7 @@ def run(
62
 
63
 
64
  examples = [
65
- "Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
66
  ]
67
 
68
  with gr.Blocks(css="style.css") as demo:
@@ -80,38 +87,51 @@ with gr.Blocks(css="style.css") as demo:
80
  container=False,
81
  )
82
  run_button = gr.Button("Run", scale=0)
83
-
84
- device_choices = ['GPU','CPU']
85
- device_type = gr.Radio(device_choices, label='Device',
86
- value=device_choices[0],
87
- interactive=True,
88
- info='Thanks to the community for the GPU!')
89
-
90
- vae_choices = ['tiny vae','large vae']
91
- vae_type = gr.Radio(vae_choices, label='Image Decoder Type',
92
- value=vae_choices[0],
93
- interactive=True,
94
- info='To save GPU memory, use tiny vae. For better quality, use large vae.')
95
-
96
- dtype_choices = ['torch.float16','torch.float32']
97
- param_dtype = gr.Radio(dtype_choices,label='torch.weight_type',
98
- value=dtype_choices[0],
99
- interactive=True,
100
- info='To save GPU memory, use torch.float16. For better quality, use torch.float32.')
101
-
102
- download_output = gr.Button("Download output", elem_id="download_output")
103
 
104
- with gr.Column(min_width=512):
105
- result = gr.Image(label="Result", height=512, width=512, elem_id="output_image", show_label=False, show_download_button=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
- gr.Examples(
108
- examples=examples,
109
- inputs=prompt,
110
- outputs=result,
111
- fn=run
112
- )
113
-
114
- demo.load(None,None,None)
 
 
 
 
 
115
 
116
  inputs = [prompt, device_type, vae_type, param_dtype]
117
  outputs = [result, download_output]
 
7
  import torch
8
 
9
  from diffusers import StableDiffusionPipeline, AutoencoderKL, AutoencoderTiny
 
10
 
11
  device = "cuda"
12
  weight_type = torch.float16
13
 
14
+ pipe = StableDiffusionPipeline.from_pretrained("IDKiro/sdxs-512-dreamshaper")
15
  pipe.unet = PeftModel.from_pretrained(pipe.unet, "IDKiro/sdxs-512-dreamshaper-anime")
16
  pipe.to(torch_device=device, torch_dtype=weight_type)
17
 
18
+ vae_tiny = AutoencoderTiny.from_pretrained(
19
+ "IDKiro/sdxs-512-dreamshaper", subfolder="vae"
20
+ )
21
  vae_tiny.to(device, dtype=weight_type)
22
 
23
+ vae_large = AutoencoderKL.from_pretrained(
24
+ "IDKiro/sdxs-512-dreamshaper", subfolder="vae_large"
25
+ )
26
  vae_tiny.to(device, dtype=weight_type)
27
 
28
+
29
  def pil_image_to_data_url(img, format="PNG"):
30
  buffered = BytesIO()
31
  img.save(buffered, format=format)
 
38
  prompt: str,
39
  device_type="GPU",
40
  vae_type=None,
41
+ param_dtype="torch.float16",
42
  ) -> PIL.Image.Image:
43
  if vae_type == "tiny vae":
44
  pipe.vae = vae_tiny
 
46
  pipe.vae = vae_large
47
 
48
  if device_type == "CPU":
49
+ device = "cpu"
50
+ param_dtype = "torch.float32"
51
  else:
52
  device = "cuda"
53
+
54
+ pipe.to(
55
+ torch_device=device,
56
+ torch_dtype=torch.float16 if param_dtype == "torch.float16" else torch.float32,
57
+ )
58
 
59
  result = pipe(
60
  prompt=prompt,
 
69
 
70
 
71
  examples = [
72
+ "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece",
73
  ]
74
 
75
  with gr.Blocks(css="style.css") as demo:
 
87
  container=False,
88
  )
89
  run_button = gr.Button("Run", scale=0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
 
91
+ device_choices = ["GPU", "CPU"]
92
+ device_type = gr.Radio(
93
+ device_choices,
94
+ label="Device",
95
+ value=device_choices[0],
96
+ interactive=True,
97
+ info="Thanks to the community for the GPU!",
98
+ )
99
+
100
+ vae_choices = ["tiny vae", "large vae"]
101
+ vae_type = gr.Radio(
102
+ vae_choices,
103
+ label="Image Decoder Type",
104
+ value=vae_choices[0],
105
+ interactive=True,
106
+ info="To save GPU memory, use tiny vae. For better quality, use large vae.",
107
+ )
108
+
109
+ dtype_choices = ["torch.float16", "torch.float32"]
110
+ param_dtype = gr.Radio(
111
+ dtype_choices,
112
+ label="torch.weight_type",
113
+ value=dtype_choices[0],
114
+ interactive=True,
115
+ info="To save GPU memory, use torch.float16. For better quality, use torch.float32.",
116
+ )
117
+
118
+ download_output = gr.Button(
119
+ "Download output", elem_id="download_output"
120
+ )
121
 
122
+ with gr.Column(min_width=512):
123
+ result = gr.Image(
124
+ label="Result",
125
+ height=512,
126
+ width=512,
127
+ elem_id="output_image",
128
+ show_label=False,
129
+ show_download_button=True,
130
+ )
131
+
132
+ gr.Examples(examples=examples, inputs=prompt, outputs=result, fn=run)
133
+
134
+ demo.load(None, None, None)
135
 
136
  inputs = [prompt, device_type, vae_type, param_dtype]
137
  outputs = [result, download_output]