Michael Yang committed
Commit dc975c3 • 1 Parent(s): 811f626

update generation script, add pillow

- baseline.py +1 -2
- generation.py +11 -8
- requirements.txt +1 -0
baseline.py CHANGED

@@ -1,13 +1,13 @@
 # Original Stable Diffusion (1.4)

 import torch
-import numpy as np
 import models
 from models import pipelines
 from shared import model_dict, DEFAULT_OVERALL_NEGATIVE_PROMPT
 import gc
 from io import BytesIO
 import base64
+import PIL.Image

 vae, tokenizer, text_encoder, unet, scheduler, dtype = model_dict.vae, model_dict.tokenizer, model_dict.text_encoder, model_dict.unet, model_dict.scheduler, model_dict.dtype

@@ -42,7 +42,6 @@ def run(prompt, scheduler_key='dpm_scheduler', bg_seed=1, num_inference_steps=20
     )

     # Convert to PIL Image
-    import PIL.Image
     image = PIL.Image.fromarray(images[0])

     # Save as PNG in memory
generation.py CHANGED

@@ -10,8 +10,7 @@ from shared import model_dict, sam_model_dict, DEFAULT_SO_NEGATIVE_PROMPT, DEFAU
 import gc
 from io import BytesIO
 import base64
-import
-import codecs
+import PIL.Image

 verbose = False

@@ -214,12 +213,16 @@ def run(
     gc.collect()
     torch.cuda.empty_cache()

-
-
-
-    #
-
-
+    # Convert to PIL Image
+    image = PIL.Image.fromarray(images[0])
+
+    # Save as PNG in memory
+    buffer = BytesIO()
+    image.save(buffer, format='PNG')
+
+    # Encode PNG to base64
+    png_bytes = buffer.getvalue()
+    base64_string = base64.b64encode(png_bytes).decode('utf-8')\

     return images[0], so_img_list, img_str
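For reference, here is a minimal standalone sketch of the conversion pattern the added lines follow: wrap a uint8 RGB array in a PIL image, serialize it to PNG in an in-memory buffer, and base64-encode the bytes. The dummy array and variable names below are illustrative only, not part of the commit.

# Illustrative sketch (not part of the commit): round-trip a uint8 RGB
# array through PNG bytes and a base64 string, as the added lines do.
import base64
from io import BytesIO

import numpy as np
import PIL.Image

# Stand-in for images[0]: a 64x64 black RGB image.
array = np.zeros((64, 64, 3), dtype=np.uint8)

# Convert to a PIL Image and save it as PNG into an in-memory buffer.
image = PIL.Image.fromarray(array)
buffer = BytesIO()
image.save(buffer, format='PNG')

# Encode the PNG bytes to a base64 string.
png_bytes = buffer.getvalue()
base64_string = base64.b64encode(png_bytes).decode('utf-8')

# Decoding the string recovers the same PNG image.
decoded = PIL.Image.open(BytesIO(base64.b64decode(base64_string)))
assert decoded.size == (64, 64)

Using BytesIO keeps the PNG entirely in memory, so no temporary file is written before encoding.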
requirements.txt CHANGED

@@ -10,3 +10,4 @@ inflect==6.0.4
 easydict
 accelerate==0.18.0
 gradio==3.35.2
+Pillow==10.0.0
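A quick way to confirm that the pinned Pillow is the one importable at runtime (assuming the environment is installed from this requirements.txt):

import PIL

# Pillow exposes its version string as PIL.__version__; with the pin
# above this should print 10.0.0.
print(PIL.__version__)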