el-el-san committed · verified
Commit 451bce5 · 1 Parent(s): 2a7d14a

Update app.py

Files changed (1):
  app.py +41 -32
app.py CHANGED
@@ -7,6 +7,9 @@ import random
 from diffusers import ControlNetModel, StableDiffusionXLPipeline, AutoencoderKL
 import cv2
 import torch
+import os
+import time
+import glob
 
 from diffusers import (
     DDIMScheduler,
@@ -20,21 +23,27 @@ from diffusers import (
     UniPCMultistepScheduler,
 )
 
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-#vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-
-#pipe = StableDiffusionXLPipeline.from_pretrained(
-#    #"yodayo-ai/clandestine-xl-1.0",
-#    torch_dtype=torch.float16,
-#    use_safetensors=True,
-#    custom_pipeline="lpw_stable_diffusion_xl",
-#    add_watermarker=False #,
-#    #variant="fp16"
-#)
+# Temporary-file management settings
+TEMP_DIR = "temp_images"
+FILE_RETENTION_PERIOD = 3600  # 1 hour
+os.makedirs(TEMP_DIR, exist_ok=True)
+
+def cleanup_old_files():
+    """Delete old temporary files."""
+    current_time = time.time()
+    pattern = os.path.join(TEMP_DIR, "output_*.png")
+
+    for file_path in glob.glob(pattern):
+        try:
+            file_modified_time = os.path.getmtime(file_path)
+            if current_time - file_modified_time > FILE_RETENTION_PERIOD:
+                os.remove(file_path)
+        except Exception as e:
+            print(f"Error while cleaning up file {file_path}: {e}")
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
 pipe = StableDiffusionXLPipeline.from_single_file(
-    #"https://huggingface.co/Laxhar/noob_sdxl_beta/noob_hercules4/fp16/checkpoint-e0_s10000.safetensors/checkpoint-e0_s10000.safetensors",
     "https://huggingface.co/bluepen5805/illustrious_pencil-XL/illustrious_pencil-XL-v1.2.1.safetensors",
     use_safetensors=True,
     torch_dtype=torch.float16,
@@ -45,9 +54,10 @@ pipe.to(device)
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1216
 
-
 @spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, sampler_name):
+    # Delete old temporary files
+    cleanup_old_files()
 
     # Sampler settings
     if sampler_name == "DDIM":
@@ -71,7 +81,6 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     else:
         pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
-
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
@@ -87,11 +96,16 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
         generator=generator
     ).images[0]
 
-    # Convert to PNG
-    output_image = output_image.convert("RGBA")
-
-    return output_image
-
+    # Save in RGB mode
+    if output_image.mode != 'RGB':
+        output_image = output_image.convert('RGB')
+
+    # Save as a temporary file
+    timestamp = int(time.time())
+    temp_filename = os.path.join(TEMP_DIR, f"output_{timestamp}.png")
+    output_image.save(temp_filename)
+
+    return temp_filename
 
 css = """
 #col-container {
@@ -101,14 +115,12 @@ css = """
 """
 
 with gr.Blocks(css=css) as demo:
-
     with gr.Column(elem_id="col-container"):
         gr.Markdown("""
         Text-to-Image Demo
        using [illustrious_pencil-XL](https://huggingface.co/bluepen5805/illustrious_pencil-XL)
        """)
-        #yodayo-ai/clandestine-xl-1.0
-        #yodayo-ai/holodayo-xl-2.1
+
         with gr.Row():
             prompt = gr.Text(
                 label="Prompt",
@@ -117,20 +129,16 @@ with gr.Blocks(css=css) as demo:
                 placeholder="Enter your prompt",
                 container=False,
             )
-
             run_button = gr.Button("Run", scale=0)
 
         result = gr.Image(
             label="Result",
             show_label=False,
-            type="pil",
-            elem_id="output_image",
-            show_download_button=True#,
-            #download_filename="output.png"
+            type="filepath",  # changed to filepath
+            elem_id="output_image"
         )
 
         with gr.Accordion("Advanced Settings", open=False):
-
             negative_prompt = gr.Text(
                 label="Negative prompt",
                 max_lines=1,
@@ -160,7 +168,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,#832,
+                    value=1024,
                 )
 
                 height = gr.Slider(
@@ -168,7 +176,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=1024,#1216,
+                    value=1024,
                 )
 
             with gr.Row():
@@ -188,12 +196,13 @@ with gr.Blocks(css=css) as demo:
                     value=28,
                 )
 
-
-
-    run_button.click(#lambda x: None, inputs=None, outputs=result).then(
+    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, sampler_name],
        outputs=[result]
    )
 
+# Delete old files at startup
+cleanup_old_files()
+
 demo.queue().launch()
 
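For reference, a minimal self-contained sketch of the output handling this commit switches to: infer() now writes a timestamped PNG under temp_images/ and returns its path, and gr.Image(type="filepath") displays the saved file from disk. The dummy infer below (a flat gray 256x256 image) is only a stand-in for the SDXL pipeline and is not part of the commit; the temp_images directory, the one-hour retention period, the output_<timestamp>.png naming, and type="filepath" mirror the diff above.

import glob
import os
import time

import gradio as gr
from PIL import Image

TEMP_DIR = "temp_images"
FILE_RETENTION_PERIOD = 3600  # seconds; same one-hour retention as app.py

os.makedirs(TEMP_DIR, exist_ok=True)

def cleanup_old_files():
    """Remove generated PNGs older than the retention period."""
    now = time.time()
    for path in glob.glob(os.path.join(TEMP_DIR, "output_*.png")):
        try:
            if now - os.path.getmtime(path) > FILE_RETENTION_PERIOD:
                os.remove(path)
        except OSError as e:
            print(f"Error while cleaning up file {path}: {e}")

def infer(prompt):
    cleanup_old_files()
    # Stand-in for the SDXL pipeline call: a flat gray image.
    output_image = Image.new("RGB", (256, 256), color=(128, 128, 128))
    temp_filename = os.path.join(TEMP_DIR, f"output_{int(time.time())}.png")
    output_image.save(temp_filename)
    return temp_filename  # a filepath, not a PIL image

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    run_button = gr.Button("Run")
    result = gr.Image(label="Result", type="filepath")
    run_button.click(fn=infer, inputs=[prompt], outputs=[result])

if __name__ == "__main__":
    demo.queue().launch()

Returning a path instead of a PIL image means Gradio serves the exact PNG written to disk, so the downloaded file matches what was saved, while the cleanup_old_files() call at startup and on each request keeps temp_images/ from growing without bound.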