yeq6x committed on
Commit
87c0a2f
·
1 Parent(s): c8129bb
Files changed (2) hide show
  1. app.py +2 -0
  2. scripts/process_utils.py +6 -5
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import gradio as gr
 
2
  import os
3
  import io
4
  from PIL import Image
@@ -12,6 +13,7 @@ from scripts.survey import handle_form_submission, handle_visit_choice, handle_p
12
  # 初期化
13
  initialize(_use_local=False, use_gpu=True, use_dotenv=True)
14
 
 
15
  def process_image(input_image, mode, weight1=None, weight2=None):
16
  tokyo_time = datetime.now(timezone('Asia/Tokyo')).strftime("%Y-%m-%d %H:%M:%S") # 日本時間のタイムスタンプ
17
  print(f"[{tokyo_time}] Processing image with mode={mode}, weight1={weight1}, weight2={weight2}")
 
1
  import gradio as gr
2
+ import spaces
3
  import os
4
  import io
5
  from PIL import Image
 
13
  # 初期化
14
  initialize(_use_local=False, use_gpu=True, use_dotenv=True)
15
 
16
+ @spaces.GPU
17
  def process_image(input_image, mode, weight1=None, weight2=None):
18
  tokyo_time = datetime.now(timezone('Asia/Tokyo')).strftime("%Y-%m-%d %H:%M:%S") # 日本時間のタイムスタンプ
19
  print(f"[{tokyo_time}] Processing image with mode={mode}, weight1={weight1}, weight2={weight2}")
scripts/process_utils.py CHANGED
@@ -39,8 +39,10 @@ def initialize(_use_local=False, use_gpu=False, use_dotenv=False):
39
  if use_dotenv:
40
  load_dotenv()
41
  global model, sotai_gen_pipe, refine_gen_pipe, use_local, device, torch_dtype
42
- device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
43
- torch_dtype = torch.float16 if device == "cuda" else torch.float32
 
 
44
  use_local = _use_local
45
 
46
  print(f"\nDevice: {device}, Local model: {_use_local}\n")
@@ -205,7 +207,7 @@ def create_rgba_image(binary_image: np.ndarray, color: list) -> Image.Image:
205
  rgba_image[:, :, 3] = binary_image
206
  return Image.fromarray(rgba_image, 'RGBA')
207
 
208
- @spaces.GPU
209
  def generate_sotai_image(input_image: Image.Image, output_width: int, output_height: int) -> Image.Image:
210
  input_image = ensure_rgb(input_image)
211
  global sotai_gen_pipe
@@ -224,7 +226,6 @@ def generate_sotai_image(input_image: Image.Image, output_width: int, output_hei
224
  # EasyNegativeV2の内容
225
  easy_negative_v2 = "(worst quality, low quality, normal quality:1.4), lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry, artist name, (bad_prompt_version2:0.8)"
226
 
227
- print(f"{sotai_gen_pipe.device}で画像生成中...")
228
  output = sotai_gen_pipe(
229
  prompt,
230
  image=[input_image, input_image],
@@ -251,7 +252,7 @@ def generate_sotai_image(input_image: Image.Image, output_width: int, output_hei
251
  torch.cuda.empty_cache()
252
  gc.collect()
253
 
254
- @spaces.GPU
255
  def generate_refined_image(prompt: str, original_image: Image.Image, output_width: int, output_height: int, weight1: float, weight2: float) -> Image.Image:
256
  original_image = ensure_rgb(original_image)
257
  global refine_gen_pipe
 
39
  if use_dotenv:
40
  load_dotenv()
41
  global model, sotai_gen_pipe, refine_gen_pipe, use_local, device, torch_dtype
42
+ # device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
43
+ # torch_dtype = torch.float16 if device == "cuda" else torch.float32
44
+ device = torch.device('cuda')
45
+ torch_dtype = torch.float16
46
  use_local = _use_local
47
 
48
  print(f"\nDevice: {device}, Local model: {_use_local}\n")
 
207
  rgba_image[:, :, 3] = binary_image
208
  return Image.fromarray(rgba_image, 'RGBA')
209
 
210
+ # @spaces.GPU
211
  def generate_sotai_image(input_image: Image.Image, output_width: int, output_height: int) -> Image.Image:
212
  input_image = ensure_rgb(input_image)
213
  global sotai_gen_pipe
 
226
  # EasyNegativeV2の内容
227
  easy_negative_v2 = "(worst quality, low quality, normal quality:1.4), lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry, artist name, (bad_prompt_version2:0.8)"
228
 
 
229
  output = sotai_gen_pipe(
230
  prompt,
231
  image=[input_image, input_image],
 
252
  torch.cuda.empty_cache()
253
  gc.collect()
254
 
255
+ # @spaces.GPU
256
  def generate_refined_image(prompt: str, original_image: Image.Image, output_width: int, output_height: int, weight1: float, weight2: float) -> Image.Image:
257
  original_image = ensure_rgb(original_image)
258
  global refine_gen_pipe