openfree committed
Commit 16cf206 · verified · 1 Parent(s): 2723237

Update app.py

Files changed (1)
  1. app.py +8 -8

app.py CHANGED
@@ -4,7 +4,7 @@ import json
 import logging
 import torch
 from PIL import Image
-import spaces
+# 'spaces' module removed (replace with the correct module if needed)
 from diffusers import (
     DiffusionPipeline,
     AutoencoderTiny,
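
The later hunks keep their `@spaces.GPU(...)` decorators even though the hard `import spaces` is dropped here. A minimal sketch, not part of this commit, of a guarded import with a no-op fallback that would keep both a ZeroGPU Space and a local run working (the stub class name is illustrative):

# Guarded import with a no-op fallback so @spaces.GPU(...) keeps working
# whether or not the 'spaces' package is installed (sketch, not from app.py).
try:
    import spaces  # provided on Hugging Face ZeroGPU Spaces
except ImportError:
    class _SpacesStub:
        @staticmethod
        def GPU(*args, **kwargs):
            # Support both @spaces.GPU and @spaces.GPU(duration=...).
            if len(args) == 1 and callable(args[0]) and not kwargs:
                return args[0]
            def _wrap(fn):
                return fn
            return _wrap
    spaces = _SpacesStub()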
@@ -73,7 +73,6 @@ pipe_controlnet = FluxControlNetPipeline(
     transformer=pipe.transformer,  # use transformer instead of unet
     controlnet=controlnet,
     scheduler=pipe.scheduler
-    # 'safety_checker' and 'feature_extractor' were removed
 ).to(device)  # 'torch_dtype' removed

 MAX_SEED = 2**32 - 1
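
For context, a sketch of how this constructor call typically looks when the ControlNet pipeline shares the already-loaded Flux components instead of loading a second copy. The component names follow the usual diffusers Flux pipelines and the checkpoint id is only illustrative, not taken from this commit:

# Sketch: build the ControlNet pipeline from the base pipe's modules so the
# large transformer, VAE and text encoders are not loaded twice. The dtype is
# fixed when the modules are loaded, hence no torch_dtype= in the constructor.
import torch
from diffusers import FluxControlNetModel, FluxControlNetPipeline

controlnet = FluxControlNetModel.from_pretrained(
    "InstantX/FLUX.1-dev-Controlnet-Union",  # illustrative checkpoint id
    torch_dtype=torch.bfloat16,
)
pipe_controlnet = FluxControlNetPipeline(
    scheduler=pipe.scheduler,
    vae=pipe.vae,
    text_encoder=pipe.text_encoder,
    tokenizer=pipe.tokenizer,
    text_encoder_2=pipe.text_encoder_2,
    tokenizer_2=pipe.tokenizer_2,
    transformer=pipe.transformer,  # Flux uses a transformer, not a UNet
    controlnet=controlnet,
).to(device)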
@@ -305,7 +304,7 @@ def remove_custom_lora(selected_indices, current_loras):
         lora_image_2
     )

-@spaces.GPU(duration=75)
+@spaces.GPU(duration=75)  # this decorator may need to be removed
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
     print("Generating image...")
     pipe.to(device)
@@ -325,7 +324,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
     ):
         yield img

-@spaces.GPU(duration=75)
+@spaces.GPU(duration=75)  # this decorator may need to be removed
 def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
     pipe_i2i.to(device)
     generator = torch.Generator(device=device).manual_seed(seed)
@@ -429,7 +428,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind

     yield final_image, seed, gr.update(value=progress_bar, visible=False)

-run_lora.zerogpu = True
+run_lora.zerogpu = True  # this line may also need to be removed because of the decorator issue

 def get_huggingface_safetensors(link):
     split_link = link.split("/")
@@ -516,7 +515,7 @@ def process_input(input_image, upscale_factor, **kwargs):

     return input_image.resize((w, h)), w_original, h_original, was_resized

-@spaces.GPU(duration=75)
+@spaces.GPU(duration=75)  # this decorator may need to be removed
 def infer_upscale(
     seed,
     randomize_seed,
@@ -662,7 +661,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
     # Set up event handlers
     generate_button.click(
         fn=run_lora,
-        inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
+        inputs=[prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
         outputs=[result, seed, progress_bar]
     ).then(  # Update the history gallery
         fn=lambda x, history: update_history(x, history),
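
The only change in this handler and the next one is the renamed component: the `inputs=` list must reference the Image component variable that actually exists in the Blocks context. A minimal, self-contained sketch of the pattern (component and function names here are illustrative, not from app.py):

# Sketch: Gradio resolves inputs= against the component objects defined inside
# the Blocks context, so the list must use the defined variable (image_input);
# a stale name such as input_image raises a NameError when the UI is wired up.
import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    image_input = gr.Image(type="filepath", label="Input image")
    result = gr.Image(label="Result")
    generate_button = gr.Button("Generate")

    def run(prompt_text, image_path):
        # Placeholder for the real generation call.
        return image_path

    generate_button.click(fn=run, inputs=[prompt, image_input], outputs=[result])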
@@ -671,7 +670,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as a
     )
     prompt.submit(
         fn=run_lora,
-        inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
+        inputs=[prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state],
         outputs=[result, seed, progress_bar]
     ).then(  # Update the history gallery
         fn=lambda x, history: update_history(x, history),
@@ -764,3 +763,4 @@ app.queue()
 app.launch()


+