fffiloni committed on
Commit 238d2f7
1 Parent(s): 35f5939

Update app.py

Files changed (1)
  1. app.py +4 -6
app.py CHANGED
@@ -35,6 +35,9 @@ pipe.to("cuda")
 #pipe.enable_model_cpu_offload()
 
 def infer(use_custom_model, model_name, custom_lora_weight, image_in, prompt, negative_prompt, preprocessor, controlnet_conditioning_scale, guidance_scale, steps, seed, progress=gr.Progress(track_tqdm=True)):
+    prompt = prompt
+    negative_prompt = negative_prompt
+    generator = torch.Generator(device="cuda").manual_seed(seed)
 
     if preprocessor == "canny":
 
@@ -52,11 +55,6 @@ def infer(use_custom_model, model_name, custom_lora_weight, image_in, prompt, ne
         # This is where you load your trained weights
         pipe.load_lora_weights(custom_model, use_auth_token=True)
 
-    prompt = prompt
-    negative_prompt = negative_prompt
-    generator = torch.Generator(device="cuda").manual_seed(seed)
-
-    if use_custom_model:
         lora_scale=custom_lora_weight
 
     images = pipe(
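Taken together, the first two hunks move the prompt, negative prompt, and seeded generator from the middle of infer() to its first lines, which appears to let the commit drop the second if use_custom_model: guard; the lora_scale assignment now sits in the same branch that loads the LoRA weights. A minimal sketch of the resulting control flow, assuming a CUDA device; only the names visible in the diff are real, the stub function, its default scale, and its return value are placeholders:

import torch

def infer_sketch(use_custom_model, custom_lora_weight, prompt, negative_prompt, seed):
    # Hunk 1: inputs and the seeded generator are set up once, on entry.
    generator = torch.Generator(device="cuda").manual_seed(seed)

    # Hunk 2: the LoRA scale is decided in the same branch that would load
    # the weights (pipe.load_lora_weights in the real app), instead of behind
    # a second use_custom_model check further down. The 1.0 default exists
    # only so this sketch runs on its own.
    lora_scale = 1.0
    if use_custom_model:
        lora_scale = custom_lora_weight

    # The real app then calls the SDXL ControlNet pipeline with these values.
    return prompt, negative_prompt, generator, lora_scale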
@@ -123,4 +121,4 @@ with gr.Blocks(css=css) as demo:
         outputs = [result]
     )
 
-demo.queue().launch()
+demo.queue(max_size=12).launch()
 
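The final hunk changes the launch call from demo.queue().launch() to demo.queue(max_size=12).launch(). In Gradio, max_size caps how many requests may wait in the queue at once; once twelve are pending, new submissions are rejected rather than piling up. A self-contained toy example of the same launch pattern (the echo handler and textboxes are invented for illustration):

import gradio as gr

def echo(text):
    # Trivial placeholder handler so the demo is runnable on its own.
    return text

with gr.Blocks() as demo:
    inp = gr.Textbox(label="in")
    out = gr.Textbox(label="out")
    inp.submit(echo, inputs=inp, outputs=out)

# max_size=12 bounds the number of pending requests; extra submissions are
# turned away until a slot frees up, which keeps a busy Space from building
# an unbounded backlog.
demo.queue(max_size=12).launch()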