HighCWu committed on
Commit 5fe3b59
1 Parent(s): 6cd2b77

fix button label kwarg

Files changed (2)
  1. app_openpose.py +1 -1
  2. model.py +16 -13
app_openpose.py CHANGED
@@ -18,7 +18,7 @@ def create_demo(process):
             with gr.Column():
                 image = gr.Image()
                 prompt = gr.Textbox(label="Prompt")
-                run_button = gr.Button(label="Run")
+                run_button = gr.Button("Run")
                 with gr.Accordion("Advanced options", open=False):
                     preprocessor_name = gr.Radio(
                         label="Preprocessor", choices=["Openpose", "None"], type="value", value="Openpose"
model.py CHANGED
@@ -47,11 +47,11 @@ class Model:
             unet.set_adapter(task_name)
             return self.pipe
         unet: UNet2DConditionModelEx = UNet2DConditionModelEx.from_pretrained(
-            base_model_id, subfolder="unet", torch_dtype=torch.float16
+            base_model_id, subfolder="unet", torch_dtype=torch.float16 if self.device.type == "cuda" else torch.float32
         )
         unet.add_extra_conditions(["Placeholder"])
         pipe: StableDiffusionControlLoraV3Pipeline = StableDiffusionControlLoraV3Pipeline.from_pretrained(
-            base_model_id, safety_checker=None, unet=unet, torch_dtype=torch.float16
+            base_model_id, safety_checker=None, unet=unet, torch_dtype=torch.float16 if self.device.type == "cuda" else torch.float32
         )
         for _task_name, subfolder in CONTROL_LORA_V3_MODEL_IDS.items():
             pipe.load_lora_weights("HighCWu/control-lora-v3", adapter_name=_task_name, subfolder=subfolder)
@@ -92,7 +92,6 @@ class Model:
             prompt = f"{prompt}, {additional_prompt}"
         return prompt
 
-    # @torch.autocast("cuda")
     def run_pipe(
         self,
         prompt: str,
@@ -103,16 +102,20 @@
         guidance_scale: float,
         seed: int,
     ) -> list[PIL.Image.Image]:
-        generator = torch.Generator().manual_seed(seed)
-        return self.pipe(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            guidance_scale=guidance_scale,
-            num_images_per_prompt=num_images,
-            num_inference_steps=num_steps,
-            generator=generator,
-            image=control_image,
-        ).images
+        def run():
+            generator = torch.Generator().manual_seed(seed)
+            return self.pipe(
+                prompt=prompt,
+                negative_prompt=negative_prompt,
+                guidance_scale=guidance_scale,
+                num_images_per_prompt=num_images,
+                num_inference_steps=num_steps,
+                generator=generator,
+                image=control_image,
+            ).images
+        if self.device.type == "cuda":
+            run = torch.autocast("cuda")(run)
+        return run()
 
     @torch.inference_mode()
     def process_canny(
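
The model.py changes apply a device-dependent precision pattern: half precision and autocast only when running on CUDA, plain float32 on CPU. A standalone sketch of that pattern (helper names here are illustrative, not part of the repo):

import torch

def pick_dtype(device: torch.device) -> torch.dtype:
    # float16 only makes sense on GPU; fall back to float32 on CPU.
    return torch.float16 if device.type == "cuda" else torch.float32

def run_with_optional_autocast(fn, device: torch.device):
    # torch.autocast("cuda") can wrap a callable as a decorator, which is
    # what run_pipe does above with `run = torch.autocast("cuda")(run)`.
    if device.type == "cuda":
        fn = torch.autocast("cuda")(fn)
    return fn()

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(pick_dtype(device))
    print(run_with_optional_autocast(lambda: torch.ones(2, device=device).sum(), device))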