Yw22 committed on
Commit bc45f69
1 Parent(s): e933fda
Files changed (2)
  1. app.py +1 -1
  2. pipelines/pipeline_imagecoductor.py +1 -1
app.py CHANGED
@@ -339,7 +339,7 @@ class ImageConductor:
         trajs_video = vis_flow_to_video(controlnet_flows, num_frames=self.model_length) # T-1 x H x W x 3
         torchvision.io.write_video(f'{output_dir}/control_flows/sample-{id}-train_flow.mp4', trajs_video, fps=8, video_codec='h264', options={'crf': '10'})
         controlnet_flows = torch.from_numpy(controlnet_flows)[None][:, :self.model_length, ...]
-        controlnet_flows = rearrange(controlnet_flows, "b f h w c-> b c f h w").to(device)
+        controlnet_flows = rearrange(controlnet_flows, "b f h w c-> b c f h w").float().to(device)

         dreambooth_model_path = DREAM_BOOTH.get(personalized, '')
         lora_model_path = LORA.get(personalized, '')
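
The only change in this hunk is the added .float() cast: the flow stack comes from a NumPy array, and torch.from_numpy preserves its dtype (often float64), so casting to float32 before .to(device) keeps the tensor compatible with the controlnet's weights. Below is a minimal sketch of that conversion, assuming stand-in values for the flow array, the frame count (model_length), and the device; only the from_numpy / rearrange / .float().to(device) chain mirrors the commit.

import numpy as np
import torch
from einops import rearrange

model_length = 16                                   # hypothetical number of frames
device = "cuda" if torch.cuda.is_available() else "cpu"

# Hypothetical flow stack shaped (frames, height, width, channels); NumPy
# produces float64 here, which is why the explicit .float() cast matters.
controlnet_flows = np.random.randn(model_length, 64, 64, 3)

flows = torch.from_numpy(controlnet_flows)[None][:, :model_length, ...]   # add batch dim, truncate frames
flows = rearrange(flows, "b f h w c -> b c f h w").float().to(device)     # channels-first video layout, float32
print(flows.shape, flows.dtype, flows.device)       # torch.Size([1, 3, 16, 64, 64]) torch.float32 ...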
pipelines/pipeline_imagecoductor.py CHANGED
@@ -464,7 +464,7 @@ class ImageConductorPipeline(DiffusionPipeline):
                     print("t", t.device)


-
+                    print("self.image_controlnet", self.image_controlnet.controlnet_mid_block.weight.device)

                     img_down_block_additional_residuals, img_mid_block_additional_residuals = self.image_controlnet(
                         controlnet_noisy_latents, t,
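
This hunk adds a debug print that reports which device the image controlnet's mid-block weights live on, alongside the existing print of the timestep tensor's device; comparing the two is a common way to track down "expected all tensors to be on the same device" errors in a denoising loop. The sketch below illustrates the same check with a stand-in nn.Module, since the real image_controlnet and its controlnet_mid_block attribute are specific to this repository.

import torch
import torch.nn as nn

class TinyControlNet(nn.Module):        # hypothetical placeholder for the image controlnet
    def __init__(self):
        super().__init__()
        self.controlnet_mid_block = nn.Conv2d(4, 4, kernel_size=1)

net = TinyControlNet()
t = torch.tensor([999])                 # stand-in for the diffusion timestep tensor

# Same style of debug output as the commit: compare the timestep's device
# with the device of a representative controlnet weight.
print("t", t.device)
print("controlnet_mid_block", net.controlnet_mid_block.weight.device)
# A more general check that does not depend on a specific submodule:
print("any parameter", next(net.parameters()).device)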