euiia committed
Commit 02b4ee7 · verified · 1 Parent(s): 40b10a0

Update ltx_manager_helpers.py

Files changed (1):
  ltx_manager_helpers.py +18 -13
ltx_manager_helpers.py CHANGED

@@ -123,13 +123,18 @@ class LtxPoolManager:
                 "conditioning_items": final_conditioning_items, "is_video": True, "vae_per_channel_normalize": True,
                 "prompt": kwargs['motion_prompt'], "negative_prompt": "blurry, distorted, static, bad quality",
                 "guidance_scale": kwargs['guidance_scale'], "stg_scale": kwargs['stg_scale'],
-                "rescaling_scale": kwargs['rescaling_scale'], "num_inference_steps": kwargs['num_inference_steps']
+                "rescaling_scale": kwargs['rescaling_scale'], "num_inference_steps": kwargs['num_inference_steps'],
+                "output_type": "latent"
             }
             if worker_to_use.is_distilled:
                 pipeline_params["timesteps"] = worker_to_use.config.get("first_pass", {}).get("timesteps")
                 pipeline_params["num_inference_steps"] = len(pipeline_params["timesteps"]) if pipeline_params["timesteps"] else 20
 
-            result = worker_to_use.generate_video_fragment_internal(**pipeline_params)
+            if isinstance(worker_to_use.pipeline, LTXMultiScalePipeline):
+                result = worker_to_use.pipeline.video_pipeline(**pipeline_params).images
+            else:
+                result = worker_to_use.generate_video_fragment_internal(**pipeline_params)
+
             return result, padding_vals
         except Exception as e:
             logger.error(f"LTX POOL MANAGER: Erro durante a geração em {worker_to_use.device}: {e}", exc_info=True)
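In the distilled branch above, the step count is derived from the worker's configured first-pass timesteps, falling back to 20 when none are set. A minimal, self-contained sketch of that fallback logic; the config dict below is a hypothetical stand-in for worker_to_use.config:

# Sketch of the distilled step-count fallback shown in the hunk above.
# `config` is a hypothetical stand-in for worker_to_use.config.
def resolve_distilled_steps(config):
    timesteps = config.get("first_pass", {}).get("timesteps")
    num_inference_steps = len(timesteps) if timesteps else 20
    return timesteps, num_inference_steps

# With explicit first-pass timesteps the step count follows the list length;
# without them the code falls back to 20 steps.
print(resolve_distilled_steps({"first_pass": {"timesteps": [0.9, 0.7, 0.4, 0.1]}}))  # ([0.9, 0.7, 0.4, 0.1], 4)
print(resolve_distilled_steps({}))  # (None, 20)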
@@ -158,18 +163,18 @@ class LtxPoolManager:
 
             logger.info("LTX POOL MANAGER: Iniciando passe de refinamento (denoise)...")
 
-            # --- [INÍCIO DA CORREÇÃO] ---
-            # Para o refinamento, usamos a pipeline de vídeo base diretamente,
-            # ignorando o invólucro MultiScale que causa o erro de processamento de imagem.
-            if isinstance(worker_to_use.pipeline, LTXMultiScalePipeline):
-                result = worker_to_use.pipeline.video_pipeline(**pipeline_params).images
-            else:
-                result = worker_to_use.generate_video_fragment_internal(**pipeline_params)
-            # --- [FIM DA CORREÇÃO] ---
-
-            return result, None
+            pipeline_to_call = worker_to_use.pipeline.video_pipeline if isinstance(worker_to_use.pipeline, LTXMultiScalePipeline) else worker_to_use.pipeline
+
+            try:
+                result = pipeline_to_call(**pipeline_params).images
+                return result, None
+            except torch.cuda.OutOfMemoryError as e:
+                logger.error(f"FALHA DE MEMÓRIA DURANTE O REFINAMENTO em {worker_to_use.device}: {e}")
+                logger.warning("Limpando VRAM e retornando None para sinalizar a falha.")
+                gc.collect(); torch.cuda.empty_cache()
+                return None, None
         except Exception as e:
-            logger.error(f"LTX POOL MANAGER: Erro durante o refinamento em {worker_to_use.device}: {e}", exc_info=True)
+            logger.error(f"LTX POOL MANAGER: Erro inesperado durante o refinamento em {worker_to_use.device}: {e}", exc_info=True)
             raise e
         finally:
             if worker_to_use and worker_to_use.device.type == 'cuda':
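The refinement pass above now catches torch.cuda.OutOfMemoryError and returns (None, None) after clearing VRAM instead of propagating the exception, so callers must treat a None result as a skipped refinement. A hedged sketch of how calling code might react; refine_fragment and base_latents are hypothetical stand-ins, not names from this repository:

# Hedged sketch, not part of the commit: handling the new (None, None) return
# that signals an out-of-memory failure during the refinement pass.
def refine_fragment(base_latents):
    # Stand-in for the pool manager's refinement call; pretend it hit
    # torch.cuda.OutOfMemoryError, cleared VRAM, and signalled failure.
    return None, None

base_latents = object()  # placeholder for the unrefined first-pass latents
refined, _ = refine_fragment(base_latents)
if refined is None:
    # OOM was signalled: fall back to the unrefined latents instead of raising.
    refined = base_latents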
 