euiia committed on
Commit
f5a3825
·
verified ·
1 Parent(s): b664155

Rename deformes4D_engine (37).py to deformes4D_engine.py

Browse files
deformes4D_engine (37).py → deformes4D_engine.py RENAMED
@@ -23,6 +23,8 @@ import gc
23
 
24
  from ltx_manager_helpers import ltx_manager_singleton
25
  from gemini_helpers import gemini_singleton
 
 
26
  from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode
27
 
28
  logger = logging.getLogger(__name__)
@@ -128,6 +130,49 @@ class Deformes4DEngine:
128
  logger.error(f"Erro no FFmpeg durante a poda do último frame: {e.stderr}")
129
  return False
130
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
  def _generate_video_from_latents(self, latent_tensor, base_name: str) -> str:
132
  """
133
  Gera um vídeo a partir de latentes, podando o último frame para garantir concatenação limpa.
@@ -295,10 +340,21 @@ class Deformes4DEngine:
295
  dejavu_latent_for_next_loop = None
296
 
297
  # 3.5. Renderização e Armazenamento do Fragmento Final
 
 
 
 
 
 
298
  base_name = f"fragment_{fragment_index}_{int(time.time())}"
299
- video_path = self._generate_video_from_latents(latents_video, base_name)
 
 
 
300
  video_clips_paths.append(video_path)
301
  logger.info(f"--- FRAGMENTO {fragment_index} FINALIZADO E SALVO EM: {video_path} ---")
 
 
302
 
303
  # Bloco de Diagnóstico: Gera um vídeo a partir do tensor do Eco
304
  if eco_latent_for_next_loop is not None:
 
23
 
24
  from ltx_manager_helpers import ltx_manager_singleton
25
  from gemini_helpers import gemini_singleton
26
+ from upscaler_specialist import upscaler_specialist_singleton
27
+ from ltx_video.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
28
  from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode
29
 
30
  logger = logging.getLogger(__name__)
 
130
  logger.error(f"Erro no FFmpeg durante a poda do último frame: {e.stderr}")
131
  return False
132
 
133
+ # --- PIPELINE DE PÓS-PRODUÇÃO LATENTE ---
134
+ def _render_and_post_process_latents(self,
135
+ low_res_latents: torch.Tensor,
136
+ base_name: str,
137
+ conditioning_items_for_refine: list,
138
+ motion_prompt_for_refine: str
139
+ ) -> str:
140
+ high_res_latents = upscaler_specialist_singleton.upscale(low_res_latents)
141
+
142
+ _, _, _, refined_h_latent, refined_w_latent = high_res_latents.shape
143
+ video_h = refined_h_latent * self.ltx_manager.workers[0].pipeline.vae_scale_factor
144
+ video_w = refined_w_latent * self.ltx_manager.workers[0].pipeline.vae_scale_factor
145
+ num_latent_frames = high_res_latents.shape[2]
146
+ num_video_frames = num_latent_frames * self.ltx_manager.workers[0].pipeline.video_scale_factor
147
+ if isinstance(self.vae, CausalVideoAutoencoder):
148
+ num_video_frames -= 1
149
+
150
+ refine_kwargs = {
151
+ 'height': video_h, 'width': video_w, 'video_total_frames': num_video_frames, 'video_fps': 24,
152
+ 'current_fragment_index': int(time.time()), 'motion_prompt': motion_prompt_for_refine,
153
+ 'conditioning_items_data': conditioning_items_for_refine,
154
+ 'denoise_strength': 0.4, 'refine_steps': 10
155
+ }
156
+
157
+ final_latents, _ = self.ltx_manager.refine_latents(high_res_latents, **refine_kwargs)
158
+
159
+ untrimmed_video_path = os.path.join(self.workspace_dir, f"{base_name}_untrimmed.mp4")
160
+ trimmed_video_path = os.path.join(self.workspace_dir, f"{base_name}.mp4")
161
+
162
+ pixel_tensor = self.latents_to_pixels(final_latents)
163
+ self.save_video_from_tensor(pixel_tensor, untrimmed_video_path, fps=24)
164
+ del pixel_tensor, final_latents, high_res_latents
165
+ gc.collect()
166
+ torch.cuda.empty_cache()
167
+
168
+ success = self._trim_last_frame_ffmpeg(untrimmed_video_path, trimmed_video_path)
169
+
170
+ if os.path.exists(untrimmed_video_path):
171
+ os.remove(untrimmed_video_path)
172
+
173
+ return trimmed_video_path if success else untrimmed_video_path
174
+
175
+
176
  def _generate_video_from_latents(self, latent_tensor, base_name: str) -> str:
177
  """
178
  Gera um vídeo a partir de latentes, podando o último frame para garantir concatenação limpa.
 
340
  dejavu_latent_for_next_loop = None
341
 
342
  # 3.5. Renderização e Armazenamento do Fragmento Final
343
+ #base_name = f"fragment_{fragment_index}_{int(time.time())}"
344
+ #video_path = self._generate_video_from_latents(latents_video, base_name)
345
+ #video_clips_paths.append(video_path)
346
+ #logger.info(f"--- FRAGMENTO {fragment_index} FINALIZADO E SALVO EM: {video_path} ---")
347
+
348
+ # 3.5. Renderização, Pós-Produção e Armazenamento
349
  base_name = f"fragment_{fragment_index}_{int(time.time())}"
350
+ video_path = self._render_and_post_process_latents(
351
+ low_res_latents=latents_video, base_name=base_name,
352
+ conditioning_items_for_refine=conditioning_items,
353
+ motion_prompt_for_refine=motion_prompt)
354
  video_clips_paths.append(video_path)
355
  logger.info(f"--- FRAGMENTO {fragment_index} FINALIZADO E SALVO EM: {video_path} ---")
356
+
357
+
358
 
359
  # Bloco de Diagnóstico: Gera um vídeo a partir do tensor do Eco
360
  if eco_latent_for_next_loop is not None: