euiia committed · verified
Commit f5600f1 · 1 Parent(s): 83bfe8d

Update deformes4D_engine.py

Files changed (1):
  1. deformes4D_engine.py (+5 -5)
deformes4D_engine.py CHANGED
@@ -144,10 +144,10 @@ class Deformes4DEngine:
         logger.warning(f"A poda ({trim_percent}%) era muito grande. Ajustada para {trim_chunks} chunks para deixar 1 chunk de vídeo.")

         # 3. Definir fatias e alvos com base nos cálculos
-        VIDEO_CHUNK_COUNT = total_chunks_gerados - trim_chunks -1
+        VIDEO_CHUNK_COUNT = total_chunks_gerados - trim_chunks

-        HANDLER_CHUNK_INDICES = slice(total_chunks_gerados - 1, total_chunks_gerados)
-        ECO_CHUNK_INDICES = slice(VIDEO_CHUNK_COUNT, VIDEO_CHUNK_COUNT + 2)
+        HANDLER_CHUNK_INDICES = slice(total_chunks_gerados - 3, total_chunks_gerados-1)
+        ECO_CHUNK_INDICES = slice(VIDEO_CHUNK_COUNT-1, VIDEO_CHUNK_COUNT + 1)

         HANDLER_FRAME_TARGET = ((total_chunks_gerados-1) - VIDEO_CHUNK_COUNT) * 8
         FRAMES_TO_GENERATE = (((total_chunks_gerados)-1) * 8) +1
@@ -205,7 +205,7 @@ class Deformes4DEngine:
         conditioning_items.append(LatentConditioningItem(handler_latent_for_next_loop, HANDLER_FRAME_TARGET, handler_strength))

         img_dest = self._preprocess_image_for_latent_conversion(Image.open(destination_keyframe_path).convert("RGB"), target_resolution_tuple)
-        conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_dest), DESTINATION_FRAME_TARGET-1, destination_convergence_strength))
+        conditioning_items.append(LatentConditioningItem(self.pil_to_latent(img_dest), DESTINATION_FRAME_TARGET, destination_convergence_strength))

         current_ltx_params = {**base_ltx_params, "motion_prompt": motion_prompt}
         new_full_latents = self._generate_latent_tensor_internal(conditioning_items, current_ltx_params, target_resolution_tuple, FRAMES_TO_GENERATE)
@@ -215,7 +215,7 @@ class Deformes4DEngine:
         handler_latent_for_next_loop = new_full_latents[:, :, HANDLER_CHUNK_INDICES, :, :].clone()
         logger.info(f" [GUIAS] Guias para a próxima iteração extraídas. Eco shape: {eco_latent_for_next_loop.shape}, Handler shape: {handler_latent_for_next_loop.shape}.")

-        latents_for_video = new_full_latents[:, :, :VIDEO_CHUNK_COUNT, :, :]
+        latents_for_video = new_full_latents[:, :, :VIDEO_CHUNK_COUNT-1, :, :]
         logger.info(f" [2. EDIÇÃO] Tensor final para vídeo extraído com {latents_for_video.shape[2]} chunks.")

         base_name = f"fragment_{i}_{int(time.time())}"
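To make the index changes above concrete, here is a minimal sketch that evaluates the old and new chunk arithmetic side by side. The inputs (total_chunks_gerados = 10, trim_chunks = 2) are hypothetical values chosen only for illustration; they are not taken from the commit.

```python
# Hypothetical inputs, for illustration only (not values from the commit).
total_chunks_gerados = 10
trim_chunks = 2

# --- Before this commit ---
old_video_chunk_count = total_chunks_gerados - trim_chunks - 1                       # 7
old_handler_indices = slice(total_chunks_gerados - 1, total_chunks_gerados)          # chunks 9:10 (last chunk only)
old_eco_indices = slice(old_video_chunk_count, old_video_chunk_count + 2)            # chunks 7:9
old_handler_frame_target = ((total_chunks_gerados - 1) - old_video_chunk_count) * 8  # 16

# --- After this commit ---
new_video_chunk_count = total_chunks_gerados - trim_chunks                           # 8
new_handler_indices = slice(total_chunks_gerados - 3, total_chunks_gerados - 1)      # chunks 7:9 (two chunks before the last)
new_eco_indices = slice(new_video_chunk_count - 1, new_video_chunk_count + 1)        # chunks 7:9
new_handler_frame_target = ((total_chunks_gerados - 1) - new_video_chunk_count) * 8  # 8

# Unchanged by the commit:
frames_to_generate = ((total_chunks_gerados - 1) * 8) + 1                            # 73
```

Under these example numbers the eco slice and the number of video chunks written out stay the same (the third hunk's `:VIDEO_CHUNK_COUNT-1` selects the same 7 chunks that `:VIDEO_CHUNK_COUNT` selected before), while the handler guide now spans the two chunks before the last one instead of only the last chunk, its frame target drops from 16 to 8, and the destination conditioning lands on `DESTINATION_FRAME_TARGET` rather than `DESTINATION_FRAME_TARGET-1`.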
 
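The guide-extraction lines in the last hunk index the chunk axis of a 5-D latent tensor (the `[:, :, slice, :, :]` pattern). The sketch below applies the post-commit slices to a random tensor with made-up dimensions (batch 1, 128 channels, 10 chunks, 16×16 latent grid) purely to show which chunks each guide receives; the shapes, and the eco extraction line (not shown in the diff but implied by the logged Eco shape), are assumptions rather than code from the repository.

```python
import torch

# Hypothetical latent layout (batch, channels, chunks, height, width); channel count
# and spatial size are placeholders, not values read from deformes4D_engine.py.
new_full_latents = torch.randn(1, 128, 10, 16, 16)

total_chunks_gerados = 10   # must match the chunk axis above for this demo
trim_chunks = 2
VIDEO_CHUNK_COUNT = total_chunks_gerados - trim_chunks                             # 8
HANDLER_CHUNK_INDICES = slice(total_chunks_gerados - 3, total_chunks_gerados - 1)  # 7:9
ECO_CHUNK_INDICES = slice(VIDEO_CHUNK_COUNT - 1, VIDEO_CHUNK_COUNT + 1)            # 7:9

# Guides for the next iteration (the eco line mirrors the handler line shown in the diff).
eco_latent_for_next_loop = new_full_latents[:, :, ECO_CHUNK_INDICES, :, :].clone()
handler_latent_for_next_loop = new_full_latents[:, :, HANDLER_CHUNK_INDICES, :, :].clone()

# Chunks that go on to be decoded into video frames.
latents_for_video = new_full_latents[:, :, :VIDEO_CHUNK_COUNT - 1, :, :]

print(eco_latent_for_next_loop.shape)      # torch.Size([1, 128, 2, 16, 16])
print(handler_latent_for_next_loop.shape)  # torch.Size([1, 128, 2, 16, 16])
print(latents_for_video.shape)             # torch.Size([1, 128, 7, 16, 16])
```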