Update api/ltx_server_refactored.py
api/ltx_server_refactored.py  +18 -5
@@ -9,6 +9,8 @@ import json
 import numpy as np
 import random
 import os
+import io
+
 import shlex
 import yaml
 from typing import List, Dict
@@ -209,6 +211,13 @@ class VideoService:
 print(f"[DEBUG] Vídeo salvo em: {final_path}")
 return final_path
 
+def load_tensor(caminho):
+    if isinstance(caminho, (bytes, bytearray)):
+        return torch.load(io.BytesIO(caminho))
+    return torch.load(caminho)
+
+
+
 # ==============================================================================
 # --- FUNÇÕES MODULARES COM A LÓGICA DE CHUNKING SIMPLIFICADA ---
 # ==============================================================================
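For context, the new load_tensor helper accepts either a filesystem path or an already-serialized buffer, so chunk latents can be kept as raw bytes in memory or as files on disk interchangeably. A minimal sketch of both branches (the tensor shape and file name are made up for illustration, and the helper is restated so the snippet runs standalone):

```python
import io
import torch

def load_tensor(caminho):
    # Same logic as the helper added in this commit.
    if isinstance(caminho, (bytes, bytearray)):
        return torch.load(io.BytesIO(caminho))
    return torch.load(caminho)

t = torch.randn(1, 8, 10, 16, 16)          # dummy latent-like tensor

# Path branch: torch.load reads the tensor back from disk.
torch.save(t, "chunk_000.pt")               # illustrative file name
from_path = load_tensor("chunk_000.pt")

# Bytes branch: the raw buffer is wrapped in io.BytesIO before torch.load.
buf = io.BytesIO()
torch.save(t, buf)
from_bytes = load_tensor(buf.getvalue())

assert torch.equal(from_path, from_bytes)
```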
@@ -338,7 +347,7 @@ class VideoService:
 log_tensor_info(latents_bruto, f"Latente Bruto Gerado para: '{prompt[:40]}...'")
 
 print("-" * 20 + " FIM: _generate_single_chunk_low " + "-"*20)
-return
+return latents_cpu_bruto
 except Exception as e:
 print("-" * 20 + f" ERRO: _generate_single_chunk_low {e} " + "-"*20)
 finally:
@@ -427,8 +436,14 @@ class VideoService:
 initial_latent_condition=condition_item_latent_overlap, image_conditions=current_image_conditions,
 ltx_configs_override=ltx_configs_override
 )
+
+
+
+#latent_path_bufer = load_tensor(latent_path)
+#final_latents = torch.cat(lista_tensores, dim=2).to(self.device)
+
 
-latentes_bruto = torch.load(
+latentes_bruto = latent_path #torch.load(latent_path_bufer).to("cpu")
 
 #poda inicio overlap
 if i > 0 and poda_latents_num > 0:
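Together with the `return latents_cpu_bruto` change above, the loop now keeps whatever _generate_single_chunk_low hands back (stored here as latent_path) instead of re-reading each chunk from disk with torch.load, pruning only the overlapped start before stashing it. A hedged sketch of that pattern with stand-in names (fake_generate_chunk, the poda_latents_num value, and the [B, C, F, H, W] shape are illustrative, not the file's actual code):

```python
import torch

def fake_generate_chunk(num_frames):
    # Stand-in for _generate_single_chunk_low: returns the raw latent on CPU.
    return torch.randn(1, 8, num_frames, 16, 16)   # assumed [B, C, F, H, W] layout

poda_latents_num = 2    # illustrative number of overlapped latent frames to prune
chunks = []
for i in range(3):
    latentes_bruto = fake_generate_chunk(num_frames=10)
    if i > 0 and poda_latents_num > 0:
        # Drop the frames that duplicate the end of the previous chunk.
        latentes_bruto = latentes_bruto[:, :, poda_latents_num:]
    chunks.append(latentes_bruto)   # stays on CPU until final concatenation
```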
@@ -451,9 +466,7 @@ class VideoService:
 print("\n--- Finalizando Narrativa: Concatenando chunks ---")
 
 # Carrega cada tensor do disco
-lista_tensores = [
-
-# Concatena ao longo da dimensão de frames (dim=2)
+lista_tensores = [load_tensor(c) for c in lista_patch_latentes_chunk]
 final_latents = torch.cat(lista_tensores, dim=2).to(self.device)
 log_tensor_info(final_latents, "Tensor de Latentes Final Concatenado")
 
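Finalization now resolves every stored chunk through load_tensor and concatenates along dim=2, which assumes the latents keep their frame axis in that position (e.g. a [B, C, F, H, W] layout). A toy check of what that concatenation yields, with made-up shapes:

```python
import torch

# Two stand-in chunk latents; only the frame axis (dim=2) differs.
chunk_a = torch.randn(1, 8, 10, 16, 16)   # 10 latent frames
chunk_b = torch.randn(1, 8,  8, 16, 16)   #  8 latent frames (overlap already pruned)

final_latents = torch.cat([chunk_a, chunk_b], dim=2)
print(final_latents.shape)                # torch.Size([1, 8, 18, 16, 16])
```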