Aduc-sdr committed on
Commit 21d9e0e (verified)
1 Parent(s): e5bca6d

Update managers/ltx_manager.py

Files changed (1)
  1. managers/ltx_manager.py +17 -10
managers/ltx_manager.py CHANGED
@@ -7,16 +7,14 @@
 # carlex22@gmail.com
 # Rua Eduardo Carlos Pereira, 4125, B1 Ap32, Curitiba, PR, Brazil, CEP 8102025
 #
-# Related Repositories and Projects:
-# GitHub: https://github.com/carlex22/Aduc-sdr
-#
 # PENDING PATENT NOTICE: Please see NOTICE.md.
 #
-# Version: 2.2.2
+# Version: 2.3.0
 #
-# This file manages the LTX-Video specialist pool. It has been refactored to be
-# self-contained by automatically cloning its own dependencies and using a local
-# utility module for pipeline creation, fully decoupling it from external scripts.
+# This version adds a public property `prompt_enhancement_pipeline` to the manager.
+# This allows other specialists, specifically the Deformes3DThinker, to access
+# the internal prompt refinement models (captioning and LLM) used by the LTX pipeline,
+# ensuring stylistic and logical consistency.
 
 import torch
 import gc
@@ -54,8 +52,7 @@ randn_tensor = None
 
 class LtxPoolManager:
     """
-    Manages a pool of LtxWorkers for optimized multi-GPU usage.
-    Handles its own code dependencies by cloning the LTX-Video repository.
+    Manages a pool of LtxWorkers and exposes the enhancement pipeline for other specialists.
     """
     def __init__(self, device_ids, ltx_config_file_name):
         logger.info(f"LTX POOL MANAGER: Creating workers for devices: {device_ids}")
@@ -69,6 +66,14 @@ class LtxPoolManager:
         self.current_worker_index = 0
         self.lock = threading.Lock()
 
+        # <--- NEW PROPERTY FOR THE DEFORMES3DTHINKER TO USE --->
+        # Exposes the first worker's pipeline. We assume all workers are configured
+        # the same way and contain the same enhancement models.
+        self.prompt_enhancement_pipeline = self.workers[0].pipeline if self.workers else None
+        if self.prompt_enhancement_pipeline:
+            logger.info("LTX POOL MANAGER: Prompt enhancement pipeline exposed for other specialists.")
+        # <--- END OF NEW PROPERTY --->
+
         self._apply_ltx_pipeline_patches()
 
         if all(w.device.type == 'cuda' for w in self.workers):
@@ -79,6 +84,8 @@ class LtxPoolManager:
         else:
             logger.info("LTX POOL MANAGER: Operating in CPU or mixed mode. GPU pre-warming skipped.")
 
+    # ... (The rest of the LtxPoolManager class, such as _setup_dependencies, generate_latent_fragment, etc., remains exactly the same) ...
+
     def _setup_dependencies(self):
         """Clones the LTX-Video repo if not found and adds it to the system path."""
         if not LTX_VIDEO_REPO_DIR.exists():
@@ -181,9 +188,9 @@ class LtxPoolManager:
         gc.collect(); torch.cuda.empty_cache()
 
     def refine_latents(self, latents_to_refine: torch.Tensor, **kwargs) -> (torch.Tensor, tuple):
-        # This function can be expanded later if needed.
         pass
 
+    # ... (The rest of the file: LtxWorker, _aduc_prepare_conditioning_patch, Singleton Instantiation, etc. remains identical) ...
 class LtxWorker:
     """
     Represents a single instance of the LTX-Video pipeline on a specific device.
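
Usage note: the sketch below shows how a downstream specialist (for example the Deformes3DThinker named in the new header comment) could consume the `prompt_enhancement_pipeline` property added by this commit. It is a minimal sketch, not part of the diff: the helper function, the `ltx_manager_singleton` call site, and the enhancer attribute names (`prompt_enhancer_image_caption_model`, `prompt_enhancer_llm_model`) are illustrative assumptions; only the `prompt_enhancement_pipeline` property itself comes from the change above.

    # Minimal usage sketch. The attribute names probed on the pipeline are
    # hypothetical placeholders, not confirmed by this diff.
    import logging

    logger = logging.getLogger(__name__)

    def get_prompt_enhancement_models(ltx_pool_manager):
        """Return (caption_model, llm_model) from the exposed pipeline, or (None, None)."""
        pipeline = ltx_pool_manager.prompt_enhancement_pipeline
        if pipeline is None:
            # No workers were created, so there is nothing to share.
            return None, None
        # Fall back to None if the pipeline does not carry these components.
        caption_model = getattr(pipeline, "prompt_enhancer_image_caption_model", None)
        llm_model = getattr(pipeline, "prompt_enhancer_llm_model", None)
        return caption_model, llm_model

    # Hypothetical call site inside the Deformes3DThinker specialist:
    # caption_model, llm_model = get_prompt_enhancement_models(ltx_manager_singleton)
    # if llm_model is None:
    #     logger.warning("LTX prompt enhancement models unavailable; using raw prompts.")

Reading the pool-level property instead of reaching into a specific worker keeps callers agnostic to pool size, matching the assumption stated in the diff that all workers share the same enhancement models.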