dagloop5 committed on
Commit
b42e56f
·
verified ·
1 Parent(s): 2523dee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -15
app.py CHANGED
@@ -31,21 +31,6 @@ subprocess.run(
31
  sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
32
  sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
33
 
34
- # Patch LTX tokenizer max_length before imports load it
35
- import pathlib
36
- _tokenizer_file = pathlib.Path(LTX_REPO_DIR) / "packages/ltx-core/src/ltx_core/text_encoders/gemma/tokenizer.py"
37
- if _tokenizer_file.exists():
38
- _src = _tokenizer_file.read_text()
39
- _patched = _src.replace(
40
- "def __init__(self, tokenizer_path: str, max_length: int = 1024):",
41
- "def __init__(self, tokenizer_path: str, max_length: int = 4096):",
42
- )
43
- if _patched != _src:
44
- _tokenizer_file.write_text(_patched)
45
- print("[Patch] Tokenizer max_length patched: 1024 → 4096")
46
- else:
47
- print("[Patch] Tokenizer max_length already patched or pattern not found")
48
-
49
  _I2V_SYSTEM_PROMPT = (
50
  "You are a Creative Assistant writing concise, action-focused image-to-video prompts."
51
  " Given an image (first frame) and user Raw Input Prompt, generate a prompt to guide"
@@ -462,6 +447,24 @@ if os.path.exists(weight_link):
462
  os.symlink(MERGED_WEIGHTS, weight_link)
463
  print(f"[Gemma] Root ready: {gemma_root}")
464
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
465
  # ---- Insert block (LoRA downloads) between lines 268 and 269 ----
466
  # LoRA repo + download the requested LoRA adapters
467
  LORA_REPO = "dagloop5/LoRA"
 
31
  sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
32
  sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  _I2V_SYSTEM_PROMPT = (
35
  "You are a Creative Assistant writing concise, action-focused image-to-video prompts."
36
  " Given an image (first frame) and user Raw Input Prompt, generate a prompt to guide"
 
447
  os.symlink(MERGED_WEIGHTS, weight_link)
448
  print(f"[Gemma] Root ready: {gemma_root}")
449
 
450
+ # ↓ INSERT BLOCK 2 HERE ↓
451
+ import json as _json
452
+
453
+ _tok_config_path = os.path.join(gemma_root, "tokenizer_config.json")
454
+ if os.path.islink(_tok_config_path) or os.path.exists(_tok_config_path):
455
+ with open(_tok_config_path, "r") as _f:
456
+ _tok_config = _json.load(_f)
457
+ _old = _tok_config.get("model_max_length", "unset")
458
+ _tok_config["model_max_length"] = 4096
459
+ if os.path.islink(_tok_config_path):
460
+ os.remove(_tok_config_path)
461
+ with open(_tok_config_path, "w") as _f:
462
+ _json.dump(_tok_config, _f, indent=2)
463
+ print(f"[Patch] tokenizer_config.json model_max_length: {_old} → 4096")
464
+ else:
465
+ print("[Patch] tokenizer_config.json not found in gemma_root")
466
+ # ↑ END BLOCK 2 ↑
467
+
468
  # ---- Insert block (LoRA downloads) between lines 268 and 269 ----
469
  # LoRA repo + download the requested LoRA adapters
470
  LORA_REPO = "dagloop5/LoRA"