Update app.py
app.py CHANGED
@@ -28,11 +28,12 @@ torch.backends.cuda.preferred_linalg_library="cusolver"
 
 import cv2
 import gc
-
+import subprocess
+# --- NEW: Import for SFTP ---
+import paramiko
 from image_gen_aux import UpscaleWithModel
 
 import gradio as gr
-import numpy as np
 import random
 import yaml
 from pathlib import Path
@@ -43,7 +44,7 @@ from huggingface_hub import hf_hub_download
 import shutil
 
 from diffusers import StableDiffusionXLImg2ImgPipeline, AutoencoderKL
-
+import imageio
 from inference import (
     create_ltx_video_pipeline,
     create_latent_upsampler,
@@ -108,6 +109,27 @@ print(f"Target inference device: {target_inference_device}")
 pipeline_instance.to(target_inference_device)
 if latent_upsampler_instance: latent_upsampler_instance.to(target_inference_device)
 
+
+def upload_to_sftp(local_filepath):
+    if not all([FTP_HOST, FTP_USER, FTP_PASS, FTP_DIR]):
+        print("SFTP credentials not set. Skipping upload.")
+        return
+    try:
+        transport = paramiko.Transport((FTP_HOST, 22))
+        transport.connect(username=FTP_USER, password=FTP_PASS)
+        sftp = paramiko.SFTPClient.from_transport(transport)
+        remote_filename = os.path.basename(local_filepath)
+        remote_filepath = os.path.join(FTP_DIR, remote_filename)
+        print(f"Uploading {local_filepath} to {remote_filepath}...")
+        sftp.put(local_filepath, remote_filepath)
+        print("Upload successful.")
+        sftp.close()
+        transport.close()
+    except Exception as e:
+        print(f"SFTP upload failed: {e}")
+        gr.Warning(f"SFTP upload failed: {e}")
+
+
 def calculate_new_dimensions(orig_w, orig_h):
     if orig_w == 0 or orig_h == 0: return int(768), int(768)
     if orig_w >= orig_h:
@@ -279,6 +301,7 @@ def generate(prompt, negative_prompt, clips_list, input_image_filepath, input_vi
     for idx, frame in enumerate(video_np):
         progress(idx / len(video_np), desc="Saving video clip...")
         video_writer.append_data(frame)
+    upload_to_sftp(output_video_path)
     updated_clips_list = clips_list + [output_video_path]
     counter_text = f"Clips created: {len(updated_clips_list)}"
     return output_video_path, seed_ui, gr.update(visible=True), updated_clips_list, counter_text
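For context on the new `upload_to_sftp` helper: the diff references `FTP_HOST`, `FTP_USER`, `FTP_PASS`, and `FTP_DIR` without showing where they are defined. In a Space these would typically be repository secrets read from the environment at startup. A minimal sketch of that assumed configuration block (the names mirror the diff; everything else is standard library):

```python
import os

# Assumed, not shown in the diff: SFTP credentials supplied as environment
# variables (e.g. Hugging Face Space secrets). Any that are missing stay None,
# and upload_to_sftp() then skips the upload via its all([...]) guard.
FTP_HOST = os.environ.get("FTP_HOST")
FTP_USER = os.environ.get("FTP_USER")
FTP_PASS = os.environ.get("FTP_PASS")
FTP_DIR = os.environ.get("FTP_DIR")
```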
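One design note on the helper as committed: `sftp.close()` and `transport.close()` sit on the success path, so an exception inside `sftp.put` leaves the transport open (the `except` block only logs and raises a Gradio warning). A variant that guarantees cleanup with `try`/`finally`, using the same paramiko calls, could look like this (`upload_to_sftp_safe` and its parameters are illustrative, not part of the commit):

```python
import os
import paramiko

def upload_to_sftp_safe(local_filepath, host, user, password, remote_dir, port=22):
    # Same connection sequence as the committed helper, but the transport is
    # closed no matter how sftp.put() exits.
    transport = paramiko.Transport((host, port))
    try:
        transport.connect(username=user, password=password)
        sftp = paramiko.SFTPClient.from_transport(transport)
        remote_filepath = os.path.join(remote_dir, os.path.basename(local_filepath))
        sftp.put(local_filepath, remote_filepath)
    finally:
        transport.close()  # closing the transport also closes the SFTP channel
```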
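The new `import imageio` pairs with the `video_writer.append_data(frame)` loop in the last hunk; the writer itself is opened outside the lines shown. A typical shape for that surrounding code, assuming imageio's `get_writer` API, placeholder frames, and a placeholder fps (writing .mp4 also requires the imageio-ffmpeg backend):

```python
import imageio
import numpy as np

video_np = np.zeros((24, 512, 512, 3), dtype=np.uint8)  # placeholder uint8 frames
output_video_path = "clip.mp4"  # placeholder path

# append_data() writes one frame per call, matching the loop in the diff.
with imageio.get_writer(output_video_path, fps=24) as video_writer:
    for frame in video_np:
        video_writer.append_data(frame)
```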