Update app.py
app.py CHANGED
@@ -38,24 +38,34 @@ def initialize_models():
     torch.backends.cudnn.allow_tf32 = True
 
     print("Initializing Trellis pipeline...")
-
-
-
-
-
-    pipeline
-
-
-
-
+    try:
+        pipeline = TrellisImageTo3DPipeline.from_pretrained(
+            "JeffreyXiang/TRELLIS-image-large"
+        )
+
+        if pipeline is None:
+            raise ValueError("Pipeline initialization returned None")
+
+        if torch.cuda.is_available():
+            pipeline = pipeline.to("cuda")
+            # Convert to half precision
+            pipeline = pipeline.half()
+
+    except Exception as e:
+        print(f"Error initializing Trellis pipeline: {str(e)}")
+        raise
 
     print("Initializing translator...")
-
-
-
-
-
-
+    try:
+        translator = translation_pipeline(
+            "translation",
+            model="Helsinki-NLP/opus-mt-ko-en",
+            device=0 if torch.cuda.is_available() else -1
+        )
+    except Exception as e:
+        print(f"Error initializing translator: {str(e)}")
+        raise
+
     flux_pipe = None
 
     print("Models initialized successfully")
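The try/except around the Trellis load turns a silent failure into a visible one, and pipeline.half() converts the weights to fp16, roughly halving their VRAM footprint. A minimal sketch of that load-to-GPU-then-halve pattern, using a plain torch module as a stand-in for the real pipeline:

import torch

# Stand-in module: 4096x4096 fp32 weights occupy exactly 64 MiB.
model = torch.nn.Linear(4096, 4096, bias=False)
print(model.weight.element_size())      # 4 bytes per parameter (fp32)

if torch.cuda.is_available():
    model = model.to("cuda").half()     # fp16 halves the weight memory
    print(model.weight.element_size())  # 2 bytes per parameter (fp16)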
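Once the Helsinki-NLP/opus-mt-ko-en model is loaded, Korean prompts can be translated to English before they reach the 3D pipeline. A usage sketch, assuming transformers is installed (the exact output text may vary by model version):

import torch
from transformers import pipeline as translation_pipeline

translator = translation_pipeline(
    "translation",
    model="Helsinki-NLP/opus-mt-ko-en",
    device=0 if torch.cuda.is_available() else -1,
)

# The pipeline returns a list of dicts keyed by "translation_text".
result = translator("귀여운 고양이 피규어")
print(result[0]["translation_text"])  # e.g. "A cute cat figurine"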
@@ -63,6 +73,7 @@ def initialize_models():
 
     except Exception as e:
         print(f"Model initialization error: {str(e)}")
+        free_memory()
         return False
 
 def get_flux_pipe():
@@ -94,10 +105,11 @@ def free_memory():
     # Clean up CUDA memory
     if torch.cuda.is_available():
         torch.cuda.empty_cache()
+        torch.cuda.synchronize()
 
     # Clean up temporary files
     tmp_dirs = ['/tmp/transformers_cache', '/tmp/torch_home',
-                '/tmp/huggingface', '/tmp/cache']
+                '/tmp/huggingface', '/tmp/cache', TMP_DIR]
 
     for dir_path in tmp_dirs:
         if os.path.exists(dir_path):
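torch.cuda.empty_cache() releases cached allocator blocks back to the driver, and the added torch.cuda.synchronize() waits for in-flight kernels to finish first, so the cleanup takes effect before anything is measured; the failure path in initialize_models() now calls this same function. The body of the loop over tmp_dirs falls outside this diff; a plausible sketch of what such a cleanup usually looks like (hypothetical, assuming each directory is simply emptied and recreated):

import gc
import os
import shutil
import torch

def free_memory(tmp_dirs):
    # Drop unreachable Python objects before touching the CUDA cache.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()

    # Empty each temp directory and recreate it.
    for dir_path in tmp_dirs:
        if os.path.exists(dir_path):
            shutil.rmtree(dir_path, ignore_errors=True)
            os.makedirs(dir_path, exist_ok=True)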
@@ -444,6 +456,9 @@ if __name__ == "__main__":
     if torch.cuda.is_available():
         print(f"Using GPU: {torch.cuda.get_device_name()}")
         print(f"Available GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
+
+        # Configure CUDA memory
+        torch.cuda.set_per_process_memory_fraction(0.8)  # cap GPU memory usage
 
     # Create directories
     os.makedirs(TMP_DIR, exist_ok=True)
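set_per_process_memory_fraction(0.8) caps PyTorch's caching allocator at 80% of the device's total memory; an allocation past the cap raises an out-of-memory error instead of letting this process monopolize the shared GPU. For example:

import torch

if torch.cuda.is_available():
    total = torch.cuda.get_device_properties(0).total_memory
    torch.cuda.set_per_process_memory_fraction(0.8, device=0)
    print(f"Allocator capped at {0.8 * total / 1e9:.2f} of {total / 1e9:.2f} GB")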
@@ -459,8 +474,9 @@ if __name__ == "__main__":
     # Launch the Gradio app
     demo.queue(max_size=1).launch(
         share=True,
-        max_threads=2,
+        max_threads=2,
         show_error=True,
         server_port=7860,
-        server_name="0.0.0.0"
+        server_name="0.0.0.0",
+        enable_queue=True
     )
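One caveat worth flagging: enable_queue was a Gradio 3.x launch() parameter, deprecated in favor of calling .queue() on the app (which this script already does via demo.queue(max_size=1)) and removed in Gradio 4.x. If the Space pins an older Gradio the argument still works; otherwise a version-safe sketch of an equivalent launch looks like this:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("smoke test")

demo.queue(max_size=1)      # queueing is configured on the app object
demo.launch(
    share=True,             # request a tunnelled public URL
    max_threads=2,
    show_error=True,
    server_port=7860,
    server_name="0.0.0.0",  # listen on all interfaces
)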