aiqtech committed on
Commit 7a9cd45 · verified · 1 Parent(s): 57bc130

Update app.py

Files changed (1)
  1. app.py +23 -33
app.py CHANGED
@@ -40,9 +40,6 @@ def initialize_models(device):
     g.trellis_pipeline = TrellisImageTo3DPipeline.from_pretrained(
         "JeffreyXiang/TRELLIS-image-large"
     )
-    if torch.cuda.is_available():
-        print("Moving trellis_pipeline to CUDA")
-        g.trellis_pipeline = g.trellis_pipeline.to("cuda")

     # Image generation pipeline
     print("Loading flux_pipe...")
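Note on this hunk: with the eager .to("cuda") call removed, the TRELLIS pipeline stays on CPU after loading. Below is a minimal sketch of one way to pick up a GPU lazily at call time instead; the helper name and the run() entry point are assumptions for illustration, not taken from this commit.

```python
import torch

def run_trellis_on_best_device(pipeline, image):
    """Hypothetical helper: borrow the GPU for one call, then release its cached memory."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipeline = pipeline.to(device)      # no-op if the pipeline is already on that device
    try:
        return pipeline.run(image)      # assumed TRELLIS-style entry point
    finally:
        if device == "cuda":
            torch.cuda.empty_cache()    # free cached GPU memory between requests
```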
@@ -67,7 +64,7 @@ def initialize_models(device):
     g.translator = transformers_pipeline(
         "translation",
         model="Helsinki-NLP/opus-mt-ko-en",
-        device=device if device != "cuda" else 0
+        device=device
     )
     print("Model initialization completed successfully")

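The translator previously mapped "cuda" to the integer index 0 and passed the string otherwise; the new code hands the device straight through. Recent transformers releases accept either form for pipeline(..., device=...): a device string such as "cpu" or "cuda:0", a torch.device, or the older integer convention (-1 for CPU, 0 for the first GPU). A small self-contained sketch:

```python
from transformers import pipeline as transformers_pipeline

# Keep the translation pipeline on CPU; device="cpu" matches device=-1 in the integer convention.
translator = transformers_pipeline(
    "translation",
    model="Helsinki-NLP/opus-mt-ko-en",
    device="cpu",
)
print(translator("안녕하세요")[0]["translation_text"])
```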
@@ -79,11 +76,12 @@ def initialize_models(device):
    torch.cuda.empty_cache()
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.benchmark = True
-
# Environment variables
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"
os.environ['SPCONV_ALGO'] = 'native'
os.environ['SPARSE_BACKEND'] = 'native'
+ os.environ['CUDA_LAUNCH_BLOCKING'] = '1'  # synchronize CUDA operations
+

# Hugging Face token
HF_TOKEN = os.getenv("HF_TOKEN")
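On the added environment variable: CUDA_LAUNCH_BLOCKING=1 makes kernel launches synchronous, which is slower but makes CUDA errors surface at the offending call with a usable traceback, while PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512 limits how the caching allocator splits blocks to reduce fragmentation. Both are read when CUDA initializes, so the safest ordering sets them before torch is imported; the sketch below shows that ordering as an assumption about the rest of app.py, not something this diff establishes.

```python
import os

# Configure the CUDA allocator and debugging behaviour before CUDA is initialized.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"  # curb allocator fragmentation
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"                         # synchronous kernel launches

import torch  # imported only after the environment is set

if torch.cuda.is_available():
    torch.backends.cuda.matmul.allow_tf32 = True  # allow TF32 matmuls on Ampere+ GPUs
    torch.backends.cudnn.benchmark = True         # autotune cuDNN convolution algorithms
```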
@@ -385,24 +383,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     trial_id = gr.Textbox(visible=False)
     output_buf = gr.State()

-    # Move the Examples gallery to the very bottom
-    if example_images:
-        gr.Markdown("""### Example Images""")
-        with gr.Row():
-            gallery = gr.Gallery(
-                value=example_images,
-                label="Click an image to use it",
-                show_label=True,
-                elem_id="gallery",
-                columns=12,           # 12 per row
-                rows=2,               # 2 rows
-                height=300,           # adjust height
-                allow_preview=True,
-                object_fit="contain"  # preserve image aspect ratio
-            )
-
-
-
     def load_example(evt: gr.SelectData):
         selected_image = Image.open(example_images[evt.index])
         trial_id_val, processed_image = preprocess_image(selected_image)
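load_example takes a gr.SelectData event, so elsewhere in the Blocks it is presumably attached to the gallery's select listener. A hedged, self-contained sketch of that wiring; the example paths and the single image output here are placeholders, not the app's actual components:

```python
import gradio as gr
from PIL import Image

example_images = ["examples/a.png", "examples/b.png"]  # hypothetical paths

with gr.Blocks() as demo:
    image_prompt = gr.Image(type="pil", label="Image Prompt")
    gallery = gr.Gallery(value=example_images, columns=6, height=200)

    def load_example(evt: gr.SelectData):
        # evt.index is the position of the clicked thumbnail within the gallery value
        return Image.open(example_images[evt.index])

    gallery.select(load_example, outputs=image_prompt)
```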
@@ -468,15 +448,10 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:

if __name__ == "__main__":
    try:
-        # Check whether CUDA is available
-        device = "cuda" if torch.cuda.is_available() else "cpu"
+        # Initialize on CPU
+        device = "cpu"
        print(f"Using device: {device}")

-        # Clear CUDA memory
-        if torch.cuda.is_available():
-            torch.cuda.empty_cache()
-            torch.cuda.synchronize()
-
        # Initialize models
        initialize_models(device)

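Initialization is now pinned to "cpu". If the Space runs on ZeroGPU hardware, the usual companion pattern is to request the GPU only inside the handlers that need it via the spaces package; the sketch below is written under that assumption, and the commit itself does not show whether the app uses ZeroGPU.

```python
import torch

try:
    import spaces  # present on Hugging Face Spaces; absent when running locally
except ImportError:
    spaces = None

def _describe_device(prompt: str) -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"generation for {prompt!r} would run on {device}"

# Attach the GPU only for this call when the ZeroGPU runtime is available.
generate = spaces.GPU(_describe_device) if spaces else _describe_device
```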
@@ -490,15 +465,30 @@ if __name__ == "__main__":
        except Exception as e:
            print(f"Warning: Initial preprocessing test failed: {e}")

+        # Set up the Examples gallery
+        if example_images:
+            gr.Markdown("""### Example Images""")
+            with gr.Row():
+                gallery = gr.Gallery(
+                    value=example_images,
+                    label="Click an image to use it",
+                    show_label=True,
+                    elem_id="gallery",
+                    columns=12,           # 12 per row
+                    rows=2,               # 2 rows
+                    height=300,           # adjust height
+                    allow_preview=True,
+                    object_fit="contain"  # preserve image aspect ratio
+                )
+
        # Launch the Gradio interface
        demo.queue()  # enable queueing
        demo.launch(
-            allowed_paths=[PERSISTENT_DIR, TMP_DIR],  # added TMP_DIR
+            allowed_paths=[PERSISTENT_DIR, TMP_DIR],
            server_name="0.0.0.0",
            server_port=7860,
            show_error=True,
-            share=True,  # set share to True
-            enable_queue=True  # enable the queue
+            share=True
        )

    except Exception as e:
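The enable_queue launch argument is dropped while demo.queue() stays: in current Gradio releases queueing is configured on the Blocks object and enable_queue is no longer accepted by launch(). A minimal sketch of the resulting launch pattern; the Blocks body and the two paths are placeholders standing in for the real app and its PERSISTENT_DIR / TMP_DIR:

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")

demo.queue()  # queueing is enabled here, not via launch()
demo.launch(
    allowed_paths=["/data", "/tmp/app"],  # stand-ins for PERSISTENT_DIR and TMP_DIR
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True,
    share=True,
)
```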