aiqtech committed on
Commit
03489b3
โ€ข
1 Parent(s): 4d86a9c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -32
app.py CHANGED
@@ -313,39 +313,52 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
313
  )
314
 
315
  if __name__ == "__main__":
316
- # 3D ์ƒ์„ฑ ํŒŒ์ดํ”„๋ผ์ธ
317
- pipeline = TrellisImageTo3DPipeline.from_pretrained(
318
- "JeffreyXiang/TRELLIS-image-large",
319
- use_auth_token=HF_TOKEN
320
- )
321
- pipeline.to("cuda") # .cuda() ๋Œ€์‹  .to("cuda") ์‚ฌ์šฉ
322
-
323
- # ์ด๋ฏธ์ง€ ์ƒ์„ฑ ํŒŒ์ดํ”„๋ผ์ธ
324
- pipe = FluxPipeline.from_pretrained(
325
- "black-forest-labs/FLUX.1-dev",
326
- torch_dtype=torch.bfloat16,
327
- use_auth_token=HF_TOKEN,
328
- device_map="auto" # device_map ์ถ”๊ฐ€
329
- )
330
-
331
- # Hyper-SD LoRA ๋กœ๋“œ
332
- pipe.load_lora_weights(
333
- hf_hub_download(
 
 
 
 
334
  "ByteDance/Hyper-SD",
335
  "Hyper-FLUX.1-dev-8steps-lora.safetensors",
336
- use_auth_token=HF_TOKEN
337
  )
338
- )
339
- pipe.fuse_lora(lora_scale=0.125)
340
-
341
- # ๋ฒˆ์—ญ๊ธฐ ์ดˆ๊ธฐํ™” (GPU ์„ค์ • ์ถ”๊ฐ€)
342
- translator = pipeline("translation",
343
- model="Helsinki-NLP/opus-mt-ko-en",
344
- device=0 if torch.cuda.is_available() else -1)
345
-
346
- try:
347
- pipeline.preprocess_image(Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8)))
348
- except:
349
- pass
 
 
 
 
 
 
 
350
 
351
- demo.launch(allowed_paths=[PERSISTENT_DIR])
 
 
 
313
  )
314
 
315
if __name__ == "__main__":
    # Select the compute device once; all pipelines follow it.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    try:
        # 3D generation pipeline (TRELLIS image-to-3D).
        pipeline = TrellisImageTo3DPipeline.from_pretrained(
            "JeffreyXiang/TRELLIS-image-large",
            token=HF_TOKEN  # `token` replaces the deprecated `use_auth_token`
        )
        pipeline.to(device)

        # Text-to-image generation pipeline (FLUX.1-dev).
        pipe = FluxPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=torch.bfloat16,
            token=HF_TOKEN,  # `token` replaces the deprecated `use_auth_token`
            device_map="auto"
        )

        # Download, load, and fuse the Hyper-SD 8-step LoRA.
        lora_path = hf_hub_download(
            "ByteDance/Hyper-SD",
            "Hyper-FLUX.1-dev-8steps-lora.safetensors",
            token=HF_TOKEN  # `token` replaces the deprecated `use_auth_token`
        )
        pipe.load_lora_weights(lora_path)
        pipe.fuse_lora(lora_scale=0.125)

        # Korean -> English translator.
        # BUG FIX: the global name `pipeline` was rebound above to the
        # TRELLIS object, so calling `pipeline("translation", ...)` here
        # would invoke the 3D pipeline instead of the transformers factory.
        # Import the factory under an unambiguous local alias instead.
        from transformers import pipeline as hf_pipeline
        translator = hf_pipeline(
            "translation",
            model="Helsinki-NLP/opus-mt-ko-en",
            device=0 if torch.cuda.is_available() else -1
        )

        # Warm-up / sanity check of the image preprocessor; a failure here
        # is non-fatal, so only warn instead of aborting startup.
        try:
            test_image = Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))
            pipeline.preprocess_image(test_image)
        except Exception as e:
            print(f"Warning: Initial preprocessing test failed: {e}")

        # Launch the Gradio interface.
        demo.launch(allowed_paths=[PERSISTENT_DIR])

    except Exception as e:
        # Surface initialization failures instead of swallowing them.
        print(f"Error during initialization: {e}")
        raise