Update app.py
app.py CHANGED
@@ -2,6 +2,9 @@ import gradio as gr
 import os
 import spaces
 import sys
+from transformers import pipeline
+
+
 from copy import deepcopy
 sys.path.append('./VADER-VideoCrafter/scripts/main')
 sys.path.append('./VADER-VideoCrafter/scripts')
@@ -10,6 +13,10 @@ sys.path.append('./VADER-VideoCrafter')
 
 from train_t2v_lora import main_fn, setup_model
 
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
+
+
+
 examples = [
     ["Fairy and Magical Flowers: A fairy tends to enchanted, glowing flowers.", 'huggingface-hps-aesthetic',
         8, 901, 384, 512, 12.0, 25, 1.0, 24, 10],
@@ -29,12 +36,18 @@ examples = [
 
 model = setup_model()
 
-@spaces.GPU(duration=180)
 def gradio_main_fn(prompt, lora_model, lora_rank, seed, height, width, unconditional_guidance_scale, ddim_steps, ddim_eta,
                    frames, savefps):
     global model
     if model is None:
         return "Model is not loaded. Please load the model first."
+
+    # Korean input detection and translation
+    if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
+        translated = translator(prompt, max_length=512)[0]['translation_text']
+        print(f"Translated prompt: {translated}")
+        prompt = translated
+
     video_path = main_fn(prompt=prompt,
                          lora_model=lora_model,
                          lora_rank=int(lora_rank),
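For reference, a minimal sketch of the Korean-to-English preprocessing this commit wires in: the transformers translation pipeline backed by Helsinki-NLP/opus-mt-ko-en returns a list of dicts whose 'translation_text' field holds the English output. The Korean prompt string below is illustrative only (it is not in the repo); max_length=512 mirrors the call inside gradio_main_fn.

from transformers import pipeline

# Same pipeline the commit constructs at module load time.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

# Illustrative Korean prompt (an assumption, not from the commit).
result = translator("요정이 빛나는 마법의 꽃을 돌봅니다.", max_length=512)
print(result[0]["translation_text"])  # pipeline output is a list of {'translation_text': ...} dicts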
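The Hangul check added to gradio_main_fn tests two Unicode ranges: U+AC00–U+D7A3 (precomposed Hangul syllables) and U+3131–U+318E (inside the Hangul Compatibility Jamo block, i.e. bare consonants and vowels such as 'ㅋ'). A hedged refactor into a standalone helper, handy for testing the predicate without loading any model; the helper name is an assumption:

def contains_hangul(text: str) -> bool:
    # Same predicate as the commit: compatibility jamo or precomposed syllables.
    return any('\u3131' <= ch <= '\u318E' or '\uAC00' <= ch <= '\uD7A3' for ch in text)

assert contains_hangul("안녕하세요")                      # precomposed syllables
assert contains_hangul("ㅋㅋ")                            # compatibility jamo only
assert not contains_hangul("Fairy and Magical Flowers")   # English passes through untranslated

Note the check does not cover the conjoining Hangul Jamo block (U+1100–U+11FF), which only shows up in NFD-normalized text; for typical keyboard input the two ranges above are enough.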
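Not part of this commit, but for context: gradio_main_fn takes eleven inputs in the same order as each row of examples, so a UI wiring would look roughly like the sketch below. The component types and labels are assumptions; only the parameter order and the example defaults (8, 901, 384, 512, 12.0, 25, 1.0, 24, 10) come from app.py.

import gradio as gr

# Hypothetical wiring; the Space's actual Interface/Blocks layout may differ.
demo = gr.Interface(
    fn=gradio_main_fn,
    inputs=[
        gr.Textbox(label="Prompt (English or Korean)"),
        gr.Dropdown(["huggingface-hps-aesthetic"], label="LoRA model"),  # assumed choice list
        gr.Number(value=8, label="LoRA rank"),
        gr.Number(value=901, label="Seed"),
        gr.Number(value=384, label="Height"),
        gr.Number(value=512, label="Width"),
        gr.Number(value=12.0, label="Unconditional guidance scale"),
        gr.Number(value=25, label="DDIM steps"),
        gr.Number(value=1.0, label="DDIM eta"),
        gr.Number(value=24, label="Frames"),
        gr.Number(value=10, label="Save FPS"),
    ],
    outputs=gr.Video(label="Generated video"),
    examples=examples,
)
demo.launch()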