cocktailpeanut committed
Commit fea70af
Parent(s): 1aec1ce

update

Files changed:
- app.py (+5 -5)
- src/utils/frame_interpolation.py (+2 -2)
- src/utils/util.py (+2 -2)
app.py CHANGED

@@ -32,8 +32,8 @@ from src.audio2vid import get_headpose_temp, smooth_pose_seq
 from src.utils.frame_interpolation import init_frame_interpolation_model, batch_images_interpolation_tool
 
 if torch.backends.mps.is_available():
-
-    device = "cpu"
+    device = "mps"
+    #device = "cpu"
 elif torch.cuda.is_available():
     device = "cuda"
 else:
@@ -46,7 +46,7 @@ if config.weight_dtype == "fp16":
 else:
     weight_dtype = torch.float32
 
-if device == "cpu":
+if device == "cpu" or device == "mps":
     weight_dtype = torch.float32
 
 audio_infer_config = OmegaConf.load(config.audio_inference_config)
@@ -382,7 +382,7 @@ with gr.Blocks() as demo:
         a2v_headpose_video = gr.Video(label="Option: upload head pose reference video", sources="upload")
 
         with gr.Row():
-            if device == "cpu":
+            if device == "cpu" or device == "mps":
                 a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=256, label="Video size (-W & -H)")
             else:
                 a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
@@ -413,7 +413,7 @@ with gr.Blocks() as demo:
         v2v_source_video = gr.Video(label="Upload source video", sources="upload")
 
         with gr.Row():
-            if device == "cpu":
+            if device == "cpu" or device == "mps":
                 v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=256, label="Video size (-W & -H)")
             else:
                 v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
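Taken together, the app.py changes promote Apple's Metal backend to a first-class device: MPS is now selected when available, weights stay in float32 on it (fp16 operator coverage on MPS is still incomplete in PyTorch), and the Gradio size sliders default to 256 on cpu/mps, presumably to keep inference time manageable. A minimal sketch of the combined device/dtype policy, assuming a hypothetical helper name that does not exist in this repo:

import torch

def get_device_and_dtype(requested_dtype: str = "fp16"):
    # Prefer MPS on Apple Silicon, then CUDA, then CPU -- same order as the diff.
    if torch.backends.mps.is_available():
        device = "mps"
    elif torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    # Mirror the commit: fp16 only on CUDA; cpu and mps fall back to float32.
    if requested_dtype == "fp16" and device == "cuda":
        dtype = torch.float16
    else:
        dtype = torch.float32
    return device, dtype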
src/utils/frame_interpolation.py CHANGED

@@ -6,8 +6,8 @@ import bisect
 import shutil
 
 if torch.backends.mps.is_available():
-
-    device = "cpu"
+    device = "mps"
+    #device = "cpu"
 elif torch.cuda.is_available():
     device = "cuda"
 else:
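The same three-way device check is duplicated verbatim here and again in util.py below, each module computing device at import time. One way to keep the copies from drifting, sketched purely as a suggestion (a src/utils/device.py module like this does not exist in the repo):

# src/utils/device.py (hypothetical shared module)
import torch

if torch.backends.mps.is_available():
    device = "mps"
elif torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

Each file could then import device from this module instead of re-deriving it.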
src/utils/util.py CHANGED

@@ -13,8 +13,8 @@ from einops import rearrange
 from PIL import Image
 
 if torch.backends.mps.is_available():
-    device = "cpu"
-
+    #device = "cpu"
+    device = "mps"
 elif torch.cuda.is_available():
     device = "cuda"
 else:
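After this change, tensors created with device="mps" are actually placed on the Apple GPU rather than falling back to CPU. A quick sanity check, illustrative only and not part of the commit:

import torch

print(torch.backends.mps.is_built())      # PyTorch was compiled with MPS support
print(torch.backends.mps.is_available())  # the running macOS/hardware can use it
x = torch.ones(1, device="mps")           # raises RuntimeError if MPS is unusable
print(x.device)                           # mps:0

Note that is_available() is all the changed code checks; is_built() helps distinguish a PyTorch build without MPS from unsupported hardware when debugging.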