Spaces:
Running
on
Zero
Running
on
Zero
File size: 2,064 Bytes
295978e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 |
#!/bin/bash
# Launch inference via torchrun with the generate_1_7B config.
#
# NGPUS controls the degree of parallelism (default: 1). It sets, in lockstep:
#   - CUDA_VISIBLE_DEVICES (GPUs 0..NGPUS-1)
#   - torchrun --nproc_per_node
#   - dit.sp_size (sequence-parallel size; presumably must equal the world
#     size — TODO confirm against the model config)
# Example: NGPUS=4 bash this_script.sh
set -euo pipefail

NGPUS="${NGPUS:-1}"
# Comma-separated device list "0,1,...,NGPUS-1" for CUDA_VISIBLE_DEVICES.
gpu_ids=$(seq -s, 0 "$((NGPUS - 1))")

CUDA_VISIBLE_DEVICES="$gpu_ids" torchrun --node_rank=0 --nproc_per_node="$NGPUS" --nnodes=1 \
  --rdzv_endpoint=127.0.0.1:12345 \
  --rdzv_conf=timeout=900,join_timeout=900,read_timeout=900 \
  main.py humo/configs/inference/generate_1_7B.yaml \
  dit.sp_size="$NGPUS" \
  generation.frames=97 \
  generation.scale_t=7.0 \
  generation.scale_i=4.0 \
  generation.scale_a=7.5 \
  generation.mode=TIA \
  generation.height=480 \
  generation.width=832 \
  diffusion.timesteps.sampling.steps=50 \
  generation.positive_prompt=./examples/test_case.json \
  generation.output.dir=./output
# # Run on 2 GPUs
# CUDA_VISIBLE_DEVICES=0,1 torchrun --node_rank=0 --nproc_per_node=2 --nnodes=1 \
# --rdzv_endpoint=127.0.0.1:12345 \
# --rdzv_conf=timeout=900,join_timeout=900,read_timeout=900 \
# main.py humo/configs/inference/generate_1_7B.yaml \
# dit.sp_size=2 \
# generation.frames=97 \
# generation.scale_t=7.0 \
# generation.scale_i=4.0 \
# generation.scale_a=7.5 \
# generation.mode=TIA \
# generation.height=480 \
# generation.width=832 \
# diffusion.timesteps.sampling.steps=50 \
# generation.positive_prompt=./examples/test_case.json \
# generation.output.dir=./output
# # Run on 4 GPUs
# CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --node_rank=0 --nproc_per_node=4 --nnodes=1 \
# --rdzv_endpoint=127.0.0.1:12345 \
# --rdzv_conf=timeout=900,join_timeout=900,read_timeout=900 \
# main.py humo/configs/inference/generate_1_7B.yaml \
# dit.sp_size=4 \
# generation.frames=97 \
# generation.scale_t=7.0 \
# generation.scale_i=4.0 \
# generation.scale_a=7.5 \
# generation.mode=TIA \
# generation.height=480 \
# generation.width=832 \
# diffusion.timesteps.sampling.steps=50 \
# generation.positive_prompt=./examples/test_case.json \
# generation.output.dir=./output
|