export HF_HOME="/data/.huggingface"
echo "PWD: $(pwd)"
echo "$HF_TOKEN" > .hf_token
echo "LS: $(ls -als)"
while true; do nvidia-smi; sleep 600; done &
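# Sanity-check that PyTorch can see the GPU before launching any services.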
python -c "import torch; \ | |
print(f\"is availeble = {torch.cuda.is_available()}\"); \ | |
print(f\"device count = {torch.cuda.device_count()}\"); \ | |
print(f\"current device = {torch.cuda.current_device()}\")" | |
python -m serve.controller --host 0.0.0.0 --port 10000 &
P1=$!
sleep 30
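# After giving the controller time to come up, launch the Gradio web UI pointed at it.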
python -m serve.gradio_web_server --controller http://127.0.0.1:10000 --model-list-mode reload --share &
P2=$!
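# Launch the interactive demo worker serving the prism-dinosiglip+7b model.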
python -m interactive_demo --port 40000 --model_id prism-dinosiglip+7b &
P3=$!
# python -m interactive_demo --port 40001 --model_family llava-v15 --model_id llava-v1.5-7b --model_dir liuhaotian/llava-v1.5-7b &
# P4=$!
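# Optional second worker (llava-v1.5-7b), currently disabled: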
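# Show what ended up in the HF cache, then block on the background services so the container stays alive.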
ls -als "$HF_HOME"
tree --du -h "$HF_HOME"
wait $P1 $P2 $P3
# $P4