#!/usr/bin/env bash
# lisa-on-cuda / scripts/entrypoint.sh — container entrypoint.
# author: alessandro trinca tornidor — "[ci] try fix entrypoint.sh" (e1f1c87)
#
# Usage: entrypoint.sh [any-arg]
#   With no argument the script assumes docker-container mode and points
#   WORKDIR / XDG_CACHE_HOME at the paths baked into the image; with an
#   argument it expects both variables to come from the caller's environment.
if [ -z "${1:-}" ]; then
  echo "use no \$1 variable, set WORKDIR and XDG_CACHE_HOME as for docker container mode"
  WORKDIR="/var/task"
  XDG_CACHE_HOME="/data"
fi
# Per-library cache locations, all rooted under XDG_CACHE_HOME.
MPLCONFIGDIR="${XDG_CACHE_HOME}/.cache/matplotlib"
TRANSFORMERS_CACHE="${XDG_CACHE_HOME}/.cache/transformers"
FASTAPI_STATIC="${XDG_CACHE_HOME}/static"
# Dump cache-root ownership/permissions BEFORE setup (container debugging aid).
ls -ld "${XDG_CACHE_HOME}/"
ls -l "${XDG_CACHE_HOME}/"
# Create every cache dir first (mkdir -p also creates ${XDG_CACHE_HOME}/.cache),
# THEN chmod recursively, so the matplotlib/transformers subdirs get 770 too —
# the original chmod ran before those subdirs existed and missed them.
mkdir -p "${MPLCONFIGDIR}" "${TRANSFORMERS_CACHE}" "${FASTAPI_STATIC}"
# POSIX option order: options come before the mode ("chmod -R 770", not "chmod 770 -R").
chmod -R 770 "${XDG_CACHE_HOME}/.cache"
chmod -R 770 "${FASTAPI_STATIC}"
# Dump cache-root state again AFTER setup for comparison in the container log.
ls -ld "${XDG_CACHE_HOME}/"
ls -l "${XDG_CACHE_HOME}/"
# Publish the paths to the child processes (venv activate, uvicorn, libraries).
export WORKDIR XDG_CACHE_HOME MPLCONFIGDIR TRANSFORMERS_CACHE FASTAPI_STATIC
# Activate the project virtualenv, then dump environment diagnostics
# (interpreter, memory, CUDA toolchain, GPU, packages, disk) to the log.
source "${WORKDIR}/venv/bin/activate"
# 'command -v' is a shell builtin — portable, and present even in slim
# images where the external 'which' binary may be missing.
command -v python
python --version
free -m
command -v nvcc
nvcc -V
command -v nvidia-smi
nvidia-smi
pip list
command -v uvicorn
ls -l "${WORKDIR}/venv/bin/uvicorn"
df -h / /data /home /var/task
echo "WORKDIR - /var/task"
ls -l "${WORKDIR}"
echo "XDG_CACHE_HOME - /data"
# In docker-container mode (no $1) list the whole cache volume for debugging.
if [ -z "${1:-}" ]; then
  echo "use no \$1 variable, show folder ${XDG_CACHE_HOME} content"
  find "${XDG_CACHE_HOME}"
fi
# Pick the GPU with the most free memory: nvidia-smi emits one
# "memory.free, index" line per GPU, a numeric descending sort on the free
# memory column puts the emptiest GPU first, and awk takes that line's index.
CUDA_VISIBLE_DEVICES=$(nvidia-smi --query-gpu=memory.free,index --format=csv,nounits,noheader | sort -nr | head -1 | awk '{ print $NF }')
echo "calculated CUDA_VISIBLE_DEVICES env variable: ${CUDA_VISIBLE_DEVICES}."
# Export only when detection succeeded: an exported EMPTY CUDA_VISIBLE_DEVICES
# hides every GPU from the application, which is worse than leaving it unset.
if [ -n "${CUDA_VISIBLE_DEVICES}" ]; then
  export CUDA_VISIBLE_DEVICES
fi
PYTHONFILE="lisa_on_cuda.app.main"
#if [ -z "$1" ];
#then
#  PYTHONFILE="app.main"
#fi
echo "running command 'uvicorn ${PYTHONFILE}:app --host 0.0.0.0 --port 7860'..."
# exec replaces this shell with uvicorn: docker's SIGTERM reaches the server
# directly and the server's exit status becomes the container's. The previous
# unconditional "exit 0" masked uvicorn failures from the orchestrator.
exec uvicorn "${PYTHONFILE}:app" --host 0.0.0.0 --port 7860