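# Build stage: compile the GPTQ-for-LLaMa CUDA kernel into a wheel that the runtime stage installs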
FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu18.04 as builder
# FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as builder
RUN apt-get update && \
apt-get install --no-install-recommends -y git vim build-essential curl && \
rm -rf /var/lib/apt/lists/*
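# Install any extra apt packages listed in packages.txt (bind-mounted from the build context)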
RUN --mount=target=/root/packages.txt,source=packages.txt apt-get update && xargs -r -a /root/packages.txt apt-get install -y && rm -rf /var/lib/apt/lists/*
RUN curl https://pyenv.run | bash
# Make pyenv available to every subsequent RUN step (a plain `export` would not
# persist across layers), then install Python 3.10 and make it the default.
ENV PYENV_ROOT=/root/.pyenv
ENV PATH=$PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH
RUN pyenv install 3.10 && pyenv global 3.10 && pyenv rehash
# RUN pip install --no-cache-dir --upgrade pip==22.3.1 setuptools wheel && \
# pip install --no-cache-dir datasets "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1" "pydantic~=1.0"
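# Clone GPTQ-for-LLaMa and build its dependencies inside an isolated virtualenv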
RUN git clone https://github.com/oobabooga/GPTQ-for-LLaMa /build
WORKDIR /build
RUN python3 -m venv /build/venv
RUN . /build/venv/bin/activate && \
pip3 install --no-cache-dir --upgrade pip==22.3.1 setuptools wheel && \
pip3 install --no-cache-dir datasets && \
pip3 install torch torchvision torchaudio && \
pip3 install -r requirements.txt
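# Note: `pip3 install torch` above pulls the default PyPI wheel, which bundles its own CUDA runtime;
# if the kernel build must match the CUDA 11.8 runtime stage, consider installing from
# https://download.pytorch.org/whl/cu118 instead.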
# https://developer.nvidia.com/cuda-gpus
# e.g. for an RTX 2060 only: ARG TORCH_CUDA_ARCH_LIST="7.5"
ARG TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX}"
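# Build the CUDA extension wheel into /build; TORCH_CUDA_ARCH_LIST controls which GPU architectures are compiled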
RUN . /build/venv/bin/activate && \
python3 setup_cuda.py bdist_wheel -d .
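# Runtime stage: CUDA 11.8 runtime image that runs the webui as an unprivileged user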
FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04
LABEL maintainer="Your Name <your.email@example.com>"
LABEL description="Docker image for GPTQ-for-LLaMa and Text Generation WebUI"
RUN apt-get update && \
apt-get install --no-install-recommends -y python3-dev libportaudio2 libasound-dev git python3 python3-pip make g++ ffmpeg && \
rm -rf /var/lib/apt/lists/*
RUN --mount=type=cache,target=/root/.cache/pip pip3 install virtualenv
# Set up a new user named "user" with user ID 1000
RUN useradd -m -u 1000 user
# Switch to the "user" user
USER user
# Set home to the user's home directory
ENV HOME=/home/user \
PATH=/home/user/.local/bin:$PATH
# Create the app directory inside the user's home and use it as the working directory
RUN mkdir $HOME/app
WORKDIR $HOME/app
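# Optionally reset the webui source to a pinned revision; the step falls through when WEBUI_VERSION is unset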
ARG WEBUI_VERSION
RUN test -n "${WEBUI_VERSION}" && git reset --hard ${WEBUI_VERSION} || echo "Using provided webui source"
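# Create the runtime virtualenv and install PyTorch into it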
RUN virtualenv $HOME/app/venv
RUN . $HOME/app/venv/bin/activate && \
pip3 install --upgrade pip setuptools wheel && \
pip3 install torch torchvision torchaudio
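# Install the GPTQ-for-LLaMa wheel built in the first stage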
COPY --chown=user --from=builder /build $HOME/app/repositories/GPTQ-for-LLaMa
RUN . $HOME/app/venv/bin/activate && \
pip3 install $HOME/app/repositories/GPTQ-for-LLaMa/*.whl
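# Copy only the extension requirements first so the dependency layers stay cached when the source changes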
COPY --chown=user extensions/api/requirements.txt $HOME/app/extensions/api/requirements.txt
COPY --chown=user extensions/elevenlabs_tts/requirements.txt $HOME/app/extensions/elevenlabs_tts/requirements.txt
COPY --chown=user extensions/google_translate/requirements.txt $HOME/app/extensions/google_translate/requirements.txt
COPY --chown=user extensions/silero_tts/requirements.txt $HOME/app/extensions/silero_tts/requirements.txt
COPY --chown=user extensions/whisper_stt/requirements.txt $HOME/app/extensions/whisper_stt/requirements.txt
COPY --chown=user extensions/superbooga/requirements.txt $HOME/app/extensions/superbooga/requirements.txt
COPY --chown=user extensions/openai/requirements.txt $HOME/app/extensions/openai/requirements.txt
# These steps run as uid 1000, so point the pip cache mount at the user's cache directory
RUN --mount=type=cache,target=/home/user/.cache/pip,uid=1000,gid=1000 . $HOME/app/venv/bin/activate && cd extensions/api && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/home/user/.cache/pip,uid=1000,gid=1000 . $HOME/app/venv/bin/activate && cd extensions/elevenlabs_tts && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/home/user/.cache/pip,uid=1000,gid=1000 . $HOME/app/venv/bin/activate && cd extensions/google_translate && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/home/user/.cache/pip,uid=1000,gid=1000 . $HOME/app/venv/bin/activate && cd extensions/silero_tts && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/home/user/.cache/pip,uid=1000,gid=1000 . $HOME/app/venv/bin/activate && cd extensions/whisper_stt && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/home/user/.cache/pip,uid=1000,gid=1000 . $HOME/app/venv/bin/activate && cd extensions/superbooga && pip3 install -r requirements.txt
RUN --mount=type=cache,target=/home/user/.cache/pip,uid=1000,gid=1000 . $HOME/app/venv/bin/activate && cd extensions/openai && pip3 install -r requirements.txt
COPY --chown=user requirements.txt $HOME/app/requirements.txt
RUN . $HOME/app/venv/bin/activate && \
pip3 install -r requirements.txt
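# Workaround: overwrite the CPU bitsandbytes library with the CUDA 11.8 build so GPU support is used
# even when bitsandbytes fails to detect CUDA at import time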
RUN cp $HOME/app/venv/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118.so $HOME/app/venv/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so
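# Copy the full webui source last so earlier dependency layers are not invalidated by source edits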
COPY --chown=user . $HOME/app/
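# Default server arguments; override CLI_ARGS at `docker run` time if needed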
ENV CLI_ARGS="--listen-port 7860"
CMD . $HOME/app/venv/bin/activate && python3 server.py ${CLI_ARGS}