# syntax=docker/dockerfile:1

# Build as `docker build . -t localgpt`, requires BuildKit.
# Run as `docker run -it --mount src="$HOME/.cache",target=/root/.cache,type=bind --gpus=all localgpt`, requires the Nvidia container toolkit.

FROM nvidia/cuda:12.1.1-devel-ubuntu22.04

ENV HOST=0.0.0.0

# Install build tools, OpenCL/BLAS libraries, and Python, then register the
# Nvidia OpenCL ICD so GPU devices are visible inside the container.
RUN apt-get update && apt-get upgrade -y \
    && apt-get install -y git build-essential libpq-dev gcc \
        wget ocl-icd-opencl-dev opencl-headers clinfo \
        libclblast-dev libopenblas-dev software-properties-common \
        g++-11 make python3 python-is-python3 pip \
    && mkdir -p /etc/OpenCL/vendors && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd

# Keep Hugging Face downloads under /root/.cache so the bind mount from the
# host (see the run command above) persists models between runs.
ENV TRANSFORMERS_CACHE=/root/.cache

COPY . .

# Set build-related env vars
ENV CUDA_DOCKER_ARCH=all
ENV LLAMA_CUBLAS=1
ENV PIP_ROOT_USER_ACTION=ignore

# Install dependencies
RUN python -m pip install --upgrade pip pytest cmake \
    scikit-build setuptools fastapi uvicorn sse-starlette \
    pydantic-settings starlette-context gradio huggingface_hub hf_transfer

# ENV HOME=/home/user \
#     PATH=/home/user/.local/bin:$PATH \
#     PYTHONPATH=$HOME/app \
#     PYTHONUNBUFFERED=1 \
#     GRADIO_ALLOW_FLAGGING=never \
#     GRADIO_NUM_PORTS=1 \
#     GRADIO_SERVER_NAME=0.0.0.0 \
#     GRADIO_THEME=huggingface \
#     SYSTEM=spaces

# Build llama-cpp-python with cuBLAS support so inference runs on the GPU.
RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python

CMD ["sh", "-c", "chmod -c -R 777 ./run.sh && ./run.sh"]
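
# Note: the CMD above assumes `run.sh` exists at the image's working directory,
# brought in from the build context by `COPY . .`; it is not created by this file.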
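
# Optional sanity check (a suggestion, not part of the upstream workflow): the
# image installs `clinfo` and registers the Nvidia OpenCL ICD above, so you can
# confirm the GPU is visible before launching the app by overriding CMD:
#
#   docker build . -t localgpt
#   docker run --rm --gpus=all localgpt clinfo
#
# `clinfo` should list at least one Nvidia platform/device; if it reports none,
# the Nvidia container toolkit is likely not configured on the host.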