# syntax=docker/dockerfile:1
# Build with `docker build . -t localgpt` (requires BuildKit).
# Run with `docker run -it --mount src="$HOME/.cache",target=/root/.cache,type=bind --gpus=all localgpt` (requires the NVIDIA Container Toolkit).
FROM nvidia/cuda:12.1.1-devel-ubuntu22.04

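# Bind address for the web server started by run.sh; 0.0.0.0 exposes it outside the container
# (assumption: the app reads HOST at startup)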
ENV HOST=0.0.0.0

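# Build toolchain, OpenCL/CLBlast and OpenBLAS development libraries, and Python 3 + pip;
# then register the NVIDIA OpenCL ICD so the GPU is visible to OpenCL tools such as clinfo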
RUN apt-get update && apt-get upgrade -y \
    && apt-get install -y git build-essential libpq-dev gcc \
    wget ocl-icd-opencl-dev opencl-headers clinfo \
    libclblast-dev libopenblas-dev software-properties-common \
    g++-11 make python3 python-is-python3 pip \
    && mkdir -p /etc/OpenCL/vendors && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd

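# Keep Hugging Face downloads in /root/.cache so the host bind mount (see run command above)
# persists models between container runs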
ENV TRANSFORMERS_CACHE=/root/.cache

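# Copy the repository into the image; no WORKDIR is set, so files land under /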
COPY . .

# Set build-related environment variables
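# CUDA_DOCKER_ARCH and LLAMA_CUBLAS steer the CUDA build of llama.cpp below;
# PIP_ROOT_USER_ACTION silences pip's running-as-root warning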
ENV CUDA_DOCKER_ARCH=all
ENV LLAMA_CUBLAS=1
ENV PIP_ROOT_USER_ACTION=ignore

# Install dependencies
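# (FastAPI/uvicorn/sse-starlette serve the API, Gradio provides the UI,
# huggingface_hub + hf_transfer handle and speed up model downloads)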
RUN python -m pip install --upgrade pip pytest cmake \
    scikit-build setuptools fastapi uvicorn sse-starlette \
    pydantic-settings starlette-context gradio huggingface_hub hf_transfer

# ENV HOME=/home/user \
# 	PATH=/home/user/.local/bin:$PATH \
#     PYTHONPATH=$HOME/app \
# 	PYTHONUNBUFFERED=1 \
# 	GRADIO_ALLOW_FLAGGING=never \
# 	GRADIO_NUM_PORTS=1 \
# 	GRADIO_SERVER_NAME=0.0.0.0 \
# 	GRADIO_THEME=huggingface \
# 	SYSTEM=spaces

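# Compile llama-cpp-python from source with the cuBLAS (CUDA) backend enabled via CMake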
RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python

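# On container start, make run.sh executable (chmod -c reports the change) and launch it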
CMD sh -c "chmod -R 777 -c ./run.sh && ./run.sh"