Alex0007 committed
Commit 4b621cf (verified) · 1 Parent(s): 5674ee0

Create Dockerfile

Files changed (1)
  1. Dockerfile +45 -0
Dockerfile ADDED
# Using a standard Ubuntu base image
FROM ubuntu:22.04

# Environment variables
ENV CACHE_SIZE=16000
ENV MODEL_NAME="Qwen-7B-gguf"
ENV MODEL_FILE="qwen7b-q4_0.gguf"
ENV MODEL_USER="MatrixStudio"
ENV DEFAULT_MODEL_BRANCH="main"
ENV MODEL_URL="https://huggingface.co/${MODEL_USER}/${MODEL_NAME}/resolve/${DEFAULT_MODEL_BRANCH}/${MODEL_FILE}"
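# With the values above, MODEL_URL resolves to:
# https://huggingface.co/MatrixStudio/Qwen-7B-gguf/resolve/main/qwen7b-q4_0.gguf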

# Installing necessary packages
RUN apt-get update && apt-get upgrade -y \
    && apt-get install -y build-essential python3 python3-pip wget curl git \
    --no-install-recommends \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Setting up the working directory
WORKDIR /app

# Cloning the llama.cpp repository
RUN git clone https://github.com/ggerganov/llama.cpp.git

# Moving to the llama.cpp directory and building the project
WORKDIR /app/llama.cpp
RUN make

# List contents to verify the location of the server executable
RUN ls -la

# Model download process
RUN mkdir -p models/7B && \
    wget -O models/7B/${MODEL_FILE} ${MODEL_URL}

# Changing ownership to a non-root user
RUN useradd -m -u 1000 user && chown -R user:user /app
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Expose the llama.cpp server's HTTP port
EXPOSE 8080

# Run the server by its absolute path. Shell form is used so ${MODEL_FILE} and
# ${CACHE_SIZE} are expanded at run time (the JSON exec form would pass the
# literal string "${MODEL_FILE}" to the binary), and --host 0.0.0.0 makes the
# server reachable through the port published by Docker.
CMD /app/llama.cpp/server -m /app/llama.cpp/models/7B/${MODEL_FILE} -c ${CACHE_SIZE} --host 0.0.0.0
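For reference, a minimal build-and-run sketch; the image tag qwen-llama-server is just an illustrative name chosen here, and the /completion request follows the llama.cpp HTTP server API:

# Build the image from the directory containing this Dockerfile
docker build -t qwen-llama-server .

# Start a container, publishing the server port to the host
docker run --rm -p 8080:8080 qwen-llama-server

# Send a test completion request to the running server
curl http://localhost:8080/completion -d '{"prompt": "Hello, my name is", "n_predict": 32}'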