# p2p-llm / Dockerfile
# Build stage
FROM golang:1.24-alpine AS builder
WORKDIR /app
# Copy go mod files
COPY go.mod go.sum ./
RUN go mod download
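# (go.mod/go.sum are copied before the rest of the source so the
#  "go mod download" layer above stays cached across code-only changes)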
# Copy source files
COPY . .
# Build
RUN CGO_ENABLED=0 GOOS=linux go build -o /app/main
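# CGO_ENABLED=0 produces a statically linked binary, so it can run in the
# Ubuntu stage below without the Go toolchain or a matching libc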
# Final stage
FROM ubuntu:22.04
# Copy built binary from builder
COPY --from=builder /app/main /app/main
RUN apt-get update && \
apt-get install -y \
build-essential \
libssl-dev \
zlib1g-dev \
libboost-system-dev \
libboost-filesystem-dev \
cmake \
git \
python3-pip \
curl \
wget && \
rm -rf /var/lib/apt/lists/*
# Install Python dependencies
RUN pip3 install huggingface-hub openai gradio
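# (assumed roles, not spelled out in this file: huggingface-hub for model/file
#  downloads, gradio for a web UI on one of the exposed ports, and the openai
#  client to talk to llama-server's OpenAI-compatible /v1 API)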
# Build llama.cpp
RUN git clone https://github.com/ggerganov/llama.cpp && \
cd llama.cpp && \
mkdir build && \
cd build && \
cmake .. -DLLAMA_BUILD_SERVER=ON -DLLAMA_BUILD_EXAMPLES=ON -DCMAKE_BUILD_TYPE=Release && \
cmake --build . --config Release --target llama-server -j $(nproc)
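# No WORKDIR is set in this stage, so the clone lands in / and the server
# binary should end up at /llama.cpp/build/bin/llama-server; start.sh is
# expected to point at that path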
# Download model
RUN mkdir -p /models && \
wget -O /models/model.q8_0.gguf https://huggingface.co/unsloth/DeepSeek-R1-Distill-Qwen-1.5B-GGUF/resolve/main/DeepSeek-R1-Distill-Qwen-1.5B-Q8_0.gguf
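# Baking the model into the image keeps startup simple at the cost of image
# size (the Q8_0 GGUF of a 1.5B-parameter model is on the order of 2 GB)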
# Copy startup script
COPY start.sh /start.sh
RUN chmod +x /start.sh
# Expose ports
EXPOSE 8080 3000
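# start.sh itself is not part of this file. A minimal sketch of what it
# plausibly contains, assuming llama-server serves the model on 8080 and the
# Go binary provides the p2p/API layer on 3000 (ports, paths, and flags below
# are assumptions, not taken from the real script):
#
#   #!/bin/sh
#   /llama.cpp/build/bin/llama-server \
#       -m /models/model.q8_0.gguf --host 0.0.0.0 --port 8080 &
#   exec /app/main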
# Start services
CMD ["/start.sh"]