# Use Hugging Face TGI as the base image
FROM ghcr.io/huggingface/text-generation-inference:3.0.2

# Set working directory
WORKDIR /app

# Create and set permissions for cache directories
RUN mkdir -p /data /.cache /.triton && \
    chmod 777 /data /.cache /.triton

# Expose the model API on port 8080
EXPOSE 8080

# Pass a Hugging Face token at build time so gated or private models can be downloaded
ARG HF_TOKEN
ENV HF_TOKEN=${HF_TOKEN}

# Run the TGI server with Mistral 7B
CMD ["--model-id", "mistralai/Mistral-7B-v0.3", "--port", "8080"]
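
# A minimal sketch of how this image could be built and run; the image tag
# "tgi-mistral" and the host cache path are illustrative assumptions, not
# defined anywhere in this Dockerfile:
#   docker build --build-arg HF_TOKEN=<your_hf_token> -t tgi-mistral .
#   docker run --gpus all -p 8080:8080 -v "$PWD/data:/data" tgi-mistral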