# demollm / Dockerfile
# Source: Hugging Face Space by ajeetsraina, commit f3ceba5 (444 bytes).
# (The lines above were scraped web-UI chrome — "raw / history / blame" —
#  and have been converted to comments so the Dockerfile parses.)
# syntax=docker/dockerfile:1

# Base image with Python 3.9 and the tooling needed to run Hugging Face LLMs.
# NOTE(review): python:3.9-slim would shrink the image considerably — switch
# once inference.py's system-level dependencies are confirmed.
FROM python:3.9

# Install the inference dependency.
# --no-cache-dir keeps pip's download cache out of the image layer (DL3042).
# Version is pinned for reproducible builds (DL3013) — bump deliberately.
RUN pip install --no-cache-dir "transformers==4.30.2"

# Set the working directory first so the subsequent COPY lands in the workdir.
WORKDIR /app

# Create an unprivileged user with a stable UID (lets Kubernetes verify
# runAsNonRoot) and copy the application owned by that user.
RUN useradd --system --create-home --uid 10001 appuser
COPY --chown=appuser:appuser . /app

# Drop root for defense in depth; port 8000 is unprivileged so binding works.
USER appuser

# Document the port the inference server listens on.
# (EXPOSE is metadata only — publish with `docker run -p 8000:8000`.)
EXPOSE 8000

# Exec-form CMD: inference.py runs as PID 1 and receives SIGTERM on stop.
CMD ["python", "inference.py"]