# falcon-mini / Dockerfile
FROM nvidia/cuda:11.0.3-base-ubuntu20.04
# Make the CUDA toolchain visible on PATH for later layers (a RUN export would not persist across layers)
ENV PATH="/usr/local/cuda/bin:$PATH"
RUN apt-get update && \
apt-get install --no-install-recommends -y build-essential python3 python3-pip wget curl git && \
apt-get clean && rm -rf /var/lib/apt/lists/*
# Set the working directory in the container to /app
WORKDIR /app
# Install CMake 3.18 into /usr/local (used when building native extensions such as ctransformers from source);
# wget is already installed above, so no extra apt step is needed here
RUN wget -qO- "https://cmake.org/files/v3.18/cmake-3.18.0-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local
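# (Optional sanity check: a `RUN cmake --version` step here should report 3.18.0.)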
# Copy the requirements.txt file into the container
COPY requirements.txt ./
# Install any needed packages specified in requirements.txt
RUN pip3 install --upgrade pip && \
pip3 install -r requirements.txt
# Build ctransformers from source so its native library is compiled inside this image
# (ctransformers documents CT_CUBLAS=1 for a cuBLAS-enabled source build, if GPU offload is wanted)
RUN pip3 install ctransformers --no-binary ctransformers
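# demo.py itself is not shown in this Dockerfile; as a rough, hypothetical sketch (not the author's
# actual code), it presumably loads a Falcon GGML model through ctransformers along these lines.
# The repo id below is an assumption; the model_file name matches the one referenced in the
# commented chown step further down.
#
#   from ctransformers import AutoModelForCausalLM
#   llm = AutoModelForCausalLM.from_pretrained(
#       "TheBloke/falcon-7b-instruct-GGML",              # assumed model repo id
#       model_file="falcon7b-instruct.ggmlv3.q4_0.bin",  # file name referenced below
#       model_type="falcon",
#   )
#   print(llm("Explain containers in one sentence."))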
# Optional: download the model at build time (the file is publicly hosted; adjust the URL as needed).
# Note: LFS-hosted files on Hugging Face are fetched via /resolve/, not /raw/.
#RUN wget -O falcon40b-instruct.ggmlv3.q2_K.bin https://huggingface.co/TheBloke/falcon-40b-instruct-GGML/resolve/main/falcon40b-instruct.ggmlv3.q2_K.bin
# If downloaded, change the ownership of the file to the non-root user created below
# Install git and clone the ggllm.cpp repository and build
#RUN apt-get install -y git && \
# git clone https://github.com/cmp-nct/ggllm.cpp && \
# cd ggllm.cpp && \
# rm -rf build && mkdir build && cd build && cmake -DGGML_CUBLAS=1 .. && cmake --build . --config Release
# Create a non-root user (uid 1000) to run the app
RUN useradd -m -u 1000 user
# RUN chown user:user falcon7b-instruct.ggmlv3.q4_0.bin
USER user
ENV HOME=/home/user \
PATH=/home/user/.local/bin:$PATH
WORKDIR $HOME/app
COPY --chown=user . $HOME/app
# List the copied files (build-time debugging output)
RUN ls -al
# Make port 7860 available to the world outside this container
EXPOSE 7860
# Launch the demo app when the container starts; the host and port flags are passed to demo.py
CMD ["python3", "demo.py", "--host", "0.0.0.0", "--port", "7860"]
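# Example local usage (a sketch; the image tag is arbitrary, and GPU access assumes the
# NVIDIA Container Toolkit is installed on the host):
#   docker build -t falcon-mini .
#   docker run --gpus all -p 7860:7860 falcon-mini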