# llama-2-7b-chat-ecom — Dockerfile
# Originally committed by safihaider (commit a6affd6).
# NOTE: lines above this header were Hugging Face web-UI residue
# ("raw / history / blame…", file size) captured by a page scrape; they are
# not Dockerfile syntax and have been folded into this comment header.
# Final runtime image.
# NOTE: the original file stacked two additional unnamed stages
# (nvidia/cuda:12.2.0-runtime-ubuntu20.04 and nvcr.io/nvidia/pytorch:22.08-py3)
# before this FROM. Only the last FROM defines the final image and neither
# stage was ever referenced with COPY --from, so they only added multi-GB
# pulls to every build; they have been removed.
FROM python:3.9

# Install Python dependencies first so this layer stays cached until
# requirements.txt itself changes (source edits won't trigger a reinstall).
WORKDIR /code
COPY ./requirements.txt /code/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

# Run as a non-root user; UID 1000 is the conventional Hugging Face Spaces user.
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR $HOME/app
COPY --chown=user . $HOME/app

# Hugging Face cache locations, set once at build time instead of exporting
# them inside the CMD shell. Absolute paths equal to the original relative
# "./cache" resolved against the working directory above.
# TRANSFORMERS_CACHE is deprecated in newer transformers releases but kept
# for compatibility with older versions that still read it.
ENV HF_HOME=/home/user/app/cache \
    HF_DATASETS_CACHE=/home/user/app/cache \
    TRANSFORMERS_CACHE=/home/user/app/cache

# Documentation only (does not publish the port): the app serves on 7860,
# the Hugging Face Spaces default.
EXPOSE 7860

# Exec (JSON-array) form so gunicorn runs as PID 1 and receives SIGTERM
# directly on `docker stop`; the original shell-form CMD wrapped it in
# `/bin/sh -c`, which swallows signals.
CMD ["gunicorn", "-b", "0.0.0.0:7860", "ChatController:app", "--timeout", "700"]
# FROM python:3
# RUN apt update
# RUN apt-get -y install pciutils
# RUN lspci -vnn | egrep 'VGA|3D'
# # # RUN cat /etc/os-release
# RUN useradd -ms /bin/bash admin
# COPY . /app
# WORKDIR /app
# RUN chown -R admin:admin /app
# RUN chmod 755 /app
# USER admin
# RUN pip install -r requirements.txt
# CMD python train.py