# Use the NVIDIA official image with PyTorch 2.4.0 CUDA 12.4.1
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-05.html
# NOTE(review): tag is pinned (good); for fully reproducible builds consider
# also pinning by digest (@sha256:...) — confirm against your registry policy.
FROM nvcr.io/nvidia/pytorch:24.05-py3

# Define environments
# MAX_JOBS: parallel compile jobs for source builds (overridden to 20 below
# before the flash-attention build).
ENV MAX_JOBS=16
# Force flash-attn to compile from source instead of using a prebuilt wheel.
# NOTE(review): MAX_JOBS and FLASH_ATTENTION_FORCE_BUILD are build-time knobs
# but ENV persists them into the runtime environment — consider ARG instead
# if runtime pollution matters.
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE
# vLLM workers must use "spawn" (not "fork") to be CUDA-safe.
ENV VLLM_WORKER_MULTIPROC_METHOD=spawn

# Define installation arguments (all build-time only; toggled with
# `docker build --build-arg INSTALL_X=true`).
ARG INSTALL_BNB=false
ARG INSTALL_VLLM=false
ARG INSTALL_DEEPSPEED=false
ARG INSTALL_FLASHATTN=false
# PIP_INDEX lets CI point at an internal mirror; defaults to public PyPI.
ARG PIP_INDEX=https://pypi.org/simple

# Set the working directory (created automatically if missing)
WORKDIR /app

# Copy only the dependency manifest first so the pip-install layer below is
# cached until requirements.txt itself changes.
COPY requirements.txt /app

# Shell conveniences for interactive use: an `nvid` alias for GPU monitoring,
# and an NFS mount command appended to .bashrc (runs on every login shell).
# NOTE(review): 10.201.14.11 is a hardcoded site-specific NFS server — this
# image will not be portable outside that network; confirm this is intended.
# NOTE(review): purpose of `chown -R root /usr/lib*` is unclear from here
# (ownership should already be root) — verify it is still needed.
RUN chown -R root /usr/lib* && echo "alias nvid='watch -n 1 nvidia-smi'" >> /root/.bashrc && echo "mkdir -p /data/nfs11/nfs && mount -t nfs 10.201.14.11:/data/nfs11/nfs /data/nfs11/nfs" >> /root/.bashrc

# Install OS-level build and ops tooling in a single layer.
# - DEBIAN_FRONTEND is set inline (not ENV) so it does not leak into runtime.
# - openssh-client is removed first, then openssh-server is installed from the
#   deadsnakes-updated index (original behavior preserved).
# - apt lists are purged in the same layer so the cache never lands in the image.
# NOTE(review): `apt-get upgrade -y` is kept for behavior parity, but prefer
# bumping the base-image tag instead (hadolint DL3005).
RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get upgrade -y && \
    apt-get remove openssh-client -y && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y software-properties-common && \
    add-apt-repository -y ppa:deadsnakes/ppa && \
    apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
    build-essential gcc g++ make cmake git wget curl vim iputils-ping \
    krb5-user locales language-pack-en tzdata cron zip dnsutils jq fuse ssh openssh-server \
    net-tools pdsh ninja-build nfs-common sudo libuser \
    zlib1g-dev libncurses5-dev libgdbm-dev libnss3-dev libssl-dev libreadline-dev libffi-dev htop tmux && \
    ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    rm -rf /var/lib/apt/lists/*


# Point pip at the configured index, then install pinned PyTorch (cu124 wheels)
# and the project requirements. --no-cache-dir keeps the pip wheel cache out of
# the image layers (hadolint DL3042).
# NOTE(review): extra-index-url is set to the same value as index-url, which is
# redundant — kept for behavior parity, but confirm whether a second mirror was
# intended here.
RUN pip config set global.index-url "$PIP_INDEX" && \
    pip config set global.extra-index-url "$PIP_INDEX" && \
    python -m pip install --no-cache-dir --upgrade pip && \
    python -m pip install --no-cache-dir torch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 --index-url https://download.pytorch.org/whl/cu124 && \
    python -m pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application into the image (done after dependency
# installation so source changes do not invalidate the pip layers).
# NOTE(review): ensure a .dockerignore excludes .git, caches, and any .env
# files so they are not baked into this layer.
COPY . /app

# Install the LLaMA Factory with optional extras selected by build args.
# The comparisons use POSIX `=` rather than the bashism `==`: RUN executes via
# /bin/sh (dash on Ubuntu), where `==` raises "unexpected operator" and the
# INSTALL_* flags were silently ignored.
RUN EXTRA_PACKAGES="metrics"; \
    if [ "$INSTALL_BNB" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
    fi; \
    if [ "$INSTALL_VLLM" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
    fi; \
    if [ "$INSTALL_DEEPSPEED" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
    fi; \
    pip install --no-cache-dir -e ".[$EXTRA_PACKAGES]"

# Rebuild flash attention (optional, source build of the Hopper/FA3 kernels).
# MAX_JOBS bounds parallel nvcc jobs for this compile.
ENV MAX_JOBS=20
# Fixes vs. original:
# - the original was missing `; \` before `fi`, so the backslash joined `fi`
#   onto the `python setup.py install` argument list and left the `if`
#   unterminated — a shell syntax error that failed this RUN on every build;
# - `==` replaced with POSIX `=` (RUN executes via /bin/sh/dash);
# - the cloned source tree is removed in the same layer so it does not bloat
#   the image.
RUN pip uninstall -y transformer-engine flash-attn && \
    if [ "$INSTALL_FLASHATTN" = "true" ]; then \
        pip uninstall -y ninja && pip install --no-cache-dir ninja && \
        git clone https://github.com/Dao-AILab/flash-attention.git flash_attention && \
        cd flash_attention/hopper && python setup.py install && \
        cd /app && rm -rf flash_attention; \
    fi

# Build DeepSpeed from source with the CPU Adam op precompiled, then install
# the wheel and drop the source tree in the same layer.
# https://github.com/microsoft/DeepSpeed/blob/master/docs/_tutorials/advanced-install.md?plain=1
# NOTE(review): this runs unconditionally and will override any deepspeed
# installed via the INSTALL_DEEPSPEED extra above — confirm that is intended.
# NOTE(review): TORCH_CUDA_ARCH_LIST="All" — verify this value is accepted by
# torch.utils.cpp_extension for your torch version; an explicit arch list
# (e.g. "8.0;9.0") is the documented form.
RUN git clone https://github.com/microsoft/DeepSpeed.git && \
    cd DeepSpeed && \
    DS_BUILD_CPU_ADAM=1 TORCH_CUDA_ARCH_LIST="All" python setup.py bdist_wheel && \
    pip install --no-cache-dir dist/deepspeed-*.whl && \
    cd ../ && rm -rf DeepSpeed

# Set up volumes: model caches (HF/ModelScope) plus dataset and output dirs.
# Declared after all writes to these paths, so no build-time content is lost.
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]

# Expose port 7860 for the LLaMA Board.
# ENV uses key=value form — the space-separated `ENV key value` form is
# deprecated (BuildKit LegacyKeyValueFormat check).
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# Expose port 8000 for the API service
ENV API_PORT=8000
EXPOSE 8000
