# syntax=docker/dockerfile:1
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 AS builder

# Point apt at the local Nexus mirror (Ubuntu jammy).
# No standalone `apt-get update` layer here: an update in its own layer
# caches a stale package index (hadolint DL3009); the install step below
# runs its own `apt-get update` in the same layer.
RUN sed -i 's#deb http://archive.ubuntu.com/ubuntu#deb http://nexus.nas.local/repository/ubuntu-jammy#g' /etc/apt/sources.list

# Build-time toolchain. The deb cache lives in a BuildKit cache mount (kept
# on the build host, never in the image); the apt lists are removed so they
# do not persist in the layer.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,rw \
    apt-get update && \
    apt-get install --no-install-recommends -y \
        build-essential \
        git \
        python3-dev \
        python3-venv \
        vim \
    && rm -rf /var/lib/apt/lists/*
# Shallow-clone GPTQ-for-LLaMa via the local git cache into /build.
RUN git clone --depth=1 http://gitcache.nas.local/github.com/oobabooga/GPTQ-for-LLaMa /build

# Switch pip to the local devpi mirrors. The venv is created and activated
# first because this stage installs no system pip (only python3-venv above),
# so `pip3` only exists inside the venv. `--global` writes the system-wide
# pip config, so the mirror settings also apply to later venv activations.
RUN python3 -m venv /build/venv && . /build/venv/bin/activate && \
    pip3 config set --global install.trusted-host devpi.nas.local && \
    pip3 config set --global global.index-url http://devpi.nas.local/root/aliyun && \
    pip3 config set --global global.extra-index-url "http://devpi.nas.local/root/pytorch http://devpi.nas.local/root/pypi"

WORKDIR /build

# Install build-time Python deps into the venv created above. The redundant
# second `python3 -m venv /build/venv` is dropped — the venv already exists.
# The pip wheel cache lives in a BuildKit cache mount so it speeds rebuilds
# without bloating the image layer.
RUN --mount=type=cache,target=/root/.cache/pip,rw \
    . /build/venv/bin/activate && \
    pip3 install --upgrade pip setuptools wheel && \
    pip3 install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.2+cu118 && \
    pip3 install -r requirements.txt

# Target CUDA compute capabilities for the CUDA-kernel build.
# https://developer.nvidia.com/cuda-gpus (e.g. an RTX 2060 is "7.5").
ARG TORCH_CUDA_ARCH_LIST="7.5"
# Use the ARG value instead of a hardcoded "7.5" — previously
# `--build-arg TORCH_CUDA_ARCH_LIST=…` was silently ignored. The default is
# unchanged, so builds without the arg behave exactly as before.
RUN . /build/venv/bin/activate && \
    TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}" python3 setup_cuda.py bdist_wheel -d .

# # https://developer.nvidia.com/cuda-gpus
# # for a rtx 2060: ARG TORCH_CUDA_ARCH_LIST="7.5"
# ARG TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX}"
# RUN . /build/venv/bin/activate && \
#     python3 setup_cuda.py bdist_wheel -d .

FROM nvidia/cuda:11.8.0-runtime-ubuntu22.04

LABEL maintainer="Your Name <your.email@example.com>"
LABEL description="Docker image for GPTQ-for-LLaMa and Text Generation WebUI"

# DEBIAN_FRONTEND is a build-time-only apt knob: declare it as ARG so it is
# visible to the apt RUNs below but does not leak into the runtime
# environment of containers (Docker apt best practice).
ARG DEBIAN_FRONTEND=noninteractive
ENV PIP_PREFER_BINARY=1

# Point apt at the local Nexus mirror (Ubuntu jammy).
# No standalone `apt-get update` layer here: an update in its own layer
# caches a stale package index (hadolint DL3009); the install step below
# runs its own `apt-get update` in the same layer.
RUN sed -i 's#deb http://archive.ubuntu.com/ubuntu#deb http://nexus.nas.local/repository/ubuntu-jammy#g' /etc/apt/sources.list

# Runtime packages: Python + pip, a minimal toolchain (make/g++) for
# source-built wheels, audio libraries for voice extensions, and ffmpeg for
# media handling. Deb cache kept in a BuildKit cache mount.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked,rw \
    apt-get update && \
    apt-get install --no-install-recommends -y \
        ffmpeg \
        g++ \
        git \
        libasound-dev \
        libportaudio2 \
        make \
        python3 \
        python3-dev \
        python3-pip \
    && rm -rf /var/lib/apt/lists/*

# Switch pip to the local devpi mirrors (system-wide `--global` config, so
# it applies to every pip invocation in this stage).
RUN pip config set --global install.trusted-host devpi.nas.local && \
    pip config set --global global.index-url http://devpi.nas.local/root/aliyun && \
    pip config set --global global.extra-index-url "http://devpi.nas.local/root/pytorch http://devpi.nas.local/root/pypi"

ENV WORKSPACE=/text-generation-webui

# Clone the webui through the local git cache. When WEBUI_VERSION is given,
# pin the checkout to that ref and FAIL the build if the ref is invalid.
# Fixes in this step:
#  - removed a trailing space after the `\` line continuation (fragile parse);
#  - `git reset --hard` previously ran in the build's default cwd, not in
#    ${WORKSPACE} (WORKDIR is set later), so it always failed and fell
#    through to the `|| echo` branch — pinning never worked;
#  - the `&& … || echo` pattern swallowed real reset failures.
ARG WEBUI_VERSION
RUN git clone http://gitcache.nas.local/github.com/oobabooga/text-generation-webui.git ${WORKSPACE} && \
    if [ -n "${WEBUI_VERSION}" ]; then \
        git -C ${WORKSPACE} reset --hard "${WEBUI_VERSION}"; \
    else \
        echo "WEBUI_VERSION not set, using latest webui source"; \
    fi

# Base Python deps: pinned CUDA-11.8 torch stack plus sentence_transformers
# and xformers. (Unlike the builder stage, no virtualenv is created here —
# packages are installed into the system python3; the old "Create virtualenv"
# comment was inaccurate.) Wheel cache kept in a BuildKit cache mount.
RUN --mount=type=cache,target=/root/.cache/pip,rw \
    pip3 install --upgrade pip setuptools wheel && \
    pip3 install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.2+cu118 sentence_transformers xformers

# Copy the GPTQ-for-LLaMa tree from the builder (the wheel built by
# setup_cuda.py sits at the repo root, `-d .`) and install that wheel.
COPY --from=builder /build ${WORKSPACE}/repositories/GPTQ-for-LLaMa
RUN --mount=type=cache,target=/root/.cache/pip,rw \
    pip3 install ${WORKSPACE}/repositories/GPTQ-for-LLaMa/*.whl

# Install main requirements (noavx2 variant), rewriting upstream GitHub URLs
# to the local mirrors first:
#   *.whl -> the Nexus "github-blobs" proxy (typo fix: this line previously
#            said "github-blos", inconsistent with the extensions loop and
#            the flash-attn URL below, so .whl installs bypassed the mirror);
#   *.git -> the local git cache;
#   everything else passes through unchanged.
RUN --mount=type=cache,target=/root/.cache/pip,rw \
    {\
        grep --regexp='https://github.com/\S*\.whl' < ${WORKSPACE}/requirements_noavx2.txt | awk '{sub(/https:\/\/github.com/,"http://nexus.nas.local/repository/github-blobs", $0); print $0 }'; \
        grep --regexp='https://github.com/\S*\.git' < ${WORKSPACE}/requirements_noavx2.txt | awk '{sub(/https:\/\/github.com/,"http://gitcache.nas.local/github.com", $0); print $0 }'; \
        grep -v -E 'https://github.com/\S*\.git|https://github.com/\S*\.whl' < ${WORKSPACE}/requirements_noavx2.txt; \
    } > /tmp/requirements.txt.tmp; \
    pip3 install -r /tmp/requirements.txt.tmp && \
    rm /tmp/requirements.txt.tmp

# RUN cp /usr/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118.so /usr/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so

# Install per-extension requirements, applying the same GitHub -> local
# mirror URL rewriting as the main-requirements step to each
# extensions/*/requirements.txt.
# NOTE(review): statements inside the loop are `;`-separated, so a failing
# `pip3 install` for one extension does not abort the build — the loop
# continues and the RUN's exit status is that of the final `rm`. This looks
# like deliberate best-effort installation; confirm before tightening.
RUN --mount=type=cache,target=/root/.cache/pip,rw \
    for ext in ${WORKSPACE}/extensions/*/requirements.txt; do \
        cd "$(dirname "$ext")"; \
        {\
            grep --regexp='https://github.com/\S*\.whl' < requirements.txt | awk '{sub(/https:\/\/github.com/,"http://nexus.nas.local/repository/github-blobs", $0); print $0 }'; \
            grep --regexp='https://github.com/\S*\.git' < requirements.txt | awk '{sub(/https:\/\/github.com/,"http://gitcache.nas.local/github.com", $0); print $0 }'; \
            grep -v -E 'https://github.com/\S*\.git|https://github.com/\S*\.whl' < requirements.txt; \
        } > /tmp/requirements.txt.tmp; \
        pip3 install -r /tmp/requirements.txt.tmp; \
        rm /tmp/requirements.txt.tmp; \
    done

WORKDIR ${WORKSPACE}

# Replace whatever flash-attn the requirements pulled in with a prebuilt
# wheel served from the local Nexus GitHub-blobs proxy.
# NOTE(review): the wheel is tagged cu117/torch2.0 while this image is
# CUDA 11.8 with torch 2.0.1+cu118 — presumably works via CUDA minor-version
# compatibility, but confirm it matches the installed torch ABI.
RUN pip3 uninstall -y flash-attn && \
    pip3 install http://nexus.nas.local/repository/github-blobs/Dao-AILab/flash-attention/releases/download/v2.3.0/flash_attn-2.3.0+cu117torch2.0cxx11abiFALSE-cp310-cp310-linux_x86_64.whl

# Docker support files (entrypoint.sh etc.) from the build context.
COPY . /docker

# Extra server.py flags; override at run time with `docker run -e CLI_ARGS=…`.
ENV CLI_ARGS=""

ENTRYPOINT [ "/docker/entrypoint.sh" ]
# Shell form is deliberate: ${CLI_ARGS} must be expanded at container start,
# which the exec (JSON) form would not do. With the exec-form ENTRYPOINT this
# CMD is delivered as `/bin/sh -c '…'` arguments — entrypoint.sh presumably
# ends with `exec "$@"`; confirm, otherwise signals may not reach server.py.
CMD python3 server.py --listen ${CLI_ARGS}
