# syntax=docker/dockerfile:1
ARG BASE_IMAGE=registry.openanolis.cn/openanolis/anolisos:8
FROM $BASE_IMAGE
# NOTE(review): DEBIAN_FRONTEND has no effect on a yum/dnf-based image (Anolis);
# kept as a build-arg in case an install script references it — confirm and drop.
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asia/Shanghai
# key=value form: legacy space-separated ENV is deprecated (LegacyKeyValueFormat).
ENV CONDA_DIR=/opt/conda
ENV PATH="${CONDA_DIR}/bin:${PATH}"
# Architecture token interpolated into the Miniconda installer filename below.
ENV arch=x86_64
# All subsequent RUN instructions use bash (needed for `source` below).
SHELL ["/bin/bash", "-c"]
COPY docker/rcfiles /tmp/resources
COPY docker/jupyter_plugins /tmp/resources/jupyter_plugins

# NVIDIA driver userspace + build toolchain from the EPAO/EPEL repos.
# Clean the yum cache in the SAME layer — cleaning in a later layer does not
# shrink the image (hadolint DL3040).
RUN yum install -y anolis-epao-release epel-release && \
    yum install -y nvidia-driver nvidia-driver-cuda gcc-c++ gcc which wget make && \
    yum clean all
# RUN curl -L -O https://developer.download.nvidia.com/compute/cuda/11.6.2/local_installers/cuda_11.6.2_510.47.03_linux.run && sh cuda_11.6.2_510.47.03_linux.run --silent --toolkit
# CUDA 11.8 toolkit from an OSS mirror; the installer is deleted in the same
# layer so it never persists in the image. `wget -P` avoids `cd` (DL3003).
RUN wget -P /tmp https://zreloj.oss-cn-hangzhou.aliyuncs.com/cuda_11.8.0_520.61.05_linux.run && \
    sh /tmp/cuda_11.8.0_520.61.05_linux.run --silent --toolkit && \
    rm /tmp/cuda_11.8.0_520.61.05_linux.run

# Debug/dev tooling and CJK locale support.
# Duplicates removed relative to the original list (`strace` and `git` each
# appeared twice); yum cache cleaned in the same layer (DL3040).
RUN yum reinstall -y ca-certificates && \
    yum clean all && \
    yum install -y glibc-locale-source wget git git-lfs strace gdb openmpi-devel curl \
    vim libSM tzdata langpacks-zh_CN \
    wqy-microhei-fonts libXext gcc gcc-c++ make cmake ninja-build && \
    yum clean all

# Generate the zh_CN UTF-8 locale and pin the container timezone to Shanghai.
RUN localedef -c -i zh_CN -f UTF-8 zh_CN.UTF-8 && \
    ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    yum clean all

ENV LANG=zh_CN.UTF-8 LANGUAGE=zh_CN.UTF-8 LC_ALL=zh_CN.UTF-8

# install and config python
ARG PYTHON_VERSION=3.8.18
# Miniconda3-py37_23.1.0-1 is the last installer that ships python 3.7.
# The two original branches were identical except for the installer filename,
# so select the filename first and share the remaining installation steps.
RUN if [ "$PYTHON_VERSION" = "3.7.13" ] ; then \
        installer="Miniconda3-py37_23.1.0-1-Linux-x86_64.sh"; \
    else \
        installer="Miniconda3-latest-Linux-${arch}.sh"; \
    fi && \
    wget --quiet "https://mirrors.aliyun.com/anaconda/miniconda/${installer}" -O ./miniconda.sh && \
    /bin/bash miniconda.sh -b -p /opt/conda && \
    rm -f miniconda.sh && \
    ln -s /opt/conda/etc/profile.d/conda.sh /etc/profile.d/conda.sh && \
    echo ". /opt/conda/etc/profile.d/conda.sh" >> ~/.bashrc && \
    # mirror config for conda (tuna channels) and pip (aliyun index)
    cp /tmp/resources/conda.tuna ~/.condarc && \
    source /root/.bashrc && \
    conda install --yes python==${PYTHON_VERSION} && \
    pip config set global.index-url https://mirrors.aliyun.com/pypi/simple && \
    pip config set install.trusted-host mirrors.aliyun.com

ARG USE_GPU=True

# install pytorch
ARG TORCH_VERSION=2.0.1+cu118
ARG CUDATOOLKIT_VERSION=cu118
# GPU path: install a pinned torch 2.0.1+cu118 wheel from an OSS mirror.
# NOTE(review): that wheel is cp38-only and the URL ignores TORCH_VERSION, so
# the GPU build implicitly assumes PYTHON_VERSION resolves to python 3.8 —
# confirm before bumping either ARG.
# CPU path: install torch==$TORCH_VERSION from the official cpu wheel index.
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --retries 10 --no-cache-dir https://zreloj.oss-cn-hangzhou.aliyuncs.com/torch-2.0.1%2Bcu118-cp38-cp38-linux_x86_64.whl torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDATOOLKIT_VERSION; \
    else \
        pip install --retries 10 --no-cache-dir torch==$TORCH_VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cpu; \
    fi

# install tensorflow
ARG TENSORFLOW_VERSION=1.15.5
# Fix: each inner `fi` must be followed by `;` before the outer `else`/`fi`.
# The original continuation produced `fi else` and `fi fi` on the joined shell
# line, which is a bash syntax error and fails the build.
RUN if [ "$USE_GPU" = "True" ] ; then \
        if [ "$TENSORFLOW_VERSION" = "1.15.5" ] ; then \
            pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
        else \
            pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
        fi; \
    else \
        # only python 3.7 has tensorflow 1.15.5; on python 3.8 use the prebuilt
        # cp38 wheel hosted on the modelscope OSS bucket
        if [ "$PYTHON_VERSION" = "3.7.13" ] ; then \
            pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
        elif [ "$TENSORFLOW_VERSION" = "1.15.5" ] ; then \
            pip install --no-cache-dir numpy==1.18.5 https://modelscope.oss-cn-beijing.aliyuncs.com/releases/dependencies/tensorflow-1.15.5-cp38-cp38-linux_x86_64.whl; \
        else \
            pip install --no-cache-dir tensorflow==$TENSORFLOW_VERSION; \
        fi; \
    fi

# mmcv-full<=1.7.0 for mmdet3d compatible
# CPU builds compile the ops without CUDA; GPU builds force a CUDA build for
# the listed compute capabilities against the toolkit at /usr/local/cuda.
RUN if [ "$USE_GPU" != "True" ] ; then \
        MMCV_WITH_OPS=1 MAX_JOBS=8 \
            pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
    else \
        CUDA_HOME=/usr/local/cuda \
        TORCH_CUDA_ARCH_LIST="5.0 5.2 6.0 6.1 7.0 7.5 8.0 8.6" \
        MMCV_WITH_OPS=1 MAX_JOBS=8 FORCE_CUDA=1 \
            pip install --no-cache-dir 'mmcv-full<=1.7.0' && pip cache purge; \
    fi

# default shell bash
ENV SHELL=/bin/bash
# install special package
# GPU: pinned dgl 1.1.3+cu118 wheel from an OSS mirror (cp38 — presumably
# matched to the default PYTHON_VERSION; verify if python is bumped).
# CPU: dgl 0.9.0 + dglgo from the official dgl wheel index.
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install https://zreloj.oss-cn-hangzhou.aliyuncs.com/dgl-1.1.3%2Bcu118-cp38-cp38-manylinux1_x86_64.whl -f https://data.dgl.ai/wheels/$CUDATOOLKIT_VERSION/repo.html; \
    else \
        pip install --no-cache-dir dgl==0.9.0 dglgo -f https://data.dgl.ai/wheels/repo.html; \
    fi

# copy install scripts
COPY docker/scripts/install_unifold.sh   docker/scripts/install_apex.sh /tmp/

# for uniford
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_unifold.sh; \
    else \
     echo 'cpu unsupport uniford'; \
    fi

# Pointnet2 CUDA ops, built for the listed architectures (GPU only).
RUN if [ "$USE_GPU" = "True" ] ; then \
       export TORCH_CUDA_ARCH_LIST="6.0;6.1;7.0;7.5;8.0;8.6+PTX" && pip install --no-cache-dir https://cr-images-pub.oss-cn-hangzhou.aliyuncs.com/root/modelscope/pointnet2-0.0.0.zip; \
    else \
     echo 'cpu unsupport Pointnet2'; \
    fi

# 3d supports
# COLMAP (structure-from-motion) build script; GPU-only.
COPY docker/scripts/install_colmap.sh /tmp/
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_colmap.sh; \
    else \
     echo 'cpu unsupport colmap'; \
    fi

COPY docker/scripts/install_tiny_cuda_nn.sh /tmp/
# tiny-cuda-nn build script; GPU-only.
# Fix: the original then-branch ended `install_tiny_cuda_nn.sh \` with no `;`,
# so on the joined shell line the words `else`, `echo`, ... were passed as
# arguments to the script instead of closing the if-branch.
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_tiny_cuda_nn.sh; \
    else \
        echo 'cpu unsupport tiny_cudann'; \
    fi

# pytorch3d + nvdiffrast (differentiable rendering); GPU-only.
COPY docker/scripts/install_pytorch3d_nvdiffrast.sh /tmp/
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_pytorch3d_nvdiffrast.sh; \
    else \
     echo 'cpu unsupport pytorch3d nvdiffrast'; \
    fi
# end of 3D
# install apex after deepspeed
# NOTE(review): install_apex.sh was already copied to /tmp by an earlier COPY;
# this COPY is redundant but harmless — confirm and drop one of the two.
COPY docker/scripts/install_apex.sh /tmp/
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_apex.sh; \
    else \
     echo 'cpu unsupport apex'; \
    fi

# ======================================================================

# Basic network diagnostics (ping, netstat, ip) for in-container debugging.
RUN yum install -y iputils net-tools iproute && \
    yum clean all
# install modelscope
COPY requirements /var/modelscope
# Install each domain requirement set; `torch==$TORCH_VERSION` is repeated in
# every invocation so the resolver cannot upgrade/downgrade torch as a side
# effect of a requirement file. The extra find-links index serves prebuilt
# modelscope dependency wheels.
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir torch==$TORCH_VERSION -r /var/modelscope/framework.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir torch==$TORCH_VERSION -r /var/modelscope/audio.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir torch==$TORCH_VERSION -r /var/modelscope/cv.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir torch==$TORCH_VERSION -r /var/modelscope/multi-modal.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir torch==$TORCH_VERSION -r /var/modelscope/nlp.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir torch==$TORCH_VERSION -r /var/modelscope/science.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip install --no-cache-dir torch==$TORCH_VERSION -r /var/modelscope/tests.txt -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html && \
    pip cache purge

# install jupyter plugins into the root user's JupyterLab extension dir
RUN mkdir -p /root/.local/share/jupyter/labextensions/ && \
    cp -r  /tmp/resources/jupyter_plugins/*  /root/.local/share/jupyter/labextensions/

COPY docker/scripts/modelscope_env_init.sh /usr/local/bin/ms_env_init.sh
# python3.8 pip install git+https://github.com/jin-s13/xtcocoapi.git@v1.13
# pip install git+https://github.com/gatagat/lap.git@v0.4.0
# Prebuilt OSS zips replace the git installs above; `--force --no-deps`
# reinstalls these exact versions without touching already-resolved deps.
RUN pip install --no-cache-dir text2sql_lgesql==1.3.0 \
         https://cr-images-pub.oss-cn-hangzhou.aliyuncs.com/root/modelscope/xtcocotools-1.13.zip \
         https://cr-images-pub.oss-cn-hangzhou.aliyuncs.com/root/modelscope/lap-0.4.0.zip \
         detectron2==0.3 -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html --force --no-deps

# mpi4py via conda so it links against the conda-provided MPI stack
RUN conda install -y mpi4py
RUN pip install --no-cache-dir torch==$TORCH_VERSION paint_ldm \
        'mmcls>=0.21.0' 'mmdet>=2.25.0' 'decord>=0.6.0' pai-easycv ms_swift \
        ipykernel fasttext fairseq deepspeed -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html

# NOTE(review): USE_GPU was already declared at this stage's scope above; this
# re-declaration is redundant but harmless.
ARG USE_GPU
# for cpu install cpu version faiss, faiss depends on blas lib, we install libopenblas TODO rename gpu or cpu version faiss
# NOTE(review): the CPU faiss wheel below is tagged py37 — it may not install
# under the default python 3.8; confirm against the actual CPU build.
RUN if [ "$USE_GPU" = "True" ] ; then \
        pip install --no-cache-dir funtextprocessing kwsbp==0.0.6 faiss==1.7.2 safetensors typeguard==2.13.3 scikit-learn librosa==0.9.2 funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
    else \
        pip install --no-cache-dir funtextprocessing kwsbp==0.0.6 https://modelscope.oss-cn-beijing.aliyuncs.com/releases/dependencies/faiss-1.7.2-py37-none-linux_x86_64.whl safetensors typeguard==2.13.3 scikit-learn librosa==0.9.2 funasr -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html; \
    fi

# curl -L https://github.com/wenet-e2e/wenet/tarball/master > wenet.tar.gz
# wenet snapshot + adaseq, installed without dependency resolution (--no-deps).
RUN pip install --no-cache-dir https://cr-images-pub.oss-cn-hangzhou.aliyuncs.com/root/modelscope/wenet.tar.gz adaseq --no-deps
COPY examples /modelscope/examples

# for pai-easycv setup compatibility issue
ENV SETUPTOOLS_USE_DISTUTILS=stdlib

# curl -L https://github.com/facebookresearch/detectron2/tarball/master > detectron2.tar.gz
# detectron2 built from a source snapshot for the listed CUDA archs; GPU-only.
RUN if [ "$USE_GPU" = "True" ] ; then \
        CUDA_HOME=/usr/local/cuda TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0 7.5 8.0 8.6" pip install --no-cache-dir  'https://cr-images-pub.oss-cn-hangzhou.aliyuncs.com/root/modelscope/detectron2.tar.gz'; \
    else \
        echo 'cpu unsupport detectron2'; \
    fi

# torchmetrics==0.11.4 for ofa
RUN pip install --no-cache-dir jupyterlab torchmetrics==0.11.4 tiktoken transformers_stream_generator 'protobuf<=3.20.0' bitsandbytes basicsr
COPY docker/scripts/install_flash_attension.sh /tmp/install_flash_attension.sh
RUN if [ "$USE_GPU" = "True" ] ; then \
        bash /tmp/install_flash_attension.sh; \
    else \
        echo 'cpu unsupport flash attention'; \
    fi

RUN yum clean all

# NOTE(review): the image runs as root (no USER directive) and drops into an
# interactive bash by default — acceptable for a dev image; revisit for prod.
ENTRYPOINT ["/bin/bash"]
