### Builder Container
# NOTE(review): cvisionai/wget:latest is an unpinned third-party image — pin a tag or
# digest for reproducible builds.
FROM cvisionai/wget:latest AS wget

### Download the spark-3.2.3-bin-hadoop3.2 distribution archive
# --no-check-certificate is kept because this minimal image may lack CA certificates;
# the archive integrity ultimately rests on the dlcdn.apache.org origin — TODO: verify
# against the Apache-published SHA-512 instead.
WORKDIR /
RUN wget --no-check-certificate https://dlcdn.apache.org/spark/spark-3.2.3/spark-3.2.3-bin-hadoop3.2.tgz

### Final Container
FROM openjdk:8-jre-slim

LABEL maintainer="weishujie <weishujie163@gmail.com>"

# Record the base image for traceability (key=value form; the legacy
# space-separated ENV syntax is deprecated).
ENV BASE_IMAGE=openjdk:8-jre-slim

RUN set -ex && \
    # Switch to the Aliyun Debian mirror for faster downloads in CN regions
    sed -i "s@http://deb.debian.org@https://mirrors.aliyun.com@g" /etc/apt/sources.list && \
    apt-get update && \
    ln -s /lib /lib64 && \
    # Use apt-get (not `apt`, which warns about an unstable CLI in scripts) and
    # set DEBIAN_FRONTEND only for this command so it does not leak into the image
    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        bash \
        bzip2 \
        krb5-user \
        libc6 \
        libnss3 \
        libpam-modules \
        tini \
        wget && \
    # Spark launcher scripts expect bash semantics from /bin/sh
    rm /bin/sh && \
    ln -sv /bin/bash /bin/sh && \
    echo "auth required pam_wheel.so use_uid" >> /etc/pam.d/su && \
    # Writable /etc/passwd lets the entrypoint register the arbitrary UID that
    # Kubernetes may assign at runtime
    chgrp root /etc/passwd && chmod ug+rw /etc/passwd && \
    # Clean apt metadata and caches in the same layer that created them
    rm -rf /var/lib/apt/lists/* /var/cache/apt/*

# Spark distribution coordinates — must match the archive fetched in the builder stage
ENV SPARK_VERSION=3.2.3
ENV HADOOP_VERSION=hadoop3.2
ENV SCALA_VERSION=2.12

ENV SPARK_HOME=/opt/spark
ENV SPARK_CONF_DIR=$SPARK_HOME/conf
# $SPARK_HOME/conf gets cleaned by Spark-on-Kubernetes internals, so extra
# configs are served from a separate directory placed on the classpath
ENV SPARK_CLASSPATH=$SPARK_HOME/cluster-conf

# Deterministic Python string hashing across driver and executors
ENV PYTHONHASHSEED=0
ENV CONDA_DIR=/opt/conda
ENV SHELL=/bin/bash

ENV PATH=$PATH:$SPARK_HOME/bin:$CONDA_DIR/bin

ARG MINICONDA_VERSION=22.11.1-1
# NOTE(review): despite the name, this value is a SHA-256 digest (64 hex chars),
# not an MD5 sum; it is checked with sha256sum during the Miniconda install
ARG MINICONDA_MD5=473e5ecc8e078e9ef89355fbca21f8eefa5f9081544befca99867c7beac3150d
ARG PYTHON_VERSION=3.7.15

# Numeric UID the Spark-on-Kubernetes runtime user runs as (see USER below)
ARG spark_uid=185

### Install Spark: copy the archive downloaded in the builder stage and unpack it
COPY --from=wget /spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}.tgz /
# BUGFIX: the original ended this RUN with a dangling `\`, which made the parser
# splice the following `RUN mkdir -p $CONDA_DIR …` instruction into this shell
# command (creating a literal `RUN` directory and collapsing two layers into one).
RUN tar -zxvf /spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}.tgz -C /opt/ && \
    ln -s /opt/spark-${SPARK_VERSION}-bin-${HADOOP_VERSION} $SPARK_HOME && \
    # Remove the archive in the same layer so it never persists in the image
    rm -f /spark-${SPARK_VERSION}-bin-${HADOOP_VERSION}.tgz && \
    mkdir -p $SPARK_HOME/work-dir \
             $SPARK_HOME/spark-warehouse \
             $SPARK_HOME/cluster-conf

# Install Miniconda (adapted from jupyter/docker-stacks base-notebook Dockerfile:
# https://github.com/jupyter/docker-stacks/blob/6d42503c684f3de9b17ce92a6b0c952ef2d1ecd8/base-notebook/Dockerfile#L78-L101)
RUN mkdir -p $CONDA_DIR && \
    cd /tmp && \
    # e.g. https://repo.anaconda.com/miniconda/Miniconda3-py37_22.11.1-1-Linux-x86_64.sh
    wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh && \
    # Verify the installer against the pinned digest (MINICONDA_MD5 actually holds
    # a SHA-256); previously the ARG was declared but never checked
    echo "${MINICONDA_MD5}  Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh" | sha256sum -c - && \
    /bin/bash Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh -f -b -p $CONDA_DIR && \
    rm Miniconda3-py37_${MINICONDA_VERSION}-Linux-x86_64.sh && \
    # Pin conda itself so `conda update --all` below cannot move it
    echo "conda ${MINICONDA_VERSION}" >> $CONDA_DIR/conda-meta/pinned && \
    # Tsinghua mirrors for faster package downloads in CN regions
    conda config --add channels http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge && \
    conda config --add channels http://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/ && \
    conda config --add channels http://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ && \
#    conda config --add channels http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/msys2/ && \
#    conda config --system --prepend channels conda-forge && \
    conda config --system --set auto_update_conda false && \
    conda config --set show_channel_urls true && \
#    conda config --system --set channel_priority strict && \
    # Quote the ARG so an empty/space-containing value cannot break the test
    if [ "$PYTHON_VERSION" != 'default' ]; then conda install --yes python="$PYTHON_VERSION"; fi && \
    # Pin the installed Python minor version so later installs/updates cannot change it
    conda list python | grep '^python ' | tr -s ' ' | cut -d '.' -f 1,2 | sed 's/$/.*/' >> $CONDA_DIR/conda-meta/pinned && \
    conda install --yes conda && \
    conda install --yes pip && \
    conda install --yes numpy scipy pandas scikit-learn && \
    conda install --yes pyarrow && \
    conda update --all --yes && \
    # Drop conda package caches in the same layer to keep the image small
    conda clean --all -f -y

COPY conf/* $SPARK_CONF_DIR/
# $SPARK_HOME/conf gets cleaned by Spark on Kubernetes internals; keep a second
# copy in the cluster-conf directory that SPARK_CLASSPATH puts on the classpath
COPY conf/* $SPARK_HOME/cluster-conf/
COPY entrypoint.sh /opt/
# Ship the Dockerfile inside the image for provenance/debugging
COPY Dockerfile /my_docker/

WORKDIR $SPARK_HOME/work-dir
# Group-writable so the arbitrary UID (root group) assigned by Kubernetes can
# write here; use $SPARK_HOME instead of the previously hardcoded /opt/spark
RUN chmod g+w $SPARK_HOME/work-dir

# Exec-form entrypoint (no shell wrapper); /opt/entrypoint.sh was copied above —
# presumably it dispatches driver/executor roles and exec's the JVM; verify it
# ends with `exec "$@"` so signals reach the main process.
ENTRYPOINT [ "/opt/entrypoint.sh" ]

# Specify the User that the actual main process will run as
USER ${spark_uid}
