# mkdir bigDataDocker && cd bigDataDocker

# 1. Fetch the installation packages that go into the "base" image.
# Abort if the directory is missing so the tarballs don't land in the wrong place.
cd base || exit 1
# MySQL comes from the Huawei Kunpeng (aarch64) mirror:
# https://obs-mirror-ftp4.obs.cn-north-4.myhuaweicloud.com/database/mysql.5.7.28-rpm.tar
# The bundle unpacks into an aarch64/ subdir; flatten it into the current dir.
wget https://obs-mirror-ftp4.obs.cn-north-4.myhuaweicloud.com/database/mysql.5.7.28-rpm.tar \
    && tar -xvf mysql.5.7.28-rpm.tar \
    && mv aarch64/* ./ \
    && rm -rf aarch64
wget https://dlcdn.apache.org/dolphinscheduler/1.3.9/apache-dolphinscheduler-1.3.9-bin.tar.gz
wget http://archive.apache.org/dist/flume/1.9.0/apache-flume-1.9.0-bin.tar.gz
wget https://datax-opensource.oss-cn-hangzhou.aliyuncs.com/202209/datax.tar.gz
wget https://archive.apache.org/dist/hadoop/common/hadoop-3.1.3/hadoop-3.1.3.tar.gz
wget https://archive.apache.org/dist/kafka/2.4.1/kafka_2.11-2.4.1.tgz
# x86_64 alternatives, kept for reference:
# wget https://cdn.mysql.com/archives/mysql-5.7/mysql-5.7.28-1.el7.x86_64.rpm-bundle.tar
# wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
wget https://dlcdn.apache.org/zookeeper/zookeeper-3.5.10/apache-zookeeper-3.5.10-bin.tar.gz
wget https://download.majingyi.com/software/apache-hive-3.1.2-bin.tar.gz --no-check-certificate
wget https://archive.apache.org/dist/hbase/2.0.5/hbase-2.0.5-bin.tar.gz
wget https://download.majingyi.com/software/apache-phoenix-5.0.0-HBase-2.0-bin.tar.gz
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.6.0.tar.gz
wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v6.6.0/elasticsearch-analysis-ik-6.6.0.zip
# NOTE(review): codeload serves this tarball under the bare tag name, so the
# local file is literally called "3.2.5". Consider `-O redis-3.2.5.tar.gz`,
# but first confirm what filename base/Dockerfile_base expects.
wget https://codeload.github.com/redis/redis/tar.gz/refs/tags/3.2.5

# Packages that are only installed on the hadoop162 image.
# Abort if the directory is missing so the tarballs don't land in base/.
cd ../hadoop162 || exit 1
wget https://artifacts.elastic.co/downloads/kibana/kibana-8.4.3-linux-aarch64.tar.gz
wget https://archive.apache.org/dist/spark/spark-3.0.0/spark-3.0.0-bin-hadoop3.2.tgz
wget https://github.com/zendesk/maxwell/releases/download/v1.28.1/maxwell-1.28.1.tar.gz

# 2. Build the images. They form a dependency chain
# (java8 -> base -> 162/163/164), so stop at the first failed build
# instead of spamming cascading "image not found" errors.
docker build -t bigdata_java8:1.0.0 -f java8/Dockerfile_java8 ./java8 \
    && docker build -t bigdata_base:1.0.0 -f base/Dockerfile_base ./base \
    && docker build -t bigdata_162:1.0.0 -f hadoop162/Dockerfile_162 ./hadoop162 \
    && docker build -t bigdata_163:1.0.0 -f hadoop163/Dockerfile_163 ./hadoop163 \
    && docker build -t bigdata_164:1.0.0 -f hadoop164/Dockerfile_164 ./hadoop164

# 3. Start the containers, mapping each container's sshd (port 22)
# to a distinct host port (58162-58164).
docker run -d --name hadoop162 -p 58162:22 bigdata_162:1.0.0
docker run -d --name hadoop163 -p 58163:22 bigdata_163:1.0.0
docker run -d --name hadoop164 -p 58164:22 bigdata_164:1.0.0

# Open the mapped ssh ports on the host firewall.
# firewall-cmd requires root — use sudo, consistent with the
# /etc/hosts edits below.
sudo firewall-cmd --zone=public --add-port=58162-58164/tcp --permanent
sudo firewall-cmd --reload

# Map the container hostnames to the IPs the default docker bridge hands
# out in start order (assumes no other containers grabbed 172.17.0.2-4 —
# TODO confirm, or read the real IPs via `docker inspect`).
# grep guard keeps the script idempotent: re-runs don't append duplicates.
grep -q 'hadoop162' /etc/hosts || sudo sh -c "echo '172.17.0.2 hadoop162' >> /etc/hosts"
grep -q 'hadoop163' /etc/hosts || sudo sh -c "echo '172.17.0.3 hadoop163' >> /etc/hosts"
grep -q 'hadoop164' /etc/hosts || sudo sh -c "echo '172.17.0.4 hadoop164' >> /etc/hosts"

echo "开始设置虚拟机到容器的免密登录"
# Drop stale host keys (container rebuilds change them).
# known_hosts is a regular file, so plain -f is enough (was rm -rf).
rm -f ~/.ssh/known_hosts
# Push the local public key for both the application user and root on
# every node; output is discarded because ssh-copy-id is chatty.
# NOTE(review): the password is hardcoded in the script (and visible in
# `ps` via sshpass -p) — prefer `sshpass -f <file>` for anything shared.
for user in atguigu root; do
    for host in hadoop162 hadoop163 hadoop164; do
        sshpass -p linshimima888... ssh-copy-id -o StrictHostKeyChecking=no "$user@$host" >/dev/null 2>&1
    done
done
echo "完成设置虚拟机到容器的免密登录"

# First-boot initialization, driven from hadoop162.
# NOTE(review): ~/bin/ssh_auto_copy.sh lives inside the image and is not
# visible here — presumably it distributes ssh keys between the three
# containers; confirm against the image build files.
echo "第一次启动容器, 需要对容器做一些初始化操作, 请耐心等待..."
ssh atguigu@hadoop162 "~/bin/ssh_auto_copy.sh"
echo "初始化完成!!!"
echo "修改默认时区为东八区"
# Set the timezone to Asia/Shanghai (UTC+8) on every node via the xcall
# broadcast helper (also provided inside the image — not visible here).
ssh atguigu@hadoop162 "~/bin/xcall sudo cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime"

#docker create -it \
#  --privileged \
#  --name hadoop162 \
#  --hostname hadoop162 \
#  --add-host=hadoop"${hosts[0]}":${pre_ip}."${hosts[0]}" \
#  --add-host=hadoop"${hosts[1]}":${pre_ip}."${hosts[1]}" \
#  --add-host=hadoop"${hosts[2]}":${pre_ip}."${hosts[2]}" \
#  bigdata_162:1.0.0 \
#  /usr/sbin/init