#!/bin/bash
# NOTE(review): this file is a step-by-step runbook, not a script to run in
# one shot — `docker exec -it` below opens an interactive shell, and later
# sections are meant to be executed inside that container.

# Pull the CentOS 8 base image and list local images to confirm it arrived
docker pull centos:8
docker images
# Privileged container running systemd as PID 1 so services (sshd) can be managed
docker run -d --name=centos --privileged centos:8 /usr/sbin/init
# Open an interactive shell inside the container
docker exec -it centos bash

# Back up everything in the current directory, then delete the originals.
# --exclude=backup.tgz keeps tar from trying to archive its own output file
# (otherwise it fails with "file changed as we read it"); all options are
# placed before the operands so the command also works with POSIXLY_CORRECT.
# WARNING: --remove-files deletes every file after it has been archived.
tar -czvf backup.tgz --exclude=backup.tgz --remove-files *

# Point yum at the aliyun mirror. CentOS 8 is EOL, so use the vault repo.
# This runbook targets a centos:8 container; the repos for CentOS 6/7 are
# kept below as commented-out alternatives — apply only ONE of them
# (running all three in sequence would overwrite the repo file with the
# wrong OS version, and wget/curl are alternatives, not both needed).

# CentOS 8:
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
# curl alternative:
# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo

# CentOS 6:
# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-6.10.repo
# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-6.10.repo

# CentOS 7:
# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo


# Clean and rebuild the yum metadata cache (inside the container)
yum clean all && yum makecache

# Install OpenJDK 8 (devel, i.e. with javac) and the SSH client/server
yum install -y java-1.8.0-openjdk-devel openssh-clients openssh-server

# Enable sshd at boot and start it now
systemctl enable sshd && systemctl start sshd

# Back on the host: stop the container and save it as a new image "java_ssh"
docker stop centos
docker commit centos java_ssh


# ---- Hadoop single-node container ----
# Download hadoop 3.2.3 on the host (the original note listed the bare URL,
# which is not a runnable command).
wget https://dlcdn.apache.org/hadoop/common/hadoop-3.2.3/hadoop-3.2.3.tar.gz
# Create the hadoop_single container from the java_ssh image built above
docker run -d --name=hadoop_single --privileged java_ssh /usr/sbin/init
# Copy the tarball into the container (adjust the path if you saved it elsewhere)
docker cp hadoop-3.2.3.tar.gz hadoop_single:/root/
# Enter the container — the remaining steps run inside it
docker exec -it hadoop_single bash
cd /root
tar -zxvf hadoop-3.2.3.tar.gz
mv hadoop-3.2.3 /usr/local/hadoop
# Configure environment variables for future shells.
# Single quotes are essential here: the original used double quotes, so
# $PATH and $HADOOP_HOME were expanded at WRITE time — and since
# HADOOP_HOME was not yet set in the current shell, a broken PATH line
# was written to /etc/bashrc. Single quotes write the text literally, to
# be expanded when /etc/bashrc is sourced.
echo 'export HADOOP_HOME=/usr/local/hadoop' >> /etc/bashrc
echo 'export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin' >> /etc/bashrc
# Also export in the CURRENT shell so the commands below resolve correctly
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin

# Configure hadoop's own environment file
# (JAVA_HOME=/usr resolves because yum installs the java binary as /usr/bin/java)
echo "export JAVA_HOME=/usr" >> "$HADOOP_HOME/etc/hadoop/hadoop-env.sh"
echo "export HADOOP_HOME=/usr/local/hadoop" >> "$HADOOP_HOME/etc/hadoop/hadoop-env.sh"

# Sanity check: should print "Hadoop 3.2.3"
hadoop version

# Create a dedicated hadoop user and give it ownership of the install
adduser hadoop
yum install -y passwd sudo
passwd hadoop   # interactive: sets the hadoop user's password
chown -R hadoop /usr/local/hadoop

# Set up SSH key login for the hadoop user
# NOTE(review): presumably passwordless SSH is needed by the HDFS start
# scripts — confirm; both commands below are interactive.
ssh-keygen -t rsa
ssh-copy-id hadoop@172.17.0.2   # presumably this container's docker bridge IP — verify with `ip addr`

cd "$HADOOP_HOME/etc/hadoop"

# The XML snippets below are EDIT INSTRUCTIONS, not shell commands — the
# original pasted them as raw lines, where `<property>` is parsed as a
# shell redirection and aborts the script. Apply them with an editor.

# Edit core-site.xml and add inside <configuration>:
#   <property>
#       <name>fs.defaultFS</name>
#       <value>hdfs://<container-ip>:9000</value>
#   </property>

# Edit hdfs-site.xml and add inside <configuration>:
#   <property>
#       <name>dfs.replication</name>
#       <value>1</value>
#   </property>


# 格式化文件结构
hdfs namenode -format


# 启动 HDFS
start-dfs.sh

# http://你的容器IP:9870





