#!/bin/bash
# Container entrypoint: bootstraps the ZooKeeper configuration (mirroring the
# official zookeeper image), then generates Hadoop and HBase config files
# before handing control to the container command.

set -e

# Configuration trees baked into this image; never reassigned below.
readonly HADOOP_CONF_DIR=/usr/local/hadoop-3.3.6/etc/hadoop
readonly HBASE_CONF_DIR=/usr/local/hbase-2.5.10/conf

# Allow the container to be started with `--user`.
# When launched as root with the stock ZooKeeper command, fix ownership of the
# data/log directories and re-exec this same script as the unprivileged
# `zookeeper` user via gosu, preserving all original arguments.
# NOTE(review): assumes ZOO_DATA_DIR, ZOO_DATA_LOG_DIR and ZOO_LOG_DIR are set
# in the image environment (as in the official zookeeper image) — confirm.
if [[ "$1" = 'zkServer.sh' && "$(id -u)" = '0' ]]; then
    chown -R zookeeper "$ZOO_DATA_DIR" "$ZOO_DATA_LOG_DIR" "$ZOO_LOG_DIR"
    exec gosu zookeeper "$0" "$@"
fi

# Write zoo.cfg on first start only; an existing configuration is preserved.
if [[ ! -f "$ZOO_CONF_DIR/zoo.cfg" ]]; then
    CONFIG="$ZOO_CONF_DIR/zoo.cfg"

    # Core settings, one key=value per line, all taken from the environment.
    printf '%s\n' \
        "dataDir=$ZOO_DATA_DIR" \
        "dataLogDir=$ZOO_DATA_LOG_DIR" \
        "tickTime=$ZOO_TICK_TIME" \
        "initLimit=$ZOO_INIT_LIMIT" \
        "syncLimit=$ZOO_SYNC_LIMIT" \
        "autopurge.snapRetainCount=$ZOO_AUTOPURGE_SNAPRETAINCOUNT" \
        "autopurge.purgeInterval=$ZOO_AUTOPURGE_PURGEINTERVAL" \
        "maxClientCnxns=$ZOO_MAX_CLIENT_CNXNS" \
        "standaloneEnabled=$ZOO_STANDALONE_ENABLED" \
        "admin.enableServer=$ZOO_ADMINSERVER_ENABLED" \
        >> "$CONFIG"

    # Default to a single-node ensemble when no server list is supplied.
    : "${ZOO_SERVERS:=server.1=localhost:2888:3888;2181}"

    # Intentional word-splitting: ZOO_SERVERS is a space-separated list
    # of "server.N=host:peerPort:electionPort;clientPort" entries.
    for member in $ZOO_SERVERS; do
        printf '%s\n' "$member" >> "$CONFIG"
    done

    if [[ -n $ZOO_4LW_COMMANDS_WHITELIST ]]; then
        printf '4lw.commands.whitelist=%s\n' "$ZOO_4LW_COMMANDS_WHITELIST" >> "$CONFIG"
    fi

    # Intentional word-splitting: extra entries are space-separated key=value.
    for extra in $ZOO_CFG_EXTRA; do
        printf '%s\n' "$extra" >> "$CONFIG"
    done
fi

# Seed the ensemble member id on first start; defaults to 1 for standalone.
if [[ ! -f "$ZOO_DATA_DIR/myid" ]]; then
    printf '%s\n' "${ZOO_MY_ID:-1}" > "$ZOO_DATA_DIR/myid"
fi



# Configure hadoop-env.sh — but only once: the original unconditional append
# duplicated this block on every container restart. The guard checks for a
# marker line we always write.
if ! grep -q '^export HADOOP_PID_DIR=' "$HADOOP_CONF_DIR/hadoop-env.sh" 2>/dev/null; then
    # Quoted delimiter: ${HADOOP_HOME} must land literally in hadoop-env.sh
    # and be expanded when Hadoop sources the file, not here.
    cat >> "$HADOOP_CONF_DIR/hadoop-env.sh" <<'EOF'
export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export HADOOP_HOME=/usr/local/hadoop-3.3.6
# BatchMode/StrictHostKeyChecking: non-interactive ssh between cluster nodes.
# ConnectTimeout takes plain seconds — the original "10s" is rejected by ssh.
export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10"
export HADOOP_PID_DIR=${HADOOP_HOME}/pids
EOF
fi

# Generate core-site.xml: default FS on master:9000, local tmp dir, and the
# static web UI user. Quoted heredoc — content is written verbatim.
cat > "${HADOOP_CONF_DIR}/core-site.xml" <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
 <name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
 </property>
<property>
 <name>hadoop.tmp.dir</name>
 <value>/usr/local/hadoop-3.3.6/hdfsdata</value>
 </property>
 <property> 
 <name>hadoop.http.staticuser.user</name>
<value>chunyu</value>
</property>
</configuration>

EOF

# Generate hdfs-site.xml: replication factor 2, permissions disabled for the
# test cluster, NameNode web UI on master:9870. Quoted heredoc — verbatim.
cat > "${HADOOP_CONF_DIR}/hdfs-site.xml" <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
 <name>dfs.replication</name>
<value>2</value>
</property> 
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property> 
<property>
<name>dfs.namenode.http-address</name>
<value>master:9870</value>
</property>
</configuration>

EOF

# Generate mapred-site.xml: run MapReduce on YARN. The quoted heredoc keeps
# $HADOOP_MAPRED_HOME literal — YARN expands it on the cluster, not this shell.
cat > "${HADOOP_CONF_DIR}/mapred-site.xml" <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.application.classpath</name>
<value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
</property>
</configuration>

EOF

# Generate yarn-site.xml: ResourceManager on master, shuffle aux-service, and
# the env-whitelist forwarded to containers. Quoted heredoc — verbatim.
cat > "${HADOOP_CONF_DIR}/yarn-site.xml" <<'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>master</value>
</property> 
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.env-whitelist</name>
<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ,HADOOP_MAPRED_HOME</value>
</property>
</configuration>

EOF

# Configure the Hadoop workers file: one hostname per line, no trailing
# whitespace. The original wrote "master ", "worker1 ", "worker2 " — trailing
# spaces in this file are a well-known cause of ssh/host-resolution failures
# when the start-dfs.sh / start-yarn.sh helpers iterate over it.
printf '%s\n' master worker1 worker2 > "${HADOOP_CONF_DIR}/workers"

# Configure hbase-env.sh — only once: the original unconditional append
# duplicated this block on every container restart. The guard checks for a
# marker line we always write.
if ! grep -q '^export HBASE_MANAGES_ZK=' "$HBASE_CONF_DIR/hbase-env.sh" 2>/dev/null; then
    cat >> "$HBASE_CONF_DIR/hbase-env.sh" <<'EOF'

export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
export HBASE_HOME=/usr/local/hbase-2.5.10
export HBASE_PID_DIR=/usr/local/hbase-2.5.10/pids
# An external ZooKeeper ensemble is used; HBase must not manage its own.
export HBASE_MANAGES_ZK=false
EOF
fi

# Generate hbase-site.xml: HBase root on HDFS, distributed mode, external
# ZooKeeper quorum on all three nodes. Quoted heredoc — content is verbatim.
cat > "${HBASE_CONF_DIR}/hbase-site.xml" <<'EOF'
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
 <property>
 <name>hbase.rootdir</name>
 <value>hdfs://master:9000/hbase</value>
 </property>
  <property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
 <property>
 <name>hbase.zookeeper.quorum</name>
<value>master:2181,worker1:2181,worker2:2181</value>
 </property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/data</value>
</property>    
<property>
<name>hbase.unsafe.stream.capability.enforce</name>
<value>false</value>
</property>
<property>
<name>hbase.wal.provider</name>
<value>filesystem</value>
</property>
</configuration>

EOF

# Configure the HBase regionservers file: one hostname per line, no trailing
# whitespace (the original wrote "worker1 " / "worker2 "; trailing spaces can
# break the ssh loops in the HBase start scripts).
printf '%s\n' worker1 worker2 > "${HBASE_CONF_DIR}/regionservers"

# Configure /etc/hosts. hosts(5) format is "IP-address hostname"; the original
# wrote the fields reversed ("master 192.168.18.130"), which the resolver
# ignores, so cluster hostnames never resolved.
sudo sh -c "printf '%s\n' '192.168.18.130 master' '192.168.18.131 worker1' '192.168.18.132 worker2' >> /etc/hosts"

# Replace this shell with the container command so it runs as PID 1 and
# receives signals directly.
exec "$@"