#!/usr/bin/env bash
#
# author:yang
# script name:sub_install_kafka.sh
# description:用于安装 kafka服务，如果是安装集群，可以配合parent_zk_kafka_install.sh使用，单台安装也可以直接执行此脚本。
source /etc/profile

DOWNLOAD_URL=http://mirror.bit.edu.cn/apache/kafka/2.2.1/kafka_2.12-2.2.1.tgz
DOWNLOAD_DIR=/usr/local/src
INSTALL_DIR=/usr/local
KAFKA_FILE=$(echo $DOWNLOAD_URL|awk -F'/' '{print $NF}')
KAFKA_DIR=$(echo ${KAFKA_FILE}|awk -F'.tgz' '{print $1}') 
LINK=/usr/local/kafka
INSTALL_USER=root
RUN_USER=kafka


CURRENT_USER=$(whoami)
LOG_DIR=/tmp/install_kafka_${CURRENT_TIME}.log

# Create every data directory in a comma- and/or space-separated list and hand
# ownership to ${RUN_USER}.
# $1 - directory list, e.g. "/data/kafka1,/data/kafka2"
# Exits 30 if a directory cannot be created.
function mk_data_dir() {
    local data_dirs data_dir
    # Normalize commas to spaces, then split into an array.
    data_dirs=($(echo "$1" | tr ',' ' '))

    # Fix: the original iterated over ${data_dirs} (first element only), so
    # only ONE of several data directories was ever created.
    for data_dir in "${data_dirs[@]}"; do
        [ -d "${data_dir}" ] || mkdir -p "${data_dir}" || { echo "$(date +"%F %H:%M:%S") [ERROR] 创建${data_dir}失败，请检查.";exit 30; }
        # Reached only when the directory exists / was created successfully.
        chown -R "${RUN_USER}:${RUN_USER}" "${data_dir}"
    done
}

# Download the Kafka archive, unpack it under ${INSTALL_DIR}, create the
# ${LINK} symlink, chown the tree to ${RUN_USER} and publish PATH via
# /etc/profile.d/kafka.sh.
# Exit codes: 10 download dir is a file, 11 mkdir failed, 12 ${LINK} already
# exists, 13 symlink failed, 14 download failed, 15 extraction failed.
function kafka_install() {

    # Ensure the download directory exists and really is a directory.
    [ -f "${DOWNLOAD_DIR}" ] && { echo "$(date +"%F %H:%M:%S") [ERROR] ${DOWNLOAD_DIR}是一个文件，请重新指定下载目录." >> "${LOG_DIR}";exit 10; }
    [ -d "${DOWNLOAD_DIR}" ] || mkdir -p "${DOWNLOAD_DIR}" \
        || { echo "$(date +"%F %H:%M:%S") [ERROR] ${DOWNLOAD_DIR}创建失败，请检查权限配置." >> "${LOG_DIR}";exit 11; }

    # Refuse to install over an existing ${LINK}. Checked BEFORE downloading
    # (the original checked after tar, wasting the download on failure).
    [ -e "${LINK}" -o -L "${LINK}" ] && { echo "$(date +"%F %H:%M:%S") [ERROR] ${LINK}是一个目录或者文件，请处理后重新安装" >> "${LOG_DIR}";exit 12; }

    # Keep a timestamped copy of any previously downloaded archive.
    [ -f "${DOWNLOAD_DIR}/${KAFKA_FILE}" ] && mv "${DOWNLOAD_DIR}/${KAFKA_FILE}" "${DOWNLOAD_DIR}/${KAFKA_FILE}_$(date +"%F_%H:%M:%S")"

    # Download. -f makes curl return non-zero on HTTP errors (404/5xx), so the
    # failure check below is meaningful; the original never checked curl.
    cd "${DOWNLOAD_DIR}" && curl -fL -o "${KAFKA_FILE}" "${DOWNLOAD_URL}" \
        || { echo "$(date +"%F %H:%M:%S") [ERROR] ${KAFKA_FILE}下载失败." >> "${LOG_DIR}";exit 14; }

    # Unpack. The original mis-reported a tar failure as a download failure.
    tar xf "${KAFKA_FILE}" -C "${INSTALL_DIR}" \
        || { echo "$(date +"%F %H:%M:%S") [ERROR] ${KAFKA_FILE}解压失败." >> "${LOG_DIR}";exit 15; }

    # Create the version-independent symlink.
    ln -sv "${INSTALL_DIR}/${KAFKA_DIR}" "${LINK}" \
        || { echo "$(date +"%F %H:%M:%S") [ERROR] ${LINK}软链接创失败." >> "${LOG_DIR}";exit 13; }

    # Hand the install tree to the runtime user.
    chown -R "${RUN_USER}:${RUN_USER}" "${LINK}/"

    # Publish kafka's bin dir on PATH for all users.
    echo "export PATH=\$PATH:${LINK}/bin" > /etc/profile.d/kafka.sh
    source /etc/profile
    # Typo fixes: 'srouce' -> 'source', '环境变' -> '环境变量'.
    echo "kafka服务安装完成，若环境变量未生效，请执行'source /etc/profile'命令."
}

# Render ${LINK}/config/server.properties for this broker.
# $1 - listener IP, $2 - broker.id, $3 - zookeeper connect string,
# $4 - comma-separated data directories (becomes log.dirs)
function kafka_config() {
    local date_time
    date_time=$(date +%F-%H:%M:%S)
    local conf_file=${LINK}/config/server.properties
    local ip=$1
    local broker_id=$2
    local zk=$3
    local log_dirs=$4
    local cores

    # Back up the stock configuration before editing it in place.
    cp "${conf_file}" "${conf_file}_${date_time}"
    echo -n "即将开始生成配置文件";
    # Unique broker id per cluster member.
    sed -i "s@^broker.id=.*@broker.id=${broker_id}@g" "${conf_file}"
    # Create the data directories and hand them to ${RUN_USER}.
    mk_data_dir "${log_dirs}"
    # Point log.dirs at the data directories.
    sed -i "s@log.dirs=.*@log.dirs=${log_dirs}@g" "${conf_file}"
    # Network threads = physical core count. Fix: the original used
    # ${param:0-1:1}, i.e. only the LAST DIGIT of the "cpu cores" line,
    # which is wrong on machines with 10+ cores.
    cores=$(awk -F': *' '/cpu cores/{print $2; exit}' /proc/cpuinfo)
    [ -n "${cores}" ] || cores=1
    sed -i "s@num.network.threads=.*@num.network.threads=${cores}@g" "${conf_file}"
    # IO threads = twice the core count.
    sed -i "s@num.io.threads=.*@num.io.threads=$((cores * 2))@g" "${conf_file}"
    # Replicate internal topics across 3 brokers; require 2 in-sync replicas.
    sed -i "s@offsets.topic.replication.factor=.*@offsets.topic.replication.factor=3@g" "${conf_file}"
    sed -i "s@transaction.state.log.replication.factor=.*@transaction.state.log.replication.factor=3@g" "${conf_file}"
    sed -i "s@transaction.state.log.min.isr=1@transaction.state.log.min.isr=2@g" "${conf_file}"
    # Max socket request size (200 MB).
    sed -i "s@socket.request.max.bytes=.*@socket.request.max.bytes=209715200@g" "${conf_file}"
    # Zookeeper connection string and timeout.
    sed -i "s@zookeeper.connect=.*@zookeeper.connect=${zk}@g" "${conf_file}"
    sed -i "s@zookeeper.connection.timeout.ms=.*@zookeeper.connection.timeout.ms=100000@g" "${conf_file}"
    # Uncomment and set listeners (data on 9092, controller on 9093).
    sed -i "s@#listeners=.*@listeners=PLAINTEXT://${ip}:9092,CONTROLLER://${ip}:9093@g" "${conf_file}"

    # Map each listener name to its security protocol.
    sed -i "s@listener.security.protocol.map=.*@listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL,CONTROLLER:PLAINTEXT@g" "${conf_file}"
    # Dedicated control-plane listener.
    sed -i "s@control.plane.listener.name=.*@control.plane.listener.name=CONTROLLER://${ip}:9093@g" "${conf_file}"

    # Delay before the group coordinator triggers the first rebalance.
    sed -i "s@group.initial.rebalance.delay.ms=.*@group.initial.rebalance.delay.ms=3000@g" "${conf_file}"
    # Test-environment convenience: allow auto topic creation/deletion.
    echo "" >> "${conf_file}"
    echo "# Enable auto creation of topic on the server" >> "${conf_file}"
    echo 'auto.create.topics.enable = true' >> "${conf_file}"
    echo '' >> "${conf_file}"
    echo "# Enables delete topic. Delete topic through the admin tool will have no effect if this config is turned off" >> "${conf_file}"
    echo 'delete.topic.enable=true' >> "${conf_file}"
    echo '' >> "${conf_file}"
    echo '# The largest record batch size allowed by Kafka. If this is increased and there are consumers older than 0.10.2, the consumers fetch size must also be increased so that the they can fetch record batches this large.' >> "${conf_file}"
    echo '# In the latest message format version, records are always grouped into batches for efficiency. In previous message format versions, uncompressed records are not grouped into batches and this limit only applies to a single record in that case.' >> "${conf_file}"
    echo '# This can be set per topic with the topic level max.message.bytes config.' >> "${conf_file}"
    # Accept messages up to 30 MB.
    echo 'message.max.bytes=31457280' >> "${conf_file}"
}

# Abort (and log) unless running as the install user (root).
[ "${CURRENT_USER}" != "${INSTALL_USER}" ] && { echo "$(date +"%F %H:%M:%S") [ERROR] please run user as root!" >> "${LOG_DIR}";exit 1; }

# Create the runtime user if it does not exist yet.
id "${RUN_USER}" &> /dev/null || useradd "${RUN_USER}"

# Require all four positional arguments (ip, broker id, zk connect, log dirs).
[ $# -ge 4 ] || { echo "Usage: $0 <ip> <broker_id> <zk_connect> <log_dirs>" >&2;exit 2; }

input_ip=$1
broker_id=$2
zk_conn=$3
log_dirs=$4

# Fix: take only the first IPv4 address; the original 'grep inet' also matched
# inet6 lines and could yield several addresses, breaking the comparison below.
host_ip=$(ifconfig | grep -w 'inet' | grep -v '127.0.0' | awk '{print $2}' | head -n1)

if [ -n "${host_ip}" ] && [ "${input_ip}" != "${host_ip}" ]; then
    echo "$(date +"%F %H:%M:%S") [ERROR] 输入IP:${input_ip}与本机IP:${host_ip}不一致,将直接使用${host_ip}." >> "${LOG_DIR}"
    # Fix: the log message promised to use ${host_ip}, but the original still
    # passed the (wrong) input IP to kafka_config.
    input_ip=${host_ip}
fi

kafka_install
kafka_config "${input_ip}" "${broker_id}" "${zk_conn}" "${log_dirs}"