#!/bin/bash

# Generate a kafka configuration file that is identical on all hosts
#######################################
# Render a Kafka server.properties, identical for all hosts, into
# /tmp/server.properties. Per-host fields (broker.id, HRSLISTENERS)
# are patched later by setup_kafka on each remote host.
# Globals (read): HRS_ZOOKEEPER_HOSTS, HRS_ZOOKEEPER_clientPort,
#                 HRS_KAFKA_listenersPort, HRS_KAFKA_LOGSDIR
# Outputs: writes /tmp/server.properties
#######################################
conf_kafka() {

  echo_info "conf kafka"
  local zk_connect=""
  local host
  # Build "host1:port,host2:port,..." for zookeeper.connect; word-splitting
  # of the whitespace-separated host list is intentional here.
  for host in $HRS_ZOOKEEPER_HOSTS; do
    if [ -z "$zk_connect" ]; then
      zk_connect="${host}:${HRS_ZOOKEEPER_clientPort}"
    else
      zk_connect="${zk_connect},${host}:${HRS_ZOOKEEPER_clientPort}"
    fi
  done
  cat >/tmp/server.properties <<EOF
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0

delete.topic.enable=true
auto.create.topics.enable=false
message.max.bytes=3145728

# The number of threads handling network requests
num.network.threads=3
# The number of threads doing disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=6000
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
# The default number of log partitions per topic.
num.partitions=1
# The minimum age of a log file to be eligible for deletion
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
# segments don't drop below log.retention.bytes.
log.retention.bytes=307374182400
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# *****  CHANGED START *****

listeners=PLAINTEXT://HRSLISTENERS:$HRS_KAFKA_listenersPort
log.dirs=$HRS_KAFKA_LOGSDIR
zookeeper.connect=${zk_connect}

# *****  CHANGED END  *****

EOF

}

# Arguments: start / stop
#######################################
# Start or stop the Kafka broker on every host in HRS_KAFKA_HOSTS,
# then wait until each broker's listener port reflects the requested state.
# Globals (read): HRS_KAFKA_HOSTS, HRS_KAFKA_HOME, HRS_KAFKA_LOGSDIR,
#                 HRS_KAFKA_listenersPort
# Arguments: $1 - "start" or "stop"
#######################################
kafka_ctl()
{
  local action="$1"
  local host
  echo ""
  echo_info "Kafka $action ... ..."
  if [ "$action" == "start" ]; then echo_info "log : $HRS_KAFKA_LOGSDIR/hrskfk.log"; fi
  local value_arr=($HRS_KAFKA_HOSTS)
  for host in "${value_arr[@]}"; do
    echo_info "deal on ${host} ... ..."
    if [ "$action" == "start" ]; then
      # Start the broker detached; stdout/stderr go to the shared log file.
      ssh -o StrictHostKeyChecking=no "$host" "nohup $HRS_KAFKA_HOME/bin/kafka-server-start.sh $HRS_KAFKA_HOME/config/server.properties > $HRS_KAFKA_LOGSDIR/hrskfk.log 2>&1 &"
    else
      # Only attempt a stop when the stop script exists on the remote host.
      ssh -o StrictHostKeyChecking=no "$host" "if [ -f \"$HRS_KAFKA_HOME/bin/kafka-server-stop.sh\" ]; then $HRS_KAFKA_HOME/bin/kafka-server-stop.sh; fi"
    fi
  done

  # Check the listener port on every host until it reaches the expected
  # state (second argument: 0 = expect open after start, 1 = expect closed after stop).
  for host in "${value_arr[@]}"; do
    if [ "$action" == "start" ]; then
      waiting_proc_status "$HRS_KAFKA_listenersPort" 0 "$host start" "$host"
    else
      waiting_proc_status "$HRS_KAFKA_listenersPort" 1 "$host stop" "$host"
    fi
  done
  sleep 1
  echo_info "Kafka $action done."
}

# Install kafka
#######################################
# Install Kafka on every host in HRS_KAFKA_HOSTS: stop any running broker,
# render the shared config, distribute the tarball and helper scripts,
# patch per-host settings (broker.id, listener host), export environment
# variables into the remote profile, and start the cluster.
# Globals (read): HRS_KAFKA_HOSTS, HRS_KAFKA_HOME, HRS_KAFKA_LOGSDIR,
#                 HRS_KAFKA_listenersPort, XDIR, SETUP_LOGFILE, HYREN_USER
# Arguments: $1 - step description used in progress messages
#######################################
setup_kafka()
{
  # Single source of truth for the distributed tarball / extracted dir name.
  local kfk_pkg="kafka_2.11-0.10.2.1"

  echo_start "$1 ... ..."
  if confirm_operate "install kafka"; then
    if [ -z "$HRS_KAFKA_HOSTS" ]; then
      echo_warn "The variable HRS_KAFKA_HOSTS does not assign a value, Unable to install! "
      echo_passed "$1 SKIP"
      return 0
    fi
    if [ ! -f "$XDIR/conf/kfk/kafka-server-stop.sh" ] || [ ! -f "$XDIR/conf/kfk/kafka-run-class.sh" ]; then
      die "missing hyren kafka shell."
    fi
    # Stop kafka before reinstalling
    kafka_ctl "stop"
    sleep 3
    conf_kafka
    local kfk_brokerid=0
    local i
    local value_arr=($HRS_KAFKA_HOSTS)
    for i in "${value_arr[@]}"; do
      # broker.id must be a unique integer per broker; assign 1..N by host order.
      kfk_brokerid=$((kfk_brokerid + 1))
      scp "$XDIR/packages/${kfk_pkg}.tgz" "root@${i}:/tmp/" >> "$SETUP_LOGFILE" || { die "scp kfk to [${i}] fail."; }
      # Wipe any previous install, unpack the tarball, and flatten it into HRS_KAFKA_HOME.
      ssh -o StrictHostKeyChecking=no "$i" "rm -rf $HRS_KAFKA_LOGSDIR && rm -rf $HRS_KAFKA_HOME && mkdir -p $HRS_KAFKA_HOME && mkdir -p $HRS_KAFKA_LOGSDIR && tar -zxf /tmp/${kfk_pkg}.tgz -C $HRS_KAFKA_HOME && mv $HRS_KAFKA_HOME/${kfk_pkg}/* $HRS_KAFKA_HOME && rm -rf $HRS_KAFKA_HOME/${kfk_pkg}" || { die "set kfk env on [${i}] fail."; }
      sleep 1
      scp /tmp/server.properties "root@${i}:$HRS_KAFKA_HOME/config/" >> "$SETUP_LOGFILE" || { die "scp property file to [${i}] fail."; }
      ssh -o StrictHostKeyChecking=no "$i" "sed -i \"s/broker.id=0/broker.id=$kfk_brokerid/\" $HRS_KAFKA_HOME/config/server.properties" || { die "modify bokid on [${i}] fail."; }
      # If hostnames contain sed special characters, switch the substitution delimiter below from '/' to '%'.
      ssh -o StrictHostKeyChecking=no "$i" "sed -i \"s/HRSLISTENERS/$i/\" $HRS_KAFKA_HOME/config/server.properties" || { die "modify listener on [${i}] fail."; }
      scp "$XDIR"/conf/kfk/*.sh "root@${i}:$HRS_KAFKA_HOME/bin/" >> "$SETUP_LOGFILE" || { die "scp kfksh to [${i}] fail."; }
      echo "scp: $XDIR/conf/kfk/*.sh root@${i}:$HRS_KAFKA_HOME/bin/" >> "$SETUP_LOGFILE"

      # Export Kafka environment variables into the remote user's profile.
      #ssh -o StrictHostKeyChecking=no $i 'bash -s' < $XDIR/util/func_rmot_setprofile.sh "HRS\ KAFKA\ ENV" "HRSCOMMENT" "$HYREN_USER"
      ssh -o StrictHostKeyChecking=no "$i" 'bash -s' < "$XDIR/util/func_rmot_setprofile.sh" "HRS_KAFKA_HOME" "$HRS_KAFKA_HOME" "$HYREN_USER"
      ssh -o StrictHostKeyChecking=no "$i" 'bash -s' < "$XDIR/util/func_rmot_setprofile.sh" "HRS_KAFKA_LOGSDIR" "$HRS_KAFKA_LOGSDIR" "$HYREN_USER"
      ssh -o StrictHostKeyChecking=no "$i" 'bash -s' < "$XDIR/util/func_rmot_setprofile.sh" "HRS_KAFKA_listenersPort" "$HRS_KAFKA_listenersPort" "$HYREN_USER"

      echo_info "setup [$i] done."
    done
    echo_success "$1"
    sleep 3
    kafka_ctl "start"
    echo_success "start kafka"
    echo_info "****************************************"
    echo_info "KFKRUN HOME       dir  : $HRS_KAFKA_HOME"
    echo_info "KFKRUN LOGS       dir  : $HRS_KAFKA_LOGSDIR"
    echo_info "KFKRUN START     file  : $HRS_KAFKA_LOGSDIR/hrskfk.log"
    echo_info "KFKRUN LISTENERS PORT  : $HRS_KAFKA_listenersPort"
    echo_info "****************************************"

    echo_passed "$1 done."
  else
    echo_passed "$1 SKIP"
  fi
}
