#!/bin/bash
set -e
shopt -s expand_aliases

#
# 为加速日常的ssh登陆, mongo shell登陆,拷贝文件, 查看状态等工作, 特实现此脚本
# 详细使用说明见 https://iwiki.woa.com/pages/viewpage.action?pageId=368432570
# 
# 缺失的region 和master, 拼音简写 间的映射,请自行补充
#
# 此脚本正常工作需要oss 各组件间的相互配合,特别是etcd, ES, 非常依赖于它们
#
# By sirchen
#

# TODO
# 1. 获取给定region下的所有集群名
# 打印 balancer 状态； ssh 登陆时排除无关log以及打印系统oom log
# 支持log rotate
# print table's stats, config.databases,config.chunks, chunk move and so on
# 一键给集群所有mongod 部上监控,并提示要在prometheus 中配置什么信息
# worker 内存占用
# 给整个集群部署exporter,及给它设置 memory quota
# 如何找一个region 下最大容量的(分片)集群/CRUD 最大的(分片)集群/内存使用最大的(分片)集群等
# 如何获得所有mongos的流量以判断是否有热点

# Associative arrays: https://blog.csdn.net/uestcyms/article/details/97511768
# region -> etcd endpoint on the coop network.
# The finance regions are also kept in this map: the coop and finance
# networks are interconnected and there are only a few finance regions.
# Treat this as a region -> etcd mapping; the primary master is fetched from etcd.
# If machines are decommissioned/moved, remember to fix the addresses
# (http://tapd.oa.com/cmongo/markdown_wikis/show/#1210115521000049315@toc1)
# NOTE: the duplicate [szjr]/[shjr] entries that used to be re-declared on
# the last line (with identical values) have been removed.
declare -A coop_region_etcd_map=([gz]=9.139.10.218 [gz_test]=100.67.37.199 [sh]=9.143.240.70 [bj]=10.53.193.90 [sh_test]=10.247.10.101
        [hk]=9.131.202.147 [nm]=10.116.104.10 [cd]=100.99.37.135 [cq]=100.98.41.4 [szjr]=100.83.160.133
        [dg]=100.120.15.221 [mx]=100.102.6.205 [yd]=9.16.82.10 [xjp]=100.78.31.232 [tg]=9.17.142.18
        [els]=9.28.79.25 [yt]=9.32.206.84 [sz]=9.122.168.211 [tj]=10.53.169.4 [nj]=9.109.41.26
        [tw]=9.125.82.10 [shjr]=10.48.22.207 [md]=9.12.143.41 [bm]=9.6.88.241 [hg]=10.165.143.145
        [sh_tcb]=100.105.53.112 [gz_tcb]=9.228.245.229)

# The user_00 password may be wrong for sh_k; log in there as root instead.
declare -A idc_region_etcd_map=(
        [hk_internal]=9.26.125.139 [sh_ckv]=10.62.180.123 [sz_ckv]=100.94.134.150
        [tj_arch]=9.92.167.143 [tj_ckv]=9.92.237.123 [sz_arch]=10.175.133.166
        [sh_arch]=10.246.146.240 [sz_music]=100.65.128.127 [hk_international]=9.74.164.92
        [xian_test]=10.198.11.83 [sh_k]=9.97.121.247)

# The region derived from a cluster name is in Chinese, so map the Chinese
# region names to their PinYin acronyms here.
# Coop and IDC regions share this single map, which may not be ideal.
declare -A region_map=(
        [广州]=gz [广州测试专区]=gz_test [上海]=sh [北京]=bj [上海测试]=sh_test
        [香港]=hk [南美]=nm [成都]=cd [重庆]=cq [深圳金融]=szjr
        [德国]=dg [美西]=mx [印度]=yd [新加坡]=xjp [泰国]=tg
        [俄罗斯]=els [亚太地区]=yt [深圳]=sz [天津]=tj [南京]=nj
        [台湾]=tw [上海金融]=shjr [美东]=md [北美]=bm [韩国]=hg
        [上海_ckv冷数据]=sh_ckv [深圳_ckv冷数据(SNG)]=sz_ckv [天津_ckv冷数据]=tj_ckv
        [香港内部云]=hk_internal [深圳基架]=sz_arch [上海基架]=sh_arch
        [天津基架]=tj_arch [深圳音乐]=sz_music [香港国际版]=hk_international
        [上海K歌]=sh_k [西安测试]=xian_test [上海TCB]=sh_tcb [广州TCB]=gz_tcb)

# Reverse map: region PinYin acronym -> Chinese region name.
# NOTE: [sz_arch] used to be "sz_深圳基架"; the stray "sz_" prefix was a typo
# (region_map maps [深圳基架]=sz_arch), so it has been dropped.
declare -A region_English_2_Chinese_map=([gz]=广州 [gz_test]=广州测试专区 [sh]=上海 [bj]=北京 [sh_test]=上海测试专区
        [hk]=香港 [nm]=南美 [cd]=成都 [cq]=重庆 [szjr]=深圳金融
        [dg]=德国 [mx]=美西 [yd]=印度 [xjp]=新加坡 [tg]=泰国
        [els]=俄罗斯 [yt]=亚太地区 [sz]=深圳 [tj]=天津 [nj]=南京
        [tw]=台湾 [shjr]=上海金融 [md]=美东 [bm]=北美 [hg]=韩国
        [sh_ckv]=上海_ckv冷数据 [sz_ckv]=深圳_ckv冷数据\(SNG\) [tj_ckv]=天津_ckv冷数据 [hk_internal]=香港内部云 [sz_arch]=深圳基架
        [sh_arch]=上海基架 [tj_arch]=天津基架 [sz_music]=深圳音乐 [hk_international]=香港国际版 [sh_k]=上海K歌
        [xian_test]=西安测试 [sh_tcb]=上海TCB [gz_tcb]=广州TCB [bjjr]=北京金融)

# The coop metadata can still reach overseas clusters, but details such as
# instance counts may disagree with the overseas view, so overseas instances
# should still be inspected through the overseas page.
declare -A coop_region_English_2_Chinese_map=(
        [gz]=广州 [gz_test]=广州测试专区 [sh]=上海 [bj]=北京 [sh_test]=上海测试专区
        [hk]=香港 [nm]=南美 [cd]=成都 [cq]=重庆 [dg]=德国
        [mx]=美西 [yd]=印度 [xjp]=新加坡 [tg]=泰国 [els]=俄罗斯
        [yt]=亚太地区 [sz]=深圳 [tj]=天津 [nj]=南京 [tw]=台湾
        [md]=美东 [bm]=北美 [hg]=韩国
        [xian_test]=西安测试 [sh_tcb]=上海TCB [gz_tcb]=广州TCB)

# Overseas regions (English acronym -> Chinese name)
declare -A overseas_region_English_2_Chinese_map=(
        [hk]=香港 [nm]=南美 [dg]=德国 [mx]=美西
        [yd]=印度 [xjp]=新加坡 [tg]=泰国 [els]=俄罗斯
        [yt]=亚太地区 [tw]=台湾 [md]=美东 [bm]=北美 [hg]=韩国)

# Finance-zone regions (English acronym -> Chinese name)
declare -A finance_region_English_2_Chinese_map=(
        [szjr]=深圳金融 [shjr]=上海金融 [bjjr]=北京金融)

# Network section -> Elasticsearch endpoint used for metadata queries
declare -A region_es_map=(
        [coop]=9.246.23.99:9200 [idc]=10.50.96.130:9200
        [finance]=100.83.160.133:9200 [idc_test]=9.92.177.202:9200)

region=''

# Network section to inspect; defaults to the coop network
network='coop'
# and hence to the coop ES endpoint
ES=${region_es_map[$network]}

cluster=''
perf_interval=10
lines=10000
no_pri=false
no_conn=false
Override=false
# Files copied to a remote host land in this directory
temp_dir="/tmp"
exporter_addr="9.254.211.188"
MONGODS_INFO_HEADER="db_size_kB,rs_name,wt_files_num,RSS_kB,mem_used,root_dir,ip"
var_log_messages_keywords="out of memory|killed process|page allocation failure| error|cancel|blocked for more than|Linux version |Unable to allocate memory"

alias ssh_cmd="sshpass -p \"$PASSWD\" ssh -o \"StrictHostKeyChecking no\" -p 36000 -o ConnectTimeout=3"
# Single-quoted on purpose: $cluster, $USER_SALT, $MD5_SALT and $h are still
# empty/unset at this point and must expand when the alias is USED, not when
# it is defined (the old double-quoted form baked empty values into the alias).
alias shell_cmd='mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin'

# Log files of locally executed commands go here
mkdir -p tmp

Global_worker_array=()

# echo -e coloring does not survive plain ssh; use printf remotely instead:
# ssh user_00@xxx "printf '[\33[01;32m  green  \33[01;37m]\n'"
echo_with_color() {
    # Blue message preceded by a blank line; %b keeps echo -e escape handling.
    printf '\n\033[34m%b\033[0m\n' "$1"
}

echo_red() {
    # Red message; %b keeps echo -e escape handling.
    printf '\033[31m%b\033[0m\n' "$1"
}

# Italics may not render inside tmux.
echo_cyan() {
    # Italic cyan message; %b keeps echo -e escape handling.
    printf '\e[3;36m%b\e[0m\n' "$1"
}
echo_yellow() {
    # Yellow message; %b keeps echo -e escape handling.
    printf '\033[33m%b\033[0m\n' "$1"
}
echo_gray() {
    # Bright-black (gray) message; %b keeps echo -e escape handling.
    printf '\033[90m%b\033[0m\n' "$1"
}
echo_magenta() {
    # Magenta message; %b keeps echo -e escape handling.
    printf '\e[35m%b\e[0m\n' "$1"
}

error() {
    # Print a red message (preceded by a blank line) and abort the script.
    printf '\n\033[31m%b\033[0m\n' "$1"
    exit 1
}

# Periodically snapshot heap/CPU pprof profiles of the local `worker`
# process into ./profile/, whenever its RSS has grown by ~10 GB since the
# previous snapshot. Loops forever (sleep 10 between checks).
# NOTE(review): assumes exactly one `worker` process (pidof can return
# several pids, which would break the /proc path) and a pprof endpoint at
# the hard-coded address below — confirm before reuse.
get_heap_profile() {
    previous_rss_kB=0
    while true; do
        # current RSS of the worker in kB (VmRSS line of /proc/<pid>/status)
        worker_rss_kB=`cat /proc/$(pidof worker)/status | grep -i VmRSS | awk '{print $2}'`
        diff=`echo "$worker_rss_kB-$previous_rss_kB" | bc`
        # take a profile for every ~10 GB of RSS growth
        if [ $diff -gt 10475760 ]; then
            curl -s http://9.59.11.158:6060/debug/pprof/heap -o profile/heap-$(date '+%m%d_%H:%M:%S')-$worker_rss_kB.prof
            curl -s http://9.59.11.158:6060/debug/pprof/profile -o profile/cpu-$(date '+%m%d_%H:%M:%S')-$worker_rss_kB.prof
            previous_rss_kB=$worker_rss_kB
        fi
        sleep 10
    done
}

# Aggregate a slow-query log file ($1) into per-minute statistics.
# Each input line is expected to look like
#   2020-10-03T11:40:39.978+0800 ... <latency>ms
# i.e. the first '.'-separated field is the timestamp down to the second and
# the last whitespace-separated field is the latency in integer ms.
# Prints one "minute   count  min  max" row per minute found in the log.
analyze_slowlog() {
    preminute=""
    max=0
    min=0
    count=0
    first=true
    while read -r l; do
        # timestamp truncated to the minute, e.g. "2020-10-03T11:40"
        date1=$(echo "$l" | awk -F . '{print $1}')
        minute=${date1%:*}
        # latency in milliseconds (strip the trailing "ms")
        t=$(echo "$l" | awk '{print $NF}' | awk -F "ms" '{print $1}')
        if [ "$first" = true ]; then
            # Seed the statistics from the first record instead of from the
            # old sentinel values (max=99 used to understate the first
            # minute's max whenever all of its latencies were below 99 ms).
            first=false
            preminute=$minute
            count=1
            max=$t
            min=$t
        elif [ "$minute" != "$preminute" ]; then
            # minute rolled over: flush the previous minute's statistics
            printf "%s   %-8s%-10s%-10s\n" "$preminute" "$count" "$min" "$max"
            count=1
            preminute=$minute
            max=$t
            min=$t
        else
            count=$((count+1))
            if [ "$t" -gt "$max" ]; then
                max=$t
            fi
            if [ "$t" -lt "$min" ]; then
                min=$t
            fi
        fi
    done < "$1"
    # flush the final minute; print nothing at all for an empty input file
    if [ "$first" = false ]; then
        printf "%s   %-8s%-10s%-10s\n" "$preminute" "$count" "$min" "$max"
    fi
}

# Gather all facts about one container's process in a single pass, so the
# caller needs only one ssh round-trip (this function is serialized with
# `declare -f` and executed on the remote host — see get_conns_and_pid_info).
# $1 - container id "ip:port:container_num"; $2 - node type (mongod/mongos/...).
# Output: one record whose fields are joined with '&' (see the `res=` line
# below for the field order); when several candidate pids match, the pid
# list is printed instead and the function returns early.
get_pid_info() {
    con_id=$1
    _ip=${con_id%%:*}
    _type=$2
    container_num=${con_id##*:}

    # For now only the deployment layout of mongod hosts is of interest
    if [ "$_type" = "mongod" ]; then
        # Sorted by db size (descending) by default.
        # Everything behind the pipes must be escaped: this body is
        # re-serialized and evaluated on the remote side.
        _containers_info_on_this_host=`ps aux | grep 'mongod.conf' | grep -o '/data.*[0-9]' | egrep -v 'bash|grep' | awk '{print \$3}' | while read l; do con_num="\${l##*/}"; _mem_quota=\$(cat /sys/fs/cgroup/memory/container-$con_num/memory.limit_in_bytes); dbsize=\`du -s \$l/db | awk '{print \$1}'\`; replName=\`grep -o cmgo-.* \$l/conf/mongod.conf\`; wtFileNum=\`find \$l/db -name '*.wt' | wc -l\`;  rss=\`ps aux | grep "\$l" | grep -v grep | awk '{print \$6}'\`; _mem_ratio=\`awk "BEGIN{print \$rss * 1024/\$_mem_quota*100}"\`; echo "\$dbsize,\$replName,\$wtFileNum,\$rss,\${_mem_ratio}%,\$l,\$_ip" ; done | sort -rn`
    fi

    # Don't filter on container_num alone: that would also match unrelated
    # processes such as the `bash -l` carrying the same number
    ps_line=`ps aux | grep "$container_num" | egrep 'bin/mongod|bin/mongos|bin/proxy' | grep -v grep`
    local_pid=$(echo "$ps_line" | awk '{print $2}')
    lines=$(echo "$local_pid" | wc -l)

    worker_pid=`ps aux | grep '[/]worker' | awk '{print $2}'`
    if [ x"$worker_pid" = x ]; then
        worker_pid=none
    else
        _worker_mem="`cat /proc/$worker_pid/status | grep RSS | awk '{print $2}'` KB"
    fi

    if [ "$lines" -gt 1 ]; then
        # Normally a single line, but multiple matches can still occur:
        # https://git.woa.com/sirchen/cmg-tool/issues/4
        echo "$local_pid"
        return
    fi

    pid=$local_pid
    
    res=""
    if [ x"$pid" != x ]; then
        exe=`readlink -e /proc/$pid/exe`
        # Known issue here that needs a fix:
        # https://git.code.oa.com/sirchen/cmg-tool/issues/5
        root_directory=$(dirname $(dirname $exe))
        top_dir=`echo $root_directory | awk -F / '{print FS $2}'`
        cwd=`readlink -e /proc/$pid/cwd`
        if [ -e "$root_directory/db" ]; then
            db_size=`du -sh $root_directory/db | awk '{print $1}'`
        else
            # Don't leave it empty, or the caller's positional fields shift
            db_size="none"
        fi
        df_partition_line=`df -h | grep "$top_dir" || echo none`
        if [ x"$df_partition_line" != x"none" ]; then
            total_disk=`echo "$df_partition_line" | awk '{print $2}'`
            total_disk_util=`echo "$df_partition_line" | awk '{print $5}'`
        else
            total_disk="none"
            total_disk_util="none"
        fi

        memory_quota=0
        cpu_core_num=0
        # By default only mongod (and mongos) carry a cgroup quota
        if [ "$_type" = "mongod" -o "$_type" = "mongos" ]; then
            memory_quota=`echo "$(cat /sys/fs/cgroup/memory/container-$container_num/memory.limit_in_bytes)/1024/1024" | bc`
            cpu_core_num=`echo "$(cat /sys/fs/cgroup/cpu/container-$container_num/cpu.cfs_quota_us) / $(cat /sys/fs/cgroup/cpu/container-$container_num/cpu.cfs_period_us) " | bc`
        fi

        containers_count=`ps aux | grep '[c]ontainers/.*.conf' | wc -l`
        _total_mem_readable=`free -h | grep Mem | awk '{print $2}'`
        _total_mem=`free | grep Mem | awk '{print $2}'`
        _used_mem=`free | grep Mem | awk '{print $3}'`
        _used_mem_ratio="\033[31m`awk -v a=$_total_mem -v b=$_used_mem 'BEGIN{printf("%.f\n", b/a*100)}'`%\033[0m"
        cpu_and_mem="`lscpu | grep '^CPU(s):' | awk '{print $2}'`c$_total_mem_readable, $_used_mem_ratio"

        # Boot time could be obtained with:
        # who -b | sed -n 's/[[:space:]]*system boot//p'
        # Logged-in users and reboot times:
        # last | grep reboot
        exporter_info=`/usr/sbin/pidof mongodb_exporter | tr ' ' '\n'  | while read l; do echo -n "pid=$l "; cat /proc/$l/status | grep RSS; done | tr '\n' ', '`

        # Used as the return value, '&'-separated; assumes no field contains '&'
        res="$pid&$root_directory&$cwd&$db_size&$total_disk&$total_disk_util&$memory_quota&$cpu_core_num&$containers_count&$cpu_and_mem&$exporter_info&$_worker_mem&$_containers_info_on_this_host"
    else
        echo ""
    fi
    echo "$res&$worker_pid"
}

# Currently only used by the `stop` command.
# $1 - container id "ip:port:container_num".
# Prints the pids of the matching mongod/mongos/proxy processes, space-separated.
__get_pid() {
    local _cid=$1
    local _num=${_cid##*:}

    # Don't filter on the container number alone: that would also match
    # unrelated processes such as a `bash -l` carrying the same number
    local _ps_line
    _ps_line=`ps aux | grep "$_num" | egrep 'bin/mongod|bin/mongos|bin/proxy' | grep -v grep`
    echo "$_ps_line" | awk '{print $2}' | tr '\n' ' '
}

__get_exporter_pid() {
    # Assumes the mongodb_exporter command line carries the cluster name ($1).
    # Prints the matching pids, space-separated.
    cluster=$1
    local _matches
    _matches=`ps aux | grep "mongodb_exporter.*$cluster" | grep -v grep`
    echo "$_matches" | awk '{print $2}' | tr '\n' ' '
}

__get_node_exporter_pid() {
    # The cluster argument is accepted for symmetry with the other helpers
    # but is not used for filtering; prints node_exporter pids, space-separated.
    cluster=$1
    local _matches
    _matches=`ps aux | grep 'node_exporter' | grep -v 'grep'`
    echo "$_matches" | awk '{print $2}' | tr '\n' ' '
}

# Print the usage screen: the region -> master/etcd maps for the coop and
# IDC networks (three entries per row), the available sub-commands and
# options, and a set of worked command-line examples.
help() {
    echo_with_color "------------------------  $0 md5: $(md5sum $0 | cut -d' ' -f1)"
    # print all keys of the coop map, three per row
    echo_with_color "master map across all regions in coop network:"
    i=0
    for k in ${!coop_region_etcd_map[@]}; do
        i=$((i+1))
        mod=$((i%3))
        #echo "mod=$mod"
        if [ $mod = 0 ]; then
            printf "%-6s\t%-15s\n" $k ${coop_region_etcd_map[$k]}
        else
            printf "%-6s\t%-15s  |  " $k ${coop_region_etcd_map[$k]}
        fi
    done

    printf "\n\n"
    echo_with_color "master map across all regions in IDC network:"
    for k in ${!idc_region_etcd_map[@]}; do
        i=$((i+1))
        mod=$((i%3))
        #echo "mod=$mod"
        if [ $mod = 0 ]; then
            printf "%-10s\t%-15s\n" $k ${idc_region_etcd_map[$k]}
        else
            printf "%-10s\t%-15s  |  " $k ${idc_region_etcd_map[$k]}
        fi
    done
    printf "\n\n"

    # sub-command synopsis
    echo_with_color "Command Examples:"
    printf "\t $0 <cmd> [-c cluster_name] [-n network] [-r region] [-i container_num]\n"
    printf "  Available cmd:\n"
    printf "\tstats\t\tinspect sync delay, rs.status, connection and so on\n"
    printf "\tscp\t\tcopy file to remote host\n"
    printf "\tssh\t\tssh login to remote host with user_00\n"
    printf "\tshell\t\tmongo shell connecting to remote host with root account\n"
    printf "\tmongostat\tcall mongostat to remote host\n"
    printf "\terror\t\tinspect ERROR log of one container, just for convenience\n"
    printf "\tmaster\t\tssh to master/etcd host\n"
    printf "\tetcdctl\t\tconnect to etcd host\n"

    printf "  Available options:\n"
    printf "\t-c\t\tcluster name(must given)\n"
    printf "\t-n\t\tnetwork section, available: coop, finance, idc, default coop\n"
    printf "\t-r\t\tregion PinYin acronym(used when region from ES is wrong)\n"
    printf "\t--ii\t\tinterval such as for commands such as mongostat\n"
    printf "\t-f\t\tfile when using scp, error\n"
    printf "\t-i\t\tordering number of expected container(get from commandline output, when -c is given)\n"

    # worked examples
    echo_with_color "  Examples:"
    printf "   %-10s\t\t\t\t\t%-60s\n" "cmg" "show help message"
    printf "   %-10s\t\t%-60s\n" "cmg invalid-cmd -c cmgo-fcywo9uv" "invalid command, but still get node info of cluster: cmgo-fcywo9uv"
    printf "   %-10s\t\t%-60s\n" "cmg ssh -c cmgo-fcywo9uv -i 1" "ssh to remote host whose number matches the one in screen"
    printf "   %-10s\t\t%s\n" "cmg ssh -c cmgo-fcywo9uv -i 1" "ssh to remote host whose number matches the one in screen, and cd into the container directory"
    printf "   %-10s\t\t%-60s\n" "cmg shell -c cmgo-fcywo9uv -i 1" "mongo shell connecting to remote host whose number matches the one in screen"
    printf "   %-10s\t\t\t\t%-60s\n" "cmg master -r mx" "ssh to master according to given region"
    printf "   %-10s\t\t\t%-60s\n" "cmg master -c cmgo-hl5kqy3z" "ssh to master according to given cluster"
    printf "   %-10s\t\t\t\t\t%-60s\n" "cmg etcdctl -r mx --etcd_cmd ls" "connect to etcd according to given region"
    printf "   %-10s\t%-60s\n" "cmg etcdctl -c cmgo-hl5kqy3z --etcd_cmd \"get /cluster/cmgo-hl5kqy3z\"" "connect to etcd according to given cluster"
    printf "   %-10s\t\t\t%-60s\n" "cmg scp -c phoenix-backup -r sh_test -f st.log -i 0" "scp local file to remote host"
    printf "   %-10s\t\t\t%-60s\n" "cmg mstat -c phoenix-backup -r sh_test -i 1 -i 2" "mongostat, region is given when necessary"
    printf "   %-10s\t\t%-60s\n" "cmg l -c cmgo-hl5kqy3z -i 7" "get log"
    printf "   %-10s\t%-60s\n" "cmg perf -c cmgo-hl5kqy3z -i 7" "do perf and generate flamegraph"
    printf "   %-10s\t\t%-60s\n" "cmg ping -c cmgo-hl5kqy3z" "get ping latency between cluster's nodes  "
    printf "   %-10s\t\t\t%-60s\n" "cmg scan -r bj" "get all clusters in some region"
    echo_with_color "More New Usage refer to: https://iwiki.woa.com/pages/viewpage.action?pageId=368432570"
}

# Resolve the PRIMARY "host:port" of the replica set reachable via $1,
# authenticating with the global $cluster credentials.
# Result is stored in the global $Primary ("err" when unreachable).
get_primary() {
    h=$1
    # isMaster may report a vip here (cmongo carries intrusive changes),
    # so rs.status() output is parsed instead:
    #cmd_str="mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval \"JSON.stringify(db.isMaster())\"  | tail -n 1 | jq -r '.primary'"
    cmd_str="mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval \"JSON.stringify(rs.status())\"  | jq -r '.' | grep -B 5 PRIMARY | grep name | grep -o '[0-9].*[0-9]'"
    #echo "=$cmd_str"
    Primary=$(eval "$cmd_str" || echo err)

    # Why doesn't writing it the following way work?
# /data/user_00/mongo_repo/mongo/mongo_wt3.6-v14/bin/mongo mongodb://phoenix-backup:phoenix-backup-bfd5e8-c4ff7d8f54eb311a0190ae73f7d1541e@10.108.117.196:7005/admin --eval "show dbs"
#MongoDB shell version: 3.2.8
#connecting to: mongodb://phoenix-backup:phoenix-backup-bfd5e8-c4ff7d8f54eb311a0190ae73f7d1541e@10.108.117.196:7005/admin
#2020-10-03T11:40:39.978+0800 E QUERY    [thread1] SyntaxError: missing ; before statement @(shell eval):1:5

}

# Print replication-lag information for the node at $1, authenticating as
# user $2 (salted with the global USER_SALT / MD5_SALT).
show_sync_delay() {
    local _host=$1
    local _acct=$2
    local _uri=mongodb://$_acct:$_acct-$USER_SALT-$MD5_SALT@${_host}/admin
    # With only a primary the first eval may print nothing, hence the ||
    mongo $_uri --eval "db.printSlaveReplicationInfo()" | egrep -i -v 'mongo|session|shell' || echo_red "=== may has no slaves"
    mongo $_uri --eval "db.printReplicationInfo()" | egrep -i -v 'mongo|session|shell'
}

# Dump rs.status() from the node at $1 (credentials from the $cluster global).
rs_status() {
    local _node=$1
    mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${_node}/admin --eval "rs.status()"
}
# Dump rs.conf() from the node at $1 (credentials from the $cluster global).
rs_conf() {
    local _node=$1
    mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${_node}/admin --eval "rs.conf()"
}

# Print selected kernel limits, indented for the caller's report.
# (Shipped via `declare -f` and executed remotely — see print_remote_parameter.)
__get_parameter() {
    kernel_para=$(/usr/sbin/sysctl kernel.pid_max kernel.threads-max)
    kernel_para_str=$kernel_para
    #user_proccess_num=`$(ulimit -u)`
    printf '\t\t%s\n' "$kernel_para_str"
}

# Fetch kernel parameters from the host of container id $1 ("ip:port:num")
# by shipping __get_parameter over ssh (ssh_cmd alias, user_00 account).
print_remote_parameter() {
    con_id=$1
    HOST=${con_id%:*}
    IP=${HOST%:*}
    PORT=${HOST#*:}
    ssh_cmd user_00@$IP "$(declare -f __get_parameter); __get_parameter $con_id"
}

# One-stop collection of per-container facts for container id $1
# ("ip:port:container_num") of node type $2: first tail recent errors from
# the host's /var/log/messages, then run get_pid_info remotely over a single
# ssh and split its '&'-separated record into the Container_* / Worker_* /
# real_* globals consumed by the callers.
get_conns_and_pid_info() {
    con_id=$1
    _type=$2
    HOST=${con_id%:*}
    IP=${HOST%:*}
    PORT=${HOST#*:}
    # https://unix.stackexchange.com/questions/124498/about-escape-with-ssh-pipe
    # https://stackoverflow.com/questions/7114990/pseudo-terminal-will-not-be-allocated-because-stdin-is-not-a-terminal
    # https://superuser.com/questions/1005055/disable-permanently-added-host-warning-on-local-lan
    # Occasionally the ssh connection is slow but eventually succeeds.
    # Not needed for now; mongo's current connection count is used instead:
    #estab_conns=$(sshpass -p $PASSWD ssh -o "StrictHostKeyChecking no" -o LogLevel=error -T -p 36000 user_00@${IP} << eof
    #ss -tan | grep ESTAB | grep $HOST | wc -l
#eof
#)

    _date=""
    if [ x$date1 != x ]; then _date="-$date1"; fi
    # grep returns 1 when nothing matches
    ssh_cmd -o LogLevel=error -T user_00@${IP} << eof | tail
    egrep -i "$var_log_messages_keywords" /var/log/messages${_date} | grep -v 'nscd:'
eof

    # Fetch everything of interest with one ssh; fields are separated by '&'
    Container_info=$(ssh_cmd user_00@$IP "$(declare -f get_pid_info); get_pid_info $con_id $_type" || echo "SSH_FAILED")
    #echo "debug: $con_id container info: $Container_info"
    Container_pid=`echo "$Container_info" | awk -F '&' '{print $1}'`
    Container_root_dir=`echo "$Container_info" | awk -F '&' '{print $2}'`
    Container_db_size=`echo "$Container_info" | awk -F '&' '{print $4}'`
    Container_disk_size=`echo "$Container_info" | awk -F '&' '{print $5}'`
    Container_disk_util=`echo "$Container_info" | awk -F '&' '{print $6}'`
    # The actual memory quota: if it was adjusted off-spec, the value stored
    # in etcd differs from the real one
    real_memory_quota=`echo "$Container_info" | awk -F '&' '{print $7}'`
    real_cpu_core_num=`echo "$Container_info" | awk -F '&' '{print $8}'`
    Containers_count=`echo "$Container_info" | awk -F '&' '{print $9}'`
    Cpu_and_mem=`echo "$Container_info" | awk -F '&' '{print $10}'`
    Mongodb_exporter_info=''
    #Mongodb_exporter_info=`echo "$Container_info" | awk -F '&' '{print $11}'`
    Worker_rss=`echo "$Container_info" | awk -F '&' '{print $12}'`
    Containers_info_on_this_host=`echo "$Container_info" | tr '\n' '*' | awk -F '&' '{print $13}' | tr '*' '\n'`
    Worker_pid=`echo "$Container_info" | awk -F '&' '{print $14}'`

    # Must use the `|| echo` form: otherwise a failed grep leaves $?=1.
    # Sometimes inaccurate, see https://git.woa.com/sirchen/cmg-tool/issues/2
    Disk_partition=`echo $Container_root_dir | egrep -o '/data[[0-9]*' || echo err`
    #if [ $Disk_partition = "err" ]; then
    #    echo_red "\tcannot find disk partition on /data or /data{n} for $c, may be DOWN"
    #fi
}

# Refer to master.pb.go ClusterStatus.
# Translate a numeric cluster status ($1, "0".."9") into its symbolic name,
# stored in the global Cluster_status (left untouched on unknown input).
get_cluster_status() {
    local _status_names=(INIT RUNNING RESIZE BACKUP RESTORE ISOLATION
                         DELETING READ_ONLY AUTO_TASK RESTORE_HOT)
    if [[ "$1" =~ ^[0-9]$ ]]; then
        Cluster_status=${_status_names[$1]}
    else
        echo_red "unknown status: $1"
    fi
}

# Load the cluster's metadata document from etcd into the global
# $Clusterinfo (JSON), cache a pretty-printed copy under tmp/, and resolve
# the numeric status into the human-readable global $Cluster_status.
# Uses the globals $ETCD and $cluster.
get_cluster_info() {
    Clusterinfo=$(etcdctl --endpoint=http://${ETCD}:2379 get /cluster/$cluster)
    echo "$Clusterinfo" | jq > tmp/$cluster-info.json
    Cluster_status_num=$(echo "$Clusterinfo" | jq '.base_info.status')
    get_cluster_status $Cluster_status_num
}

# Determine the worker binary's version on the host of container id $1
# ("ip:..."), formatted as "<commit8>-<vernum>/<md5>", and store it in the
# global $Worker_version. The heredoc body runs on the remote host.
get_worker_version() {
    c=$1
    ip=${c%%:*}

    Worker_version=$(ssh_cmd -o LogLevel=error -T user_00@${ip} << eof
        full_ver=\$(./worker/bin/worker --version | cut -d ':' -f2)
        commit=\$(echo "\$full_ver" | cut -d- -f1)
        ver_num=\$(echo "\$full_ver" | cut -d- -f2)
        worker_md5=\$(md5sum ./worker/bin/worker | awk '{print \$1}')
        echo "\${commit:0:8}-\${ver_num}/\${worker_md5}"
eof
)
}

# Determine the mongod/mongos binary version inside container id $1
# ("ip:port:container_num"), formatted "<version>_<commit8>-<vernum>/<md5>",
# and store it in the global $Mongo_version. The heredoc body executes on
# the remote host, hence the escaping; its content is left verbatim.
get_mongo_version() {
    c=$1
    ip=${c%%:*}
    container_num=${c##*:}

    Mongo_version=$(ssh_cmd -o LogLevel=error -T user_00@${ip} << eof
    ps_line=\`ps aux | grep "$container_num" | grep -v grep | grep -v 'bash -c'\`
    #echo "ddd: \$ps_line"

    pid=\`echo "\$ps_line" | awk '{print \$2}'\`
    if [ x\$pid = x ]; then
        mongo_git_version="error"
        echo -e "\033[31mcannot find pid, $c may be DOWN !\033[0m"
    else
        exe=\`readlink -e /proc/\$pid/exe\`
        #echo "pid=\$pid,ddd=\$exe"
        mongo_md5=\`md5sum \$exe | awk '{print \$1}'\`

        root_dir=\`dirname \$(dirname \$exe)\`
        #echo "fff=\$root_dir"

        # mongod 执行需要新的libstdc++库,执行时PRELOAD
        ld_preload=\$(find \$root_dir/bin -name 'libstdc++*' | head -n 1)
        mongo_version_desc=\$(LD_PRELOAD=\$ld_preload \$exe --version)
        # 假设git version 那行以空格做分隔
        mongo_git_version=\$(echo "\$mongo_version_desc" | grep 'git version' | awk '{print \$3}')
        _mongo_git=\$(echo "\$mongo_git_version" | awk -F '-' '{print \$1}')
        _mongo_ver_number=\$(echo "\$mongo_git_version" | awk -F '-' '{print \$2}')
        mongo_version=\$(echo "\$mongo_version_desc" | egrep 'db version |mongos version ' | awk '{print \$3}')
    fi

    # 缩短commit长度
    echo "\${mongo_version}_\${_mongo_git:0:8}-\${_mongo_ver_number}/\${mongo_md5}"
eof
)

}

# Parse the global $Clusterinfo JSON into global container inventories:
# Mongos_container / mongos_container_array / mongos_quotas / mongos_count,
# Config_container, Containers_array (config + mongos + mongod), Rs_num,
# Instances_num, Rs_name_ar, plus the g_has_mongos / has_config flags.
__get_all_Containers_array() {
    Has_mongos=`echo "$Clusterinfo" | jq 'has("mongos_list")'`
    g_has_mongos=true
    mongos_count=0
    if [ $Has_mongos = "true" ]; then
        # newer versions call the field mongos_list
        Mongos_container=`echo "$Clusterinfo" | jq -r '.mongos_list[].container_name'`
        mongos_count=`echo "$Clusterinfo" | jq -r '.mongos_list | length'`
        mongos_quotas=(`echo $Clusterinfo | jq --compact-output -r '.mongos_list[].quota'`)
    else
        # older versions call it proxy_list
        has_proxy=`echo "$Clusterinfo" | jq 'has("proxy_list")'`
        if [ $has_proxy = "true" ]; then
            Mongos_container=`echo "$Clusterinfo" | jq -r '.proxy_list[].container_name'`
            mongos_count=`echo "$Clusterinfo" | jq -r '.proxy_list | length'`
            mongos_quotas=(`echo $Clusterinfo | jq --compact-output -r '.proxy_list[].quota'`)
        else
            # neither mongos nor proxy present: clear the flag
            g_has_mongos=false
        fi
    fi
    mongos_container_array=($Mongos_container)
    
    mongos1_con=$(echo "$Mongos_container" | head -n 1)
    Mongos1_host=${mongos1_con%:*}

    # cmongo 3.2 has no config_list
    has_config=`echo "$Clusterinfo" | jq 'has("config_list")'`
    if [ $has_config = "true" ]; then 
        Config_container=`echo "$Clusterinfo" | jq -r '.config_list[].container_name'`
    fi
    mongod_containers=`echo "$Clusterinfo" | jq -r '.rs_list[].mongod_list[].container_name'`
    Containers_array=(`echo $Config_container $Mongos_container $mongod_containers`)

    Rs_num=`echo "$Clusterinfo" | jq -r '.rs_list | length'`
    Instances_num=`echo "$Clusterinfo" | jq -r '.rs_list' | grep 'container_name' | wc -l`

    rs_names=`echo "$Clusterinfo" | jq -r '.rs_list[].rsName'`
    #mongod_container=`echo "$Clusterinfo" | jq -r '.rs_list[].mongod_list[].container_name'`
    Rs_name_ar=($(echo "$rs_names" | tr '\n' ' '))
}

get_cluster_host() {
    __get_all_Containers_array

    # 当指定 sh -i 或者 ssh -i 时, 不打印container list
    if [ x$id != x ]; then return; fi

    k=-1
    containers_str=''
    Ip_str=''
    found_primary=false

    if [ $has_config = "true" ]; then
        echo_with_color "config server containers:"
        for c in $Config_container; do
            k=$((k+1))
            h=${c%:*}
            container_num=${h##*:}

            if [ $found_primary = "false" ]; then
                get_primary $h
                if [ x$Primary = x"$h" ]; then
                    found_primary=true
                elif [ x$Primary = "err" ]; then
                    echo_red "=== $k -> $c may be DOWN"
                    continue
                fi
            fi

            if [ x$found_primary = x"true" -a x$Primary = x$h ]; then
                if [ $cmd = "stats" -a x$no_conn = x"false" ]; then
                    get_conns_and_pid_info $c configsvr
                    con_str=$(mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "JSON.stringify(db.serverStatus().connections)" || echo err)
                    if [[ "$con_str" =~ "err" ]]; then
                        # 挂的节点不影响正常节点的输出
                        echo_red "=== $k -> $c may be DOWN"
                        continue
                    fi
                    cur=$(echo "$con_str" | tail -n 1 | jq '.current')
                    # 打印出当前container 所处的第一级目录位置

                    # print cores, memory,disk info
                    if [ "$Container_info" = "SSH_FAILED" ]; then
                        echo -e "  $k -> $(echo_cyan $c) ($(echo_red P), conns:$cur)"
                    else
                        echo -e "  $k -> $(echo_cyan $c) ($(echo_red P), $Container_pid, $Disk_partition, $Container_db_size, conns:$cur) ($Cpu_and_mem, containers:$Containers_count)"
                    fi
                    if [ x$Worker_pid = x"none" ]; then
                        echo_red "worker on $c may be DOWN"
                    fi

                elif [ $cmd = "noauth" ]; then
                    res=`mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "db.runCommand({noAuthInfo: 1})"`
                    if [[ "$res" =~ "no such command" ]]; then
                        :
                    else
                        echo "$cluster config $h has noAuthInfo command"
                    fi
                    exit

                elif [ $cmd = "version" ]; then
                    get_mongo_version $c
                    get_worker_version $c
                    echo -e "  $k -> $(echo_cyan $c) ($(echo_red P), $Mongo_version, worker: $Worker_version)"
                    
                else
                    echo -e "  $k -> $(echo_cyan $c) ($(echo_red P))"
                fi
            else
                if [ $cmd = "stats" -a x$no_conn = x"false" ]; then
                    get_conns_and_pid_info $c configsvr
                    con_str=$(mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "JSON.stringify(db.serverStatus().connections)" || echo err)
                    if [[ "$con_str" =~ "err" ]]; then
                        # 挂的节点不影响正常节点的输出
                        echo_red "=== $k -> $c may be DOWN"
                        continue
                    fi
                    cur=$(echo "$con_str" | tail -n 1 | jq '.current')

                    # print cores, memory,disk info
                    if [ "$Container_info" = "SSH_FAILED" ]; then
                        echo -e "  $k -> $(echo_cyan $c) ($Container_pid, conns:$cur)"
                    else
                        echo -e "  $k -> $(echo_cyan $c) ($Container_pid, $Disk_partition, $Container_db_size, conns:$cur) ($Cpu_and_mem, containers:$Containers_count)"
                    fi
                    if [ x$Worker_pid = x"none" ]; then
                        echo_red "worker on $c may be DOWN"
                    fi

                elif [ $cmd = "version" ]; then
                    get_mongo_version $c
                    get_worker_version $c
                    echo -e "  $k -> $(echo_cyan $c) ($Mongo_version, worker: $Worker_version)"
                    
                else
                    echo -e "  $k -> $(echo_cyan $c)"
                fi
            fi

            containers_str="$containers_str $c"
            Ip_str="$Ip_str ${c%%:*}"
        done
    fi

    if [ $g_has_mongos = true ]; then
        File_mongos_para=tmp/$cluster-mongos-parameter.json
        > $File_mongos_para

        # 保持在不同情况下换行
        if [ $Rs_num -gt 1 -a $cmd = "stats" ]; then
            echo -n -e "\n\033[34mmongos containers:\033[0m"
        else
            echo -e "\n\033[34mmongos containers:\033[0m"
        fi
        # 要保证断了一个也能连得上
        for s_idx in $(seq 0 $((mongos_count-1))); do
            c=${mongos_container_array[$s_idx]}
            _container_quota=${mongos_quotas[$s_idx]}
            cpu_core_num=`echo "$_container_quota" | jq -r '.cpu_core_num'`
            mem_quota=`echo "$_container_quota" | jq -r '.memory'`

            container_quota="$_container_quota"

            k=$((k+1))
            h=${c%:*}
            if [ $cmd = "stats" -a x"$no_conn" = x"false" ]; then
                if [ $s_idx = 0 -a $Rs_num -gt 1 ]; then
                    # 怎样用一次连接获取到所有信息 ?
                    _balancerState=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "sh.getBalancerState()" || echo err)
                    _balancerRunning=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "sh.isBalancerRunning()" || echo err)
                    _balancerWindow=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "sh.getBalancerWindow()" || echo err)
                    echo_red "(state: $_balancerState, running: $_balancerRunning, window: $_balancerWindow)"

                    File_mongos_serverStatus=tmp/$cluster-$h-serverStatus.json
                    serverStatus_str=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "JSON.stringify(db.serverStatus(),null,2)" || echo err)
                    echo "$serverStatus_str" > $File_mongos_serverStatus
                elif [ $s_idx = 0 ]; then
                    # 让mongos containers: 打印换行 
                    a=1
                fi

                get_conns_and_pid_info $c mongos
                con_str=$(mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "JSON.stringify(db.serverStatus().connections)" || echo err)
                if [[ "$con_str" =~ "err" ]]; then
                    # 挂的节点不影响正常节点的输出
                    echo_red "=== $k -> $c may be DOWN"
                    continue
                fi

                cur=$(echo "$con_str" | tail -n 1 | jq '.current')

                _mongo_para=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval 'JSON.stringify(db.adminCommand( { getParameter: "*"}))' | jq -r . || echo err)
                echo "--------- $cluster mongos: $c" >> $File_mongos_para
                echo "$_mongo_para" >> $File_mongos_para

                serverStatus_str=$(mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "JSON.stringify(db.serverStatus(),null,2)" || echo err)
                f="tmp/$cluster-$h-$(date '+%Y%m%d%H').serverStatus.json"
                echo "$serverStatus_str" > $f
                marker_line=`awk '$0=="{" {print NR}' $f`
                tmp_remove_line=$((marker_line-1))
                sed -i "1,${tmp_remove_line}d" $f
                pid=$(cat "$f" | jq -r '.pid."$numberLong"')

                if [ "$Container_info" = "SSH_FAILED" ]; then
                    echo -e "  $k -> $(echo_cyan $c) ($pid, conns:$cur)"
                else
                    if [ "$cpu_core_num" != "$real_cpu_core_num" -o "$mem_quota" != "$real_memory_quota" ]; then
                        container_quota="$container_quota -> \033[31m{\"memory\":$real_memory_quota, \"cpu_core_num\":$real_cpu_core_num}\033[0m"
                    fi
                    echo -e "  $k -> $(echo_cyan $c) ($pid, $Disk_partition, conns:$cur) ($Cpu_and_mem, containers:$Containers_count)\t$container_quota"
                fi

                if [ x"$Mongodb_exporter_info" != x ]; then
                    echo_magenta "mongodb_exporter installed in $c : $Mongodb_exporter_info"
                fi

                if [ x$Worker_pid = x"none" ]; then
                    echo_red "worker on $c may be DOWN"
                fi

            elif [ $cmd = "version" ]; then
                get_mongo_version $c
                get_worker_version $c
                echo -e "  $k -> $(echo_cyan $c) ($Mongo_version, worker: $Worker_version)"
                    
            else
                echo -e "  $k -> $(echo_cyan $c)\t$container_quota"
            fi
            containers_str="$containers_str $c"
            Ip_str="$Ip_str ${c%%:*}"
        done  # end of mongos loop
    fi  

    primary_str=''
    # 所有RS mongod 参数全部放到这儿
    File_mongod_para=tmp/$cluster-mongod-parameter.json
    > $File_mongod_para
    # 做成csv 文件便于Excel中打开,同时也不妨碍 sort 排序
    File_mongods_on_mongodhost=tmp/all-mongods-on-$cluster-all-mongodnodes.csv
    echo "$MONGODS_INFO_HEADER" > $File_mongods_on_mongodhost
    File_mongod_serverStatus=''

    #rs_mongod_quota_list=`echo "$Clusterinfo" | jq -r ".rs_list[].mongod_list[].quota"`
    echo_with_color "mongod containers ($(echo_red "RS: $Rs_num, instances: $Instances_num")):"
    for rs_idx in $(seq 0 $((Rs_num-1))); do
        rs_cons=(`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].container_name"`)
        # 它是rs_conns 数组的个数
        rs_conns_len=`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list | length"`
        #rs_quota=(`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].quota"`)
        rs_name=`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].rsName"`
        echo " $(echo_yellow $rs_name)"
        found_primary=false

        #for c in $rs_cons; do
        for rs_mongod_idx in $(seq 0 $((rs_conns_len-1))); do
            c=${rs_cons[$rs_mongod_idx]}
            k=$((k+1))
            h=${c%:*}

            _container_quota=`echo "$Clusterinfo" | jq --compact-output ".rs_list[$rs_idx].mongod_list[$rs_mongod_idx].quota"`
            cpu_core_num=`echo "$_container_quota" | jq -r '.cpu_core_num'`
            mem_quota=`echo "$_container_quota" | jq -r '.memory'`

            # 默认显示 etcd 里的quota 配置,如果实际配置与etcd中不一致,则同时也显示实际配置
            container_quota="$_container_quota"

            local_con_id=${c##*:}
            # when no_pri given, skip get_primary process, for it slows down
            if [ $found_primary = "false" -a $no_pri = "false" ]; then
                get_primary $h
                # Sometimes we get user-side vip from db.isMaster, which is not the real Primary Host
                # 虽然可以通过  db.runCommand({isMaster:1, internal: true}) 获得真正的ip,但是mongostat 用的是 db.runCommand({serverStatus:1}).repl 的结果,
                #   而serverStatus 命令并未对internal 做兼容
                if [[ x$Primary =~ "27017" ]]; then
                    echo_red "found $Primary from db.isMaster()"
                elif [ x$Primary = "err" ]; then
                    echo_red "=== $k -> $c may be DOWN"
                    continue
                fi
            fi
            if [ x$Primary = x"$h" ]; then
                found_primary=true
                primary_str="$primary_str $Primary"
            fi

            if [ x$found_primary = x"true" -a x$Primary = x$h ]; then
                if [ $cmd = "stats" -a x"$no_conn" = x"false" ]; then
                    get_conns_and_pid_info $c mongod
                    con_str=$(mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "JSON.stringify(db.serverStatus().connections)" || echo err)
                    if [[ "$con_str" =~ "err" ]]; then
                        # 挂的节点不影响正常节点的输出
                        echo_red "=== $k -> $c may be DOWN"
                        continue
                    fi

                    echo "$Containers_info_on_this_host" >> $File_mongods_on_mongodhost

                    serverStatus_str=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "JSON.stringify(db.serverStatus(),null,2)" || echo err)
                    _mongo_para=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval 'JSON.stringify(db.adminCommand( { getParameter: "*"}))' | jq -r . || echo err)
                    echo "--------- $rs_name: $c" >> $File_mongod_para
                    echo "$_mongo_para" >> $File_mongod_para
                    wtConfig_str=`echo "$_mongo_para" | jq -r ".wiredTigerEngineRuntimeConfig"`

                    f="tmp/$rs_name-$h.serverStatus.json"
                    echo "$serverStatus_str" > $f

                    # 由于mongod可能很多,File_mongod_serverStatus 只加上主的 $f 文件,不加从了
                    File_mongod_serverStatus="$f"

                    pid=$(cat "$f" | jq -r '.pid."$numberLong"')

                    cur=$(echo "$con_str" | tail -n 1 | jq '.current')

                    if [ "$Container_info" = "SSH_FAILED" ]; then
                        echo -e "  $k -> $(echo_cyan $c) ($(echo_red P), $pid, conns:$cur)"
                    else
                        if [ "$cpu_core_num" != "$real_cpu_core_num" -o "$mem_quota" != "$real_memory_quota" ]; then
                            container_quota="$container_quota -> \033[31m{\"memory\":$real_memory_quota, \"cpu_core_num\":$real_cpu_core_num}\033[0m"
                        fi

                        echo -e "  $k -> $(echo_cyan $c) ($(echo_red P), $pid, $Disk_partition ($Container_disk_size, $(echo_red $Container_disk_util)), $Container_db_size, conns:$cur) ($Cpu_and_mem, containers:$Containers_count, worker_rss: $Worker_rss)\t$container_quota"
                        if [ x"$Mongodb_exporter_info" != x ]; then
                            echo_magenta "mongodb_exporter installed in $c : $Mongodb_exporter_info"
                        fi
                    fi
                    if [ x$Worker_pid = x"none" ]; then
                        echo_red "worker on $c may be DOWN"
                    fi

                    if [ "$wtConfig_str" != "" -a "$wtConfig_str" != "null" ]; then
                        echo -e "  \twiredTigerEngineRuntimeConfig: $wtConfig_str"
                    fi
                    #print_remote_parameter $c

                    # 检查一下 mongod进程的根目录是否在 /data1, /data2等
                    #echo "$Disk_partition" | egrep '^/data.*[[:digit:]]$' > /dev/null || echo_red "\tmongod $c readlink /proc/$pid/exe doesn't point to /data{n}/containers/$local_con_id/bin/mongod . Strange !"

                elif [ $cmd = "version" ]; then
                    get_mongo_version $c
                    get_worker_version $c
                    echo -e "  $k -> $(echo_cyan $c) ($(echo_red P), $Mongo_version, worker: $Worker_version)"
                
                else
                    echo -e "  $k -> $(echo_cyan $c) ($(echo_red P))\t$container_quota"
                fi
            else
                # these are secondaries
                if [ $cmd = "stats" -a x"$no_conn" = x"false" ]; then
                    get_conns_and_pid_info $c mongod
                    con_str=$(mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "JSON.stringify(db.serverStatus().connections)" || echo err)
                    if [[ "$con_str" =~ "err" ]]; then
                        # 挂的节点不影响正常节点的输出
                        echo_red "=== $k -> $c may be DOWN"
                        continue
                    fi

                    echo "$Containers_info_on_this_host" >> $File_mongods_on_mongodhost

                    serverStatus_str=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval "JSON.stringify(db.serverStatus(),null,2)" || echo err)

                    _mongo_para=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval 'JSON.stringify(db.adminCommand( { getParameter: "*"}))' | jq -r . || echo err)
                    echo "--------- $rs_name: $c" >> $File_mongod_para
                    echo "$_mongo_para" >> $File_mongod_para
                    wtConfig_str=`echo "$_mongo_para" | jq -r ".wiredTigerEngineRuntimeConfig"`

                    #wtConfig_str=$(mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${h}/admin --eval 'JSON.stringify(db.adminCommand( { "getParameter": 1, "wiredTigerEngineRuntimeConfig":1}))' | jq -r ".wiredTigerEngineRuntimeConfig" || echo err)

                    f="tmp/$rs_name-$h.serverStatus.json"
                    echo "$serverStatus_str" > $f

                    pid=$(cat "$f" | jq -r '.pid."$numberLong"')

                    cur=$(echo "$con_str" | tail -n 1 | jq '.current')

                    # if cmd is stats, then print connections info
                    if [ "$Container_info" = "SSH_FAILED" ]; then
                        echo -e "  $k -> $(echo_cyan $c) ($pid, conns:$cur)"
                    else
                        if [ "$cpu_core_num" != "$real_cpu_core_num" -o "$mem_quota" != "$real_memory_quota" ]; then
                            container_quota="$container_quota -> \033[31m{\"memory\":$real_memory_quota, \"cpu_core_num\":$real_cpu_core_num}\033[0m"
                        fi

                        echo -e "  $k -> $(echo_cyan $c) ($pid, $Disk_partition ($Container_disk_size, $(echo_red $Container_disk_util)), $Container_db_size, conns:$cur) ($Cpu_and_mem, containers:$Containers_count, worker_rss: $Worker_rss)\t$container_quota"
                        if [ x"$Mongodb_exporter_info" != x ]; then
                            echo_magenta "mongodb_exporter installed: $Mongodb_exporter_info"
                        fi
                    fi
                    if [ x$Worker_pid = x"none" ]; then
                        echo_red "worker on $c may be DOWN"
                    fi

                    if [ "$wtConfig_str" != "" -a "$wtConfig_str" != "null" ]; then
                        echo -e "  \twiredTigerEngineRuntimeConfig: $wtConfig_str"
                    fi
                    #print_remote_parameter $c

                    # 检查一下 mongod进程的根目录是否在 /data1, /data2等
                    #echo "$Disk_partition" | egrep '^/data.*[[:digit:]]$' > /dev/null || echo_red "\tmongod $c readlink /proc/$pid/exe doesn't point to /data{n}/containers/$local_con_id/bin/mongod . Strange !"
                elif [ $cmd = "version" ]; then
                    get_mongo_version $c
                    get_worker_version $c
                    echo -e "  $k -> $(echo_cyan $c) ($Mongo_version, worker: $Worker_version)"
                
                else
                    echo -e "  $k -> $(echo_cyan $c)\t$container_quota"
                fi
            fi
            containers_str="$containers_str $c"
            Ip_str="$Ip_str ${c%%:*}"

            # 每个副本集打印一次mongostat状态
            if [ $rs_mongod_idx = $((rs_conns_len-1)) -a $cmd = "stats" ]; then
                mongostat --username $cluster --password $cluster-$USER_SALT-$MD5_SALT -h $h --authenticationDatabase admin --discover -n 2 | tail -n +3
            fi
        done
        if [ x$Primary = x -a $no_pri = "false" ]; then
            echo_red "$rs_name has no primary !!!"
        fi
    done
    #echo "containers_str=$containers_str"
    Containers_array=($containers_str)
    Ip_array=($Ip_str)


    len=${#Containers_array[@]}

    Primary_ar=($primary_str)

    return
}

print_when_in_READONLY_ISOLATION() {
    # For clusters in READ_ONLY / ISOLATION state, print the sync job and meta
    # entries stored under /cluster_sync/$cluster in etcd.
    # Side effects: sets the globals Sync_src_node_host and sync_src_cluster.
    # Relies on globals: $Cluster_status, $ETCD, $cluster.
    if [ x$Cluster_status = x"READ_ONLY" -o x$Cluster_status = x"ISOLATION" ]; then
        if [ $Cluster_status = "READ_ONLY" ]; then
            # fixed typo: message used to say "RAED_ONLY"
            echo_with_color "This is READ_ONLY cluster"
        else
            echo_with_color "This is ISOLATION cluster"
        fi
        echo "=== Job:"
        job_info=$(etcdctl --endpoint=http://$ETCD:2379 get /cluster_sync/$cluster/job || echo_red "no such job")
        if [[ ! $job_info =~ "no such job" ]]; then
            # odd-looking jq key syntax: https://github.com/stedolan/jq/issues/707
            oplog_start_ts_int64=$(echo "$job_info" | jq -r ".\"${cluster}_0\".oplog_start_ts")
            # mongodb Timestamp packs the seconds into the high 32 bits
            oplog_start_ts=$((oplog_start_ts_int64>>32))
            sync_src_node=$(echo "$job_info" | jq -r ".\"${cluster}_0\".sync_src_node")
            Sync_src_node_host=${sync_src_node%:*}
            echo "$job_info" | jq --compact-output
            echo_with_color "current oplog ts: $oplog_start_ts , $(date -d @$oplog_start_ts)"
        else
            echo_red "job deleted !!!"
        fi
        echo "=== Meta:"
        meta_info=$(etcdctl --endpoint=http://$ETCD:2379 get /cluster_sync/$cluster/meta || echo_red "no such meta")
        if [[ ! $meta_info =~ "no such meta" ]]; then
            sync_src_cluster=$(echo "$meta_info" | jq -r '.src_db')
            echo "$meta_info" | jq --compact-output
        else
            echo_red "meta deleted !!!"
        fi
    fi
}

# Enter the container directory and, in passing, show disk layout / db size.
# Runs on the REMOTE host: this function is never executed locally -- do_ssh
# serializes it with "declare -f" and ships it inside an eval'd, double-quoted
# ssh command string.  The "\$" escapes survive that extra expansion round, so
# those variables are only expanded on the remote side.
tail_log_and_bash() {
    root_dir=\$1
    cd \$root_dir

    # NOTE: a mongos log is also named proxy.log
    #if [ -e "log/proxy.log" ]; then
        # avoid useless log
        #egrep -v "\$filter" log/proxy.log \| tail -n 40

    #    tail -n 20 log/proxy.log
    #elif [ -e "log/mongod.log" ]; then
        #egrep -v "\$filter"  log/mongod.log \| tail -n 40

    #    tail -n 20 log/mongod.log
    #fi

    # show block devices and the on-disk size of the db directory
    lsblk -l
    echo "---------- SIZE: \$\(du -sh db\)"
}

# Runs over ssh; variables that must expand remotely apparently need escaping.
# https://www.unix.com/shell-programming-and-scripting/170834-cannot-create-variables-via-ssh-remote-machine.html
# Prints the ' E ' (error) lines from the tail of the container's log, then
# lists the log directory.  Shipped to the remote host via "declare -f".
tail_latest_error() {
    root_dir=\$1
    file="\$2"
    lines=\$3
    cd \$root_dir
    # pick the default log file; mongos logs are named proxy.log
    if [ -e "log/proxy.log" ]; then
        log_file=proxy.log
    elif [ -e "log/mongod.log" ]; then
        log_file=mongod.log
    fi

    # presumably callers pass an explicit file name prefixed with a literal
    # "x" ("x<name>"), so a bare "x" means "use the default above" -- the #x
    # expansion strips that prefix.  TODO confirm against callers.
    if [ \$file != x ]; then
        log_file=\${file#x}
    fi
    #echo "ddddddddd,root_dir=$root_dir,file=\$file,log_file=\$log_file"
    # NOTE(review): $lines is unescaped, so it expands from the LOCAL $lines
    # when the command string is built; the remote "lines=\$3" assignment
    # above looks unused -- confirm before relying on \$3.
    tail -n $lines log/\$log_file | egrep ' E '
    echo
    ls log
}

do_ssh() {
    # Open an interactive shell on a container host: first run
    # tail_log_and_bash remotely (serialized with "declare -f" and expanded
    # through eval), then drop into a login shell in the container directory.
    # Relies on globals: $root_dir (container dir on the remote host).
    ssh_host=$1
    #cmd="sshpass -p $PASSWD ssh -o \"StrictHostKeyChecking no\" -p 36000 user_00@${ssh_host}" 
    # an interactive bash launched over ssh needs -t (pty allocation)
    #cmd_cd_container_dir="sshpass -p $PASSWD ssh -o \"StrictHostKeyChecking no\" -t -p 36000 user_00@${ssh_host} \"cd $root_dir && tail -n 20 log/mongod.log && bash -l\"" 
    cmd_cd_container_dir="ssh_cmd -t user_00@${ssh_host} \"$(declare -f tail_log_and_bash); tail_log_and_bash $root_dir\"" 

    echo_with_color "starting remote bash ..."
    eval "$cmd_cd_container_dir"
    # print some command hints here, especially for filtering noise out of logs
    echo_with_color "You may need command to filter log:"
    echo -e "\tegrep -v 'start connection from|connection accepted from|Successfully authenticated as|end connection|FlowManager::dump_inlock cost|CMongoMongodStatsManager::dump_inlock|WARN:FlowManager::dump_inlock too many|received client metadata from|so no filter info|Starting new replica set monitor' log/mongod.log"
    # If ps on remote host, we'll see something like: `bash -c cd /data1/containers/68806989; bash -l`
    ssh_cmd -t user_00@${ssh_host} "cd $root_dir; bash -l" 
}

do_top() {
    # Continuously sample the CPU usage of the container process ($pid) on the
    # remote host $IP: append one timestamped top(1) snapshot line per loop to
    # /tmp/top.log there and echo each new line back over the ssh channel.
    # Relies on globals: $con_id, $IP, $pid.
    echo_with_color "showing CPU of $con_id ..."
    # The heredoc delimiter is unquoted, so unescaped substitutions expand
    # LOCALLY.  date must therefore be escaped: the previous `date ...`
    # backticks were expanded once at submission time, stamping every sampled
    # line with the same frozen timestamp.
    ssh_cmd user_00@${IP} << eof
        f=/tmp/top.log
        >\$f
        while true; do
            echo -n "\$(date '+%H:%M:%S') " >> \$f
            top -H -b -n1 | grep $pid >> \$f
            tail -n1 \$f
        done  
eof
    #topfile=${cluster}_${IP}_${pid}.log
    #sshpass -p $PASSWD scp -P 36000 user_00@${IP}:/tmp/top.log tmp/$topfile
}

do_install_rpm() {
    # Copy the rpm package $File (given as a relative path like tmp/xxx.rpm)
    # to every host in $ip_given and install it there as root.
    for ip in $ip_given; do
        echo_with_color "copying packages to $ip:/tmp ..."
        do_scp $ip "$File"

        # need source cmg.root.pwd ahead of time
        check_non_empty $ROOT_PASSWD "root passwd not sourced"
        echo_with_color "installing the new package ..."
        sshpass -p "$ROOT_PASSWD" ssh -o "StrictHostKeyChecking no" -p 36000 -o ConnectTimeout=3 -T root@${ip} << eof
            # -f passes the package as tmp/***.rpm while it was copied into
            # /tmp, so enter / first (as do_perf does) to make the relative
            # path resolve; the previous "cd /tmp" pointed at /tmp/tmp/***.rpm
            cd /
            rpm -Uvh $File
eof
    done
}

do_perf() {
    # Profile the mongo process ($pid) on $IP with perf, build a FlameGraph on
    # the target host, and pull the resulting svg back into tmp/.
    # Relies on globals: $IP, $pid, $con_id, $File, $perf_interval, $cluster,
    # $container_num, $PASSWD, $ROOT_PASSWD, $temp_dir (via do_scp).
    perf_ver=`ssh_cmd user_00@${IP} perf --version`
    # "perf version 4.14.0" -> keep the text before the first '.', then take
    # the third field, i.e. the major version number
    perf_ver_num=`echo $perf_ver | awk -F . '{print $1}' | awk '{print $3}'`
    if [ $perf_ver_num -lt 4 ]; then
        # perf 3.x does not support --call-graph dwarf
        echo_red "perf version too old, need upgrading."
        echo_with_color "copying packages to $IP:/tmp ..."
        do_scp $IP "$File"

        # need source cmg.root.pwd ahead of time
        check_non_empty $ROOT_PASSWD "root passwd not sourced"
        echo_with_color "installing the new perf package ..."
        sshpass -p "$ROOT_PASSWD" ssh -o "StrictHostKeyChecking no" -p 36000 -o ConnectTimeout=3 -T root@${IP} << eof
            # 由于指定-f 时是 tmp/***.rpm, 而包被拷到了 /tmp下,所以这里首先进入 /
            cd /
            rpm -Uvh $File
eof
    fi

    if [ -e "tmp/FlameGraph.tar.bz2" ]; then
        do_scp $IP "tmp/FlameGraph.tar.bz2"
    fi

    echo_yellow "perf for ${perf_interval}s against $con_id ..."
    # sample for the advertised duration; this used to be hard-coded to
    # "sleep 10", contradicting the ${perf_interval}s message above
    ssh_cmd -T user_00@${IP} << eof
    perf record -F 199 -ag --call-graph dwarf -p $pid -o /tmp/perf.data -- sleep $perf_interval || echo -e '\033[31mPERF_ERROR\033[0m'
    if [ -e "/tmp/FlameGraph.tar.bz2" ]; then
        cd /tmp
        tar -xjf FlameGraph.tar.bz2
        cd FlameGraph
        echo "generating svg file, please wait ..."
        # 往往得在目标机器上生成火焰图,否则可能报缺少内核/库符号
        perf script -i ../perf.data | ./stackcollapse-perf.pl | ./flamegraph.pl > ../mongo.svg
    fi
eof

    #local_perf_data=perf.data.${cluster}_${IP}_${container_num}_$(date '+%m%d_%H%M%S')
    #echo_yellow "copying perf.data back to tmp/$local_perf_data ..."
    #sshpass -p $PASSWD scp -P 36000 user_00@${IP}:/tmp/perf.data tmp/$local_perf_data

    if [ -e "tmp/FlameGraph.tar.bz2" ]; then
        svgfile="${cluster}_${IP}_${container_num}_$(date '+%m%d_%H%M%S').svg"
        echo_yellow "copying svg file back to tmp/$svgfile ..."
        sshpass -p $PASSWD scp -P 36000 user_00@${IP}:/tmp/mongo.svg tmp/$svgfile
    fi
}

do_scp() {
    # scp a local file into $temp_dir on the given host (port 36000, password
    # auth via sshpass).
    #   $1 - target ip
    #   $2 - local file path
    local_ip=$1
    local_file="$2"
    echo_with_color "\tscp $local_file to remote host: $local_ip:$temp_dir ..."
    cmd="sshpass -p $PASSWD scp -o \"StrictHostKeyChecking no\" -P 36000 $local_file user_00@${local_ip}:$temp_dir 2>&1" 
    #echo "$cmd"
    _res=`eval "$cmd"`
    # bugfix: this used to test the undefined $res, so "file busy" was never
    # detected; check the variable actually captured above
    if [[ "$_res" =~ "file busy" ]]; then
        echo
    fi
    echo done
}

do_shell() {
    # Open an interactive mongo shell against $HOST with the cluster's
    # salt-derived credentials.
    # Relies on globals: $Containers_array, $id, $cluster, $USER_SALT,
    # $MD5_SALT, $HOST.
    conid=${Containers_array[$id]}

    #shell_dir="mongo-shell-history"
    #mkdir -p $shell_dir
    #f2="$shell_dir/$cluster-${HOST}_$(date '+%Y%m%d').log"
    #echo -e "$(date "+%Y%m%d_%H%M")\n" > $f2

    # https://stackoverflow.com/questions/22565231/printing-mongo-query-output-to-a-file-while-in-the-mongo-shell
    #cmd="mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${HOST}/admin | tee -a $f2"
    # keep cmd local: the global $cmd holds the subcommand name ("stats",
    # "version", ...) and is compared elsewhere; clobbering it here would
    # break any later [ $cmd = ... ] test
    local cmd
    cmd="mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${HOST}/admin"
    echo "connecting command: $cmd"
    eval "$cmd" 
}

# Obsolete: older deployments kept operational data in ES; it moved to
# mongod, so this function is no longer used (see get_region_from_mongo).
# Looks up the Chinese region name of a cluster from the ES cluster_stats
# index and stores it in the global $region_chinese.
get_region_from_ES() {
    cluster=$1
    # How to avoid writing so many quotes
    region_chinese_stmt="curl -s -XPOST \"${ES}/cluster_stats/_search?pretty\" -d \"{ \"query\": { \"match\": {\\\"base_info.cluster_id\\\": \\\"$cluster\\\" } }, \"_source\":[\\\"region_name\\\", \\\"base_info.cluster_id\\\"] }\" | jq -r '.hits.hits[0]._source.region_name'"
    #echo "$region_chinese_stmt"
    region_chinese=$(eval "$region_chinese_stmt")
    #echo "$cluster located on $region_chinese"

    #region=`curl -s -XPOST "${ES}/cluster_stats/_search?pretty" -d "{ "query": { "match": {"base_info.cluster_id": "$cluster" } }, "_source":["region_name", "base_info.cluster_id"] }" | jq -r '.hits.hits[0]._source.region_name'`
}

# Run the regionname lookup for $cluster against one database of the ops
# mongod; prints the result (a missing cluster yields a "(shell)"-tagged
# error line, which callers test for).
#   $1 - database name to query
__query_regionname_from_db() {
    mongo --quiet mongodb://mongouser:2021%40CMongo@9.220.29.45:27017,9.220.29.114:27017,9.220.30.96:27017/admin?authSource=admin << eof | tail -n1
    use $1
    db.cluster_stats.data.findOne({"baseinfo.clusterid": "$cluster"}, {regionname: 1}).regionname
eof
}

# Resolve the Chinese region name of a cluster from the ops mongod, storing
# it in the global $region_chinese.  IDC clusters live in internal_cloud;
# cloud clusters are searched in domestic -> overseas -> finance order.
get_region_from_mongo() {
    cluster=$1
    if [ $network = "idc" ]; then
        echo_with_color "trying to find region from db: internal_cloud"
        region_chinese=$(__query_regionname_from_db internal_cloud)
        return
    fi

    echo_with_color "trying to find region from db: tencent_cloud_domestic"
    region_chinese=$(__query_regionname_from_db tencent_cloud_domestic)

    # a failed lookup leaves something like "@(shell):1:1" in the output
    if [[ $region_chinese =~ "(shell)" ]]; then 
        echo_with_color "not found from db: tencent_cloud_domestic, trying to find region from db: tencent_cloud_overseas"
        region_chinese=$(__query_regionname_from_db tencent_cloud_overseas)
    fi
    if [[ $region_chinese =~ "(shell)" ]]; then
        echo_with_color "not found from db: tencent_cloud_overseas, trying to find region from db: finance_cloud"
        region_chinese=$(__query_regionname_from_db finance_cloud)
    fi

    if [[ $region_chinese =~ "(shell)" ]]; then 
        echo_red "Chinese region obtained failed, please use -r to give region explicitly"
    fi
}

check_non_empty() {
    # Abort via error() when the first argument is empty.
    #   $1 - value to test
    #   $2 - message handed to error()
    msg="$2"
    if [[ -z "$1" ]]; then
        error "$msg"
    fi
}

calculate_time() {
    # Pretty-print a duration given in seconds:
    #   < 60s        -> "Ns"
    #   60..3600s    -> "X.XXmin"   (two decimal places via bc)
    #   > 3600s      -> "X.XXh"
    sec=$1
    if [ "$sec" -lt 60 ]; then
        echo "${sec}s"
    elif [ "$sec" -gt 3600 ]; then
        echo "$(echo "scale=2; $sec/3600" | bc)h"
    else
        echo "$(echo "scale=2; $sec/60" | bc)min"
    fi
}

show_all_cluster_name_on_single_host() {
    # For every running mongod on this host, take the config file path from
    # the "-f .../mongod.conf" argument in ps output, then print the
    # replSetName value found in that config (whitespace stripped).
    ps aux | grep -o '[-]f.*mongod.conf' | awk -F ' ' '{print $2}' \
        | while read conf_file; do
            grep 'replSetName' "$conf_file" | awk -F ':' '{print $2}' | tr -d ' '
        done
}

# Copy the mongos/mongod/proxy binary previously scp'ed into $temp_dir on the
# remote host into the container's bin/ directory, retrying the cp (and
# killing lingering processes) while the target file is busy.
#   $1 - binary type: mongos | proxy | mongod | configsvr
#   $2 - container id formatted as ip:port:container_num
__move_file() {
    mongo_type=$1
    con_id=$2
    container_num=${con_id##*:}
    local_ip=${con_id%%:*}

    # Everything inside the heredoc runs on the remote host.  The delimiter
    # is unquoted, so $mongo_type/$container_num/$temp_dir expand locally
    # while the \$-escaped variables are evaluated remotely.
    ssh_cmd -T user_00@$local_ip << eof
        # 目前container 根目录一般在 /data1/containers, /data2/containers, 或 /data/user_00/containers 下
        tmp_dir=\$(ls -1 / | egrep 'data?')
        for v in \$tmp_dir; do
            echo -e "\tchecking $container_num existence in /\$v/.../containers ..."
            if [ \$v = "data" ]; then
                # 如果是mongos,proxy,可能在 /data/user_00/containers下,或者非标操作导致mongod也在此下
                if [ -e /data/user_00/containers ]; then
                    root_dir=\$(find /data/user_00/containers -maxdepth 2 -name "$container_num")
                fi
            else
                # shard server mongod 应该要在 /data1,/data2 等下
                root_dir=\$(find /\$v -maxdepth 2 -name "$container_num")
            fi
            
            # 只要找到就退出 ,假设 一个container的 根目录不会同时存在于 /data1,/data2 和 /data 之下
            if [ x\$root_dir != x ]; then break; fi
        done
        
        wait_for_copy_ok() {
            src=\$1
            dst=\$2
            while true; do
                code=\$(cp \$src \$dst 2>&1)
                if [[ \$code =~ "Text file busy" ]]; then

                    # 在这里面不能再写成 \$container_num
                    ps_line=\`ps aux | grep "$container_num" | egrep 'bin/mongod|bin/mongos|bin/proxy' | grep -v grep\`
                    pids=\$(echo "\$ps_line" | awk '{print \$2}' | tr '\n' ' ')

                    pids=\$(eval echo "\$pids")
                    echo "ps_line=\$ps_line, pids=\$pids"

                    if [ x"\$pids" != x ]; then
                        echo -e "\tcp code: Text file busy, wait for copy ok, killing \$pids again ..."
                        kill -9 \$pids
                    fi
                    sleep 1
                else
                    echo -e "\t\033[31mcopy ok\033[0m"
                    break
                fi
            done
        }

        if [ x\$root_dir = x ]; then
            echo "$con_id root directory NOT FOUND !!!"
        else
            if [ $mongo_type = "mongos" ]; then 
                if [ -e $temp_dir/mongos ]; then
                    echo -e "\ttransfer $temp_dir/mongos to \$root_dir/bin"
                    wait_for_copy_ok $temp_dir/mongos \$root_dir/bin
                else
                    echo "mongos not copied to $temp_dir !!!"
                fi
            elif [ $mongo_type = "proxy" ]; then
                if [ -e $temp_dir/proxy ]; then
                    echo -e "\ttransfer $temp_dir/proxy to \$root_dir/bin"
                    wait_for_copy_ok $temp_dir/proxy \$root_dir/bin
                else
                    echo "proxy not copied to $temp_dir !!!"
                fi
            elif [ $mongo_type = "mongod" ]; then
                if [ -e $temp_dir/mongod ]; then
                    echo -e "\ttransfer $temp_dir/mongod to \$root_dir/bin"
                    wait_for_copy_ok $temp_dir/mongod \$root_dir/bin
                else
                    echo "mongod not copied to $temp_dir !!!"
                fi
            elif [ $mongo_type = "configsvr" ]; then
                if [ -e $temp_dir/mongod ]; then
                    echo -e "\ttransfer $temp_dir/mongod to \$root_dir/bin"
                    wait_for_copy_ok $temp_dir/mongod \$root_dir/bin
                else
                    echo "configsvr mongod not copied to $temp_dir !!!"
                fi
            fi
        fi
eof
}

# Kill the worker daemon on the given host; when $Override is true, also copy
# the new worker binary from $temp_dir into ~/worker/bin there.
#   $1 - worker host ip
__stop_worker() {
    ip1=$1

    # __get_worker_pid is defined elsewhere in this file and shipped to the
    # remote host via "declare -f"
    pid=$(ssh_cmd user_00@$ip1 "$(declare -f __get_worker_pid); __get_worker_pid")
    
    # strip leading/trailing whitespace
    pid=$(eval echo "$pid")

    if [ x"$pid" = x ]; then
        echo_red "worker on $ip1 may have been DOWN. Check !"
    else
        echo -e "\tstopping worker with pid=$pid on $ip1 ..."
        ssh_cmd -T user_00@$ip1 "kill -9 $pid"

    fi

    if [ $Override = true ]; then
        ssh_cmd -T user_00@$ip1 << eof
            if [ -e $temp_dir/worker ]; then
                echo -e "\ttransfer $temp_dir/worker to ~/worker/bin"
                cp $temp_dir/worker ~/worker/bin
            else
                echo "worker not copied to $temp_dir !!!"
            fi
eof
    fi
}

stop_container() {
    # Kill the given container's process on its host; when $Override is true,
    # also copy the replacement binary into the container's bin/ directory.
    #   $1 - container id formatted as ip:port:container_num
    #   $2 - binary type (mongos | proxy | mongod | configsvr)
    con_id=$1
    mongo_type=$2

    HOST=${con_id%:*}
    IP=${HOST%:*}
    PORT=${HOST#*:}
    container_num=${con_id##*:}

    # __get_pid is shipped to the remote host via "declare -f"
    pid=$(ssh_cmd user_00@$IP "$(declare -f __get_pid); __get_pid $con_id")
    #echo "container $c pid info: $pid_info"
    
    # strip leading/trailing whitespace
    pid=$(eval echo "$pid")
    if [ x"$pid" = x ]; then
        echo_red "$con_id may have been DOWN. Check !"
    else
        echo -e "\tstopping $con_id with pid=$pid ..."
        # kill
        ssh_cmd -T user_00@$IP "kill -9 $pid"
    fi

    # Even when the container is already down, the binary still has to be
    # copied over (this used to be duplicated in both branches above).
    if [ $Override = true ]; then
        __move_file $mongo_type $con_id
    fi
}

# Kill any mongodb_exporter process matching the current $cluster on the
# exporter host.
# NOTE(review): $1 is only used in log output; the kill actually runs on the
# global $exporter_addr -- confirm that is intended.
__stop_exporter() {
    con_id=$1

    #pid=$(ssh_cmd user_00@$exporter_addr "$(declare -f __get_exporter_pid); __get_exporter_pid $cluster")
    #pid=$(eval echo "$pid")
    #if [ x"$pid" = x ]; then
    #    echo_red "exporter on $con_id may not be started"
    #else

    # runs remotely; \$-escaped parts expand on the exporter host
    ssh_cmd -T user_00@$exporter_addr << eof
        pids=\`ps aux | grep "mongodb_exporter.*$cluster" | grep -v grep | awk '{print \$2}'\`
        if [ "x\$pids" != "x" ]; then
            echo -e "\tstopping exporter against $con_id with pid=" \$pids ...
            kill \$pids
        fi
eof
}

# Kill the node_exporter process on $IP.  The pid is discovered remotely by
# __get_node_exporter_pid (defined elsewhere, shipped via "declare -f").
# Relies on globals: $IP, $con_id.
do_stop_node_exporter() {
    pid=$(ssh_cmd user_00@$IP "$(declare -f __get_node_exporter_pid); __get_node_exporter_pid")
    # strip leading/trailing whitespace
    pid=$(eval echo "$pid")
    if [ x"$pid" = x ]; then
        echo_red "node_exporter on $con_id may not be started"
    else
        echo -e "\tstopping node_exporter on $con_id with pid=$pid ..."
        ssh_cmd -T user_00@$IP "kill -9 $pid"
    fi
}

do_stop_mongos() {
    # Stop every mongos-tier container of the current cluster.  Legacy
    # clusters run a "proxy" binary instead of mongos, so dispatch on
    # $Has_mongos to pick the right type.
    for c in $Mongos_container; do
        echo_with_color "\nstopping mongos: $c ..."
        case $Has_mongos in
            true) stop_container $c mongos ;;
            *)    stop_container $c proxy  ;;
        esac
    done
}
# Stop every config-server container of the current cluster.
do_stop_configsvr() {
    for c in $Config_container; do
        echo_with_color "\nstopping configsvr: $c ..."
        stop_container $c configsvr
    done
}
do_stop_mongod() {
    # Walk every replica set of the cluster and stop each mongod container
    # in it.  Relies on globals: $Clusterinfo (JSON), $Rs_num.
    for rs_idx in $(seq 0 $((Rs_num-1))); do
        rs_name=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].rsName")
        rs_cons=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].container_name")
        for c in $rs_cons; do
            echo_with_color "\nstopping $c in RS $rs_name"
            stop_container $c mongod
        done
    done
}

# Stop the exporters of this cluster.  Because __stop_exporter kills every
# process matching "mongodb_exporter.*$cluster" on the exporter host, a
# single call covers the whole cluster: the loops merely locate the first
# container of the first replica set, then return immediately.
__stop_exporters() {
    for rs_idx in $(seq 0 $((Rs_num-1))); do
        rs_cons=`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].container_name"`
        rs_name=`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].rsName"`
        for c in $rs_cons; do
            echo_with_color "stopping all exporters in $cluster"
            __stop_exporter $c
            # one call handles the whole cluster -- bail out after the first
            return
        done
    done
}

do_stop() {
    # Stop the cluster components selected by $Type (mongos / mongod /
    # configsvr / exporter; empty = everything).
    # Safety: containers may only be stopped in test environments; stopping
    # exporters is allowed anywhere.
    if [ "$region" != "sh_test" -a "$region" != "xian_test" ]; then
        # quote $Type: an empty Type (the "stop everything" case) used to
        # produce an invalid test expression here; also fix "must't" typo
        if [ "$Type" != "exporter" ]; then
            error "stop mustn't be run on $region, only on test environment !!!"
        fi
    fi

    case $Type in
    mongos)
        # including mongos & proxy
        do_stop_mongos
        ;;
    mongod)
        do_stop_mongod
        ;;
    configsvr)
        do_stop_configsvr
        ;;
    exporter)
        __stop_exporters
        ;;
    "")
        echo -e "\n=== stopping all components "
        do_stop_mongos
        do_stop_configsvr
        do_stop_mongod
        ;;
    *)
        error "no such type: $Type"
    esac

    # After a stop the worker quickly restarts the process
    # (ContainerServer::supervisor); how to copy first and start afterwards is
    # still an open question.
}

__stop_mongos_worker() {
    # Stop the worker daemon on every mongos host.  Different instances can
    # share one host, so hosts already handled are remembered in
    # Global_worker_array and skipped.
    for c in $Mongos_container; do
        ip1=${c%%:*}
        echo_with_color "\nstopping mongos worker $ip1 ..."
        # exact element match (space-delimited): the old substring test
        # (=~ "$ip1") wrongly skipped e.g. 9.1.1.1 once 9.1.1.10 was handled
        if [[ " ${Global_worker_array[*]} " != *" $ip1 "* ]]; then
            __stop_worker $ip1
            Global_worker_array+=("$ip1")
        fi
    done
}

__stop_mongod_worker() {
    # Stop the worker daemon on every mongod host of every replica set.
    # Hosts already handled are remembered in Global_worker_array and skipped
    # (different instances can share one host).
    for rs_idx in $(seq 0 $((Rs_num-1))); do
        rs_cons=`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].container_name"`
        rs_name=`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].rsName"`
        for c in $rs_cons; do
            ip1=${c%%:*}
            echo_with_color "\nstopping mongod worker: $ip1 in $rs_name ..."
            # exact element match (space-delimited): the old substring test
            # (=~ "$ip1") wrongly skipped e.g. 9.1.1.1 once 9.1.1.10 was done
            if [[ " ${Global_worker_array[*]} " != *" $ip1 "* ]]; then
                __stop_worker $ip1
                Global_worker_array+=("$ip1")
            fi
        done
    done
}

__stop_configsvr_worker() {
    # Stop the worker daemon on every config-server host.  Hosts already
    # handled are remembered in Global_worker_array and skipped.
    for c in $Config_container; do
        ip1=${c%%:*}
        echo_with_color "\nstopping configsvr worker $ip1 ..."
        # exact element match (space-delimited): the old substring test
        # (=~ "$ip1") wrongly skipped e.g. 9.1.1.1 once 9.1.1.10 was handled
        if [[ " ${Global_worker_array[*]} " != *" $ip1 "* ]]; then
            __stop_worker $ip1
            Global_worker_array+=("$ip1")
        fi
    done
}

# 可以选择只 stop 某个组件对应的workers
# Stop the worker daemons belonging to the component selected by $Type
# (empty = all workers of the cluster).
stop_worker() {
    # Safety: only allowed in test environments.  $region is quoted so an
    # empty value cannot break the test (consistent with do_stop); also fixes
    # the "must't" typo in the message.
    if [ "$region" != "sh_test" -a "$region" != "xian_test" ]; then
        error "stop mustn't be run on $region, only on test environment !!!"
    fi

    case $Type in
    mongos)
        __stop_mongos_worker
        ;;
    mongod)
        __stop_mongod_worker
        ;;
    configsvr)
        __stop_configsvr_worker
        ;;
    "")
        echo -e "\n=== stopping all workers "
        __stop_mongos_worker
        __stop_mongod_worker
        __stop_configsvr_worker
        ;;
    *)
        error "no such type: $Type"
    esac
}

# The worker usually restarts mongos/mongod automatically, so "start" is
# rarely needed — but when the worker fails to bring a component up, this
# starts it by hand.
start_container() {
    # $1: container id in the form ip:port:container_number
    # $2: component type: mongos | proxy | mongod | configsvr
    con_id=$1
    mongo_type=$2

    HOST=${con_id%:*}
    IP=${HOST%:*}
    PORT=${HOST#*:}
    container_num=${con_id##*:}

    # Ship get_pid_info over ssh to check whether the process is alive.
    pid_info=$(ssh_cmd user_00@$IP "$(declare -f get_pid_info); get_pid_info $con_id")

    pid_info_arr=($pid_info)
    pid=${pid_info_arr[0]}
    root_dir=${pid_info_arr[1]}

    if [ x$pid != x ]; then
        echo_red "$con_id is alive, maybe started by worker. check version and md5 !"
    else
        # Container is down: locate its root directory on the remote host
        # and launch the matching binary (remote script below; \$ escapes
        # defer expansion to the remote shell).
        ssh_cmd -o LogLevel=error -T user_00@${IP} << eof

        ######## 不可以这样写, 管道不可传变量
        #ls / -1 | egrep 'data?' | while read v; do 

        # 目前container 根目录一般在 /data1/containers, /data2/containers, 或 /data/user_00/containers 下
        tmp_dir=\$(ls -1 / | egrep 'data?')
        for v in \$tmp_dir; do
            echo -e "\tchecking $container_num existence in /\$v/.../containers ..."
            if [ \$v = "data" ]; then
                # 如果是mongos,proxy,可能在 /data/user_00/containers下,或者非标操作导致mongod也在此下
                if [ -e /data/user_00/containers ]; then
                    root_dir=\$(find /data/user_00/containers -maxdepth 2 -name "$container_num")
                fi
            else
                # shard server mongod 应该要在 /data1,/data2 等下
                root_dir=\$(find /\$v -maxdepth 2 -name "$container_num")
            fi
            
            # 只要找到就退出 ,假设 一个container的 根目录不会同时存在于 /data1,/data2 和 /data 之下
            if [ x\$root_dir != x ]; then break; fi
        done
        
        if [ x\$root_dir = x ]; then
            echo "$con_id root directory NOT FOUND !!!"
        else
            echo -e "\tstarting $mongo_type: $con_id in \$root_dir ..."
            ld_preload=\$(find \$root_dir/bin -name 'libstdc++*' | head -n 1)

            if [ $mongo_type = "mongos" ]; then
                if [ -e \$root_dir/bin/mongos ]; then
                    # 貌似cmongo 集群的mongos,proxy配置文件都叫 proxy.conf
                    if [ -e \$root_dir/conf/proxy.conf ]; then
                        LD_PRELOAD=\$ld_preload \$root_dir/bin/mongos -f \$root_dir/conf/proxy.conf || echo "mongos start failed"
                    elif [ -e \$root_dir/conf/mongos.conf ]; then
                        LD_PRELOAD=\$ld_preload \$root_dir/bin/mongos -f \$root_dir/conf/mongos.conf || echo "mongos start failed"
                    fi
                fi
            elif [ $mongo_type = "proxy" ]; then
                if [ -e \$root_dir/bin/proxy ]; then
                    LD_PRELOAD=\$ld_preload \$root_dir/bin/proxy -f \$root_dir/conf/proxy.conf || echo "proxy start failed"
                fi
            elif [ $mongo_type = "mongod" -o $mongo_type = "configsvr" ]; then
                LD_PRELOAD=\$ld_preload \$root_dir/bin/mongod -f \$root_dir/conf/mongod.conf || echo "mongod start failed"
            fi

        fi

eof
    fi
}

do_start_exporter() {
    # Launch a mongodb_exporter on $exporter_addr against container $1
    # (ip:port:num), listening one port above the highest port an exporter
    # already uses (base 10000). Output is appended to
    # tmp/$cluster-exporters.info for the later prometheus config print-out.
    _c=$1
    _host=${_c%:*}

    ssh_cmd -o LogLevel=error -T user_00@${exporter_addr} << eof | tee -a tmp/$cluster-exporters.info
        # https://unix.stackexchange.com/questions/572798/how-can-i-start-a-long-running-background-process-via-ssh-and-immediately-disc

        if ps aux | grep -v grep | grep "mongodb_exporter.*$cluster.*$_host"; then
            echo -e "\n\033[31mexporter exists against $_c in $cluster\033[0m\n"
            exit
        fi

        max_port=\`ss -tlp | grep expor | awk '{print \$4}' | while read l; do echo "\${l##*:}"; done | sort -n | tail -n1\`
        # BUGFIX: was [ x"max_port" = x ] (missing \$) — the literal string never
        # equals "x", so the 10000 default was never applied.
        if [ x"\$max_port" = x ]; then max_port=10000; fi
        new_port=\$((max_port+1))
        echo -e "\tstarting mongodb_exporter at '$exporter_addr:\$new_port' against $_c"

        # _host may be unreachable; the exporter start is best-effort
        mongodb_exporter --mongodb.uri=mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${_host}/admin --mongodb.direct-connect --web.listen-address=:"\$new_port" > /dev/null 2>&1 &
eof
}

do_start_node_exporter() {
    # Start a node_exporter (previously copied to /tmp) on host $IP,
    # listening on $Exporter_port (set via the -p option).
    check_non_empty "$Exporter_port" "node_exporter port not given"

    ssh_cmd -o LogLevel=error -T user_00@${IP} << eof
        echo -e "\n\033[33mstarting node_exporter at :$Exporter_port\033[0m\n"
        # https://unix.stackexchange.com/questions/572798/how-can-i-start-a-long-running-background-process-via-ssh-and-immediately-disc
        /tmp/node_exporter --web.listen-address=":$Exporter_port" --collector.vmstat.fields=".*"  > /dev/null 2>&1 &
eof
}

do_start_mongos() {
    # Start every mongos/proxy container of the cluster. The component kind
    # is constant per cluster, so decide it once outside the loop; quoting
    # keeps the test valid when Has_mongos is unset.
    local comp=proxy
    if [ x"$Has_mongos" = x"true" ]; then
        comp=mongos
    fi
    for c in $Mongos_container; do
        # Message now names the actual component (the old code always said
        # "mongos" even when starting a proxy).
        echo_with_color "\nstarting $comp: $c ..."
        start_container $c $comp
    done
}
do_start_configsvr() {
    # Bring up each config-server container of the cluster.
    for cfg in $Config_container; do
        echo_with_color "\nstarting configsvr: $cfg ..."
        start_container $cfg configsvr
    done
}
do_start_mongod() {
    # Walk every replica set described in $Clusterinfo and start each of
    # its mongod containers.
    for rs_idx in $(seq 0 $((Rs_num-1))); do
        rs_cons=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].container_name")
        rs_name=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].rsName")
        for node in $rs_cons; do
            echo_with_color "\nstarting $node in RS $rs_name"
            start_container $node mongod
        done
    done
}

__start_exporters() {
    # Start a mongodb_exporter for every mongod container of the cluster,
    # then print the comma-joined exporter addresses to paste into
    # prometheus.yml.
    > tmp/$cluster-exporters.info
    for rs_idx in $(seq 0 $((Rs_num-1))); do
        rs_cons=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].container_name")
        rs_name=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].rsName")
        for c in $rs_cons; do
            echo_with_color "starting mongodb_exporter against $c in $rs_name"
            do_start_exporter $c
        done
    done
    # Collect the exporter addresses (5th field of the "starting" lines).
    prometheus_config=$(grep 'starting mongodb_exporter' tmp/$cluster-exporters.info | awk '{print $5}' | tr '\n' ',')
    # Guard the trailing-comma strip: ${var::-1} on an empty string is a
    # "substring expression < 0" error and would abort the script (set -e).
    if [ x"$prometheus_config" != x ]; then
        echo_red "\nPlease paste ${prometheus_config::-1} into prometheus.yml\n"
    else
        echo_red "\nno exporter was started, nothing to add to prometheus.yml\n"
    fi
}

monitor_exporters() {
    # Watchdog loop for local mongodb_exporter processes (never returns):
    #   - kills the exporter with the largest RSS once it exceeds $1 (kB)
    #   - kills the exporter holding the most connections to a single mongod
    #     once that count exceeds $2
    # Run on the exporter host itself.
    mem_limit=$1
    conn_limit=$2

    while true; do
        ps_line=$(ps aux | grep 'mongodb_exporter' | grep -v grep | sort -k6 -n | tail -n1)
        max_rss_kB=$(echo "$ps_line" | awk '{print $6}')
        # Guard against no exporter running: an empty $max_rss_kB made the
        # old bare -gt test print "integer expression expected" every cycle.
        if [ x"$max_rss_kB" != x ] && [ "$max_rss_kB" -gt "$mem_limit" ]; then
            max_rss_pid=$(echo "$ps_line" | awk '{print $2}')
            echo "killing $max_rss_pid, since its rss: $max_rss_kB"
            kill -9 $max_rss_pid
        fi

        # Rank exporter->mongod connections, excluding LISTEN sockets and the
        # prometheus server, and pick the busiest target.
        # NOTE(review): the prometheus address 9.185.145.109 is hard-coded —
        # confirm before reusing on another deployment.
        max_conns_exporters=$(ss -tanp | grep expor | egrep -v 'LISTEN|9.185.145.109' | sort -k5 | awk '{print $5}' | uniq -c | sort -k1 -n | tail -n1)
        target_mongod=$(echo "$max_conns_exporters" | awk '{print $2}')
        target_mongod_conns=$(echo "$max_conns_exporters" | awk '{print $1}')
        max_conns_pid=$(ss -tanp | grep expor | egrep -v 'LISTEN|9.185.145.109' | grep "$target_mongod" | head -n1 | awk -F 'pid=' '{print $2}' | awk -F ',' '{print $1}')

        if [ x"$target_mongod_conns" != x ]; then
            if [ "$target_mongod_conns" -gt "$conn_limit" ]; then
                echo "killing $max_conns_pid, since: $max_conns_exporters"
                kill -9 $max_conns_pid
            fi
        fi
        sleep 5
    done
}

do_start() {
    # Start the component selected by -t; with no type, start everything.
    case "$Type" in
    mongos)    do_start_mongos ;;      # covers both mongos and proxy
    mongod)    do_start_mongod ;;
    configsvr) do_start_configsvr ;;
    exporter)  __start_exporters ;;
    "")
        echo -e "\n=== starting all components "
        do_start_mongod
        do_start_configsvr
        do_start_mongos
        ;;
    *) error "no such type: $Type" ;;
    esac
}

# -----------------
do_copy_mongos() {
    # Copy the mongos (or proxy) binary $File_mongos to every mongos host.
    # The two original branches were identical except for the log label, so
    # the label is computed once and the loop body deduplicated.
    local comp=proxy
    if [ x"$Has_mongos" = x"true" ]; then
        comp=mongos
    fi
    for c in $Mongos_container; do
        echo_with_color "\ncopying $comp to $c ..."
        do_scp ${c%%:*} $File_mongos
    done
}
do_copy_configsvr() {
    # Push the mongod binary ($File_mongod) to every config-server host.
    for cfg in $Config_container; do
        echo_with_color "\ncopying configsvr to $cfg ..."
        do_scp ${cfg%%:*} $File_mongod
    done
}
do_copy_mongod() {
    # Push the mongod binary ($File_mongod) to every shard host, replica
    # set by replica set.
    for rs_idx in $(seq 0 $((Rs_num-1))); do
        rs_cons=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].container_name")
        rs_name=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].rsName")
        for node in $rs_cons; do
            echo_with_color "\ncopying mongod to $node in RS $rs_name"
            do_scp ${node%%:*} $File_mongod
        done
    done
}

# Deploy the exporter to one mongod host at a time — never to all mongods
# in one go, for safety.
do_copy_exporter() {
    # $IP and $File are globals set by the -i / --ip and -f options.
    do_scp $IP $File
}

########## Copy the given binaries into /tmp on the remote hosts, so a later
########## "stop" can move them into the container's bin directory.
do_copy() {
    # Dispatch on $Type; an empty type copies to every component.
    case "$Type" in
    mongos)
        check_non_empty "$File_mongos" "mongos file not given"
        do_copy_mongos
        ;;
    mongod)
        check_non_empty "$File_mongod" "mongod file not given"
        do_copy_mongod
        ;;
    configsvr)
        # configsvr runs the mongod binary as well
        check_non_empty "$File_mongod" "mongod file not given"
        do_copy_configsvr
        ;;
    exporter)
        check_non_empty "$File" "exporter file not given"
        do_copy_exporter
        ;;
    "")
        check_non_empty "$File_mongos" "mongos file not given"
        check_non_empty "$File_mongod" "mongod file not given"
        do_copy_mongos
        do_copy_mongod
        do_copy_configsvr
        ;;
    *) error "no such Type: $Type" ;;
    esac
}

__copy_mongos_worker() {
    # Push the worker binary ($File) to the host of every mongos container.
    for con in $Mongos_container; do
        echo_with_color "\ncopying worker to mongos: $con ..."
        do_scp ${con%%:*} $File
    done
}

__copy_mongod_worker() {
    # Push the worker binary ($File) to the host of every mongod container,
    # replica set by replica set.
    for rs_idx in $(seq 0 $((Rs_num-1))); do
        rs_cons=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].container_name")
        rs_name=$(echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].rsName")
        for con in $rs_cons; do
            echo_with_color "\ncopying worker to mongod: $con in RS $rs_name"
            do_scp ${con%%:*} $File
        done
    done
}

__copy_configsvr_worker() {
    # Push the worker binary ($File) to the host of every configsvr container.
    for con in $Config_container; do
        echo_with_color "\ncopying worker to configsvr: $con ..."
        do_scp ${con%%:*} $File
    done
}

copy_worker() {
    # Copy a new worker binary ($File) to the hosts of the selected
    # component, or of all components when no -t type was given.
    check_non_empty "$File" "worker file not given"

    # Print the md5 up front so it can be compared on the remote side.
    # "$File" is quoted: an unquoted path with spaces or glob characters
    # would break md5sum.
    echo "$File md5: $(md5sum "$File" | awk '{print $1}')"
    case "$Type" in
    mongos)
        __copy_mongos_worker
        ;;
    mongod)
        __copy_mongod_worker
        ;;
    configsvr)
        __copy_configsvr_worker
        ;;
    "")
        __copy_mongos_worker
        __copy_mongod_worker
        __copy_configsvr_worker
        ;;
    *)
        error "no such Type: $Type"
        ;;
    esac
}

get_masterlog_for_job() {
    # Grep the master log of day $2 (log/master1.log.$2) on $MASTER for
    # job id $1.
    echo_with_color "grep log/master1.log.$2 for JD:$1 ..."
    # Escape the inner quotes: the original nested "" pairs cancelled each
    # other out, so the pattern reached the remote grep unquoted.
    ssh_cmd -t user_00@${MASTER} "cd /usr/local/services/master-1.0; grep \"JD:$1\" log/master1.log.$2"
}

# Scratchpad of common oss/mastertest commands.
# NOTE(review): apart from the first line these run with hard-coded example
# clusters/timestamps — presumably meant as copy-paste notes rather than for
# direct execution; confirm before calling this function.
print_oss_cmd() {
    mastertest -master_address=$MASTER:8888 -cmd list_restore_ts -cluster $cluster
    # Check whether a given point in time can be restored
    mastertest -cluster sirchen-dev -cmd=checkrestore -time_stamp="2020-12-08 01:14:00"
    mastertest -cmd=restore -src_cluster sirchen-dev -dest_cluster sirchen-rocks40 -time_stamp="2020-12-08 00:00:00"
    mastertest -cmd query_noauth -cluster xxx -master_address xxx
    mastertest -cmd noauth -cluster noauth36 -enableNoAuth false
}

scan_region_with_ES() {
    # Dump version/connection/status info of every cluster in every region of
    # region_map (or only $Region_chinese_for_scan when set) from ES into
    # tmp/$f1 as CSV rows, paging through results with the ES scroll API.
    # NOTE(review): reads globals ES, region_map, Region_chinese_for_scan and
    # f1 — f1 must be set by the caller before use.
    for k in ${!region_map[@]}; do
        region_chinese=$k
        if [ x$Region_chinese_for_scan != x ]; then
            region_chinese=$Region_chinese_for_scan
        fi

        #etcd1=${coop_region_etcd_map[$k]}
        #printf "%-6s\t%-15s\n" $k $etcdctl
        #etcdctl --endpoint http://$etcd1:2379 ls /cluster | while read v; do echo "cluster: $v"; done

        echo "$region_chinese showing cluster info ..."

        scroll_init_done=false
        while true; do
            if [ $scroll_init_done = false ]; then
                # default size=10
                # use the scroll API because a region can return a lot of data
                region_scroll_res=`curl -s -XPOST "${ES}/cluster_stats/_search?pretty&size=100&scroll=1m" -d "{ \"query\": { \"match\": {\"region_name\": \"$region_chinese\" } }, \"_source\": [\"base_info.cluster_id\", \"base_info.mongod_subversion\", \"base_info.config_version\", \"base_info.proxy_version\",\"region_name\", \"base_info.max_client_conns\", \"base_info.status\", \"set_name\"] }"`

                scroll_init_done=true
                scroll_id=`echo "$region_scroll_res" | jq -r '._scroll_id'`
                hits_total=`echo $region_scroll_res | jq -r '.hits.total'`
                echo -e "\thits total: $hits_total"
            else
                # follow-up pages: fetch the next batch with the scroll id
                region_scroll_res=`curl -s -XPOST "${ES}/_search/scroll?scroll=1m&pretty" -d "$scroll_id"`
                scroll_id=`echo "$region_scroll_res" | jq -r '._scroll_id'`
            fi
            hits_num=`echo "$region_scroll_res" | jq -r '.hits.hits | length'`

            if [ $hits_num -ne 0 ]; then
                # split each jq field list into a bash array first
                cluster_id=(`echo "$region_scroll_res" | jq -r '.hits.hits[]._source.base_info.cluster_id' | tr '\n' ' '`)
                mongodv=(`echo "$region_scroll_res" | jq -r '.hits.hits[]._source.base_info.mongod_subversion' | tr '\n' ' '`)
                configv=(`echo "$region_scroll_res" | jq -r '.hits.hits[]._source.base_info.config_version' | tr '\n' ' '`)
                proxyv=(`echo "$region_scroll_res" | jq -r '.hits.hits[]._source.base_info.proxy_version' | tr '\n' ' '`)
                max_clientconns=(`echo "$region_scroll_res" | jq -r '.hits.hits[]._source.base_info.max_client_conns' | tr '\n' ' '`)
                cluster_st_num=(`echo "$region_scroll_res" | jq -r '.hits.hits[]._source.base_info.status' | tr '\n' ' '`)
                cluster_region=(`echo "$region_scroll_res" | jq -r '.hits.hits[]._source.region_name' | tr '\n' ' '`)
                cluster_set=(`echo "$region_scroll_res" | jq -r '.hits.hits[]._source.set_name' | tr '\n' ' '`)

                cluster_num=${#cluster_id[@]}

                # one CSV row per cluster; get_cluster_status sets Cluster_status
                for ((i=0; i<$cluster_num; i++)); do
                    get_cluster_status ${cluster_st_num[$i]}
                    echo "${cluster_id[$i]},${mongodv[$i]},${configv[$i]},${proxyv[$i]},${max_clientconns[$i]},${Cluster_status},${cluster_region[$i]},${cluster_set[$i]}" >> tmp/$f1
                done
            else
                break
            fi

            # throttle so we do not hammer the production ES
            usleep 200000
        done

        # a single explicit region was requested: stop after the first pass
        if [ x$Region_chinese_for_scan != x ]; then
            break
        fi
    done
}

__write_csv_fields() {
    # Append one CSV row to tmp/$2 built from selected fields of the
    # compacted cluster JSON document in $1.
    l="$1"
    f1=$2
    # The nine copy-pasted jq stanzas are folded into one loop; output is
    # byte-identical. jq -j prints without a trailing newline.
    for field in .baseinfo.clusterid .baseinfo.mongodsubversion \
                 .baseinfo.configversion .baseinfo.proxyversion \
                 .baseinfo.maxclientconns .baseinfo.status \
                 .baseinfo.replicatesetnum .regionname; do
        echo "$l" | jq -j "$field" >> tmp/$f1 && echo -n "," >> tmp/$f1
    done
    # Last column terminates the row with a newline (jq -r).
    echo "$l" | jq -r '.setname' >> tmp/$f1
}

# Iterating over all clusters is slow — TODO: find a way to speed this up.
extract_fields_from_region_compacted() {
    # For each replica-set count 1..20 pick one representative cluster per
    # storage flavor (wt / wt3.6 / wt4.0 / rocks) out of
    # tmp/region_compact.tmp, write its fields as CSV into tmp/$1 and
    # pretty-print the resulting table.
    f1=$1
    echo "extract fields of different number of replicatesetnum(<=20) from tmp/region_compact.tmp ..."
    for ((i=1; i<=20; i++)); do
        # The four near-identical grep stanzas are folded into one loop over
        # the flavor patterns; selection order (wt-, wt3.6, wt4.0, rocks) and
        # output are unchanged. debug/test clusters are excluded.
        for flavor in 'mongo_wt-' 'mongo_wt3.6' 'mongo_wt4.0' 'mongo_rocks'; do
            l=$(grep "replicatesetnum\":$i" tmp/region_compact.tmp | grep "$flavor" | egrep -v 'debug|test' | head -n1)
            if [ x"$l" != "x" ]; then
                echo -e -n "  extracting \033[33m$(echo "$l" | jq -j '.baseinfo.clusterid')\033[0m ...\r"
                __write_csv_fields "$l" $f1
            fi
        done
    done

    echo -e "\n"
    # column(1) does not align CJK text well, hence the manual awk formatting
    head -n1 tmp/$f1 | awk -F ',' '{printf("\033[31m\t%-20s %-20s %-20s %-20s %-18s %-10s %-8s %-10s %-10s\033[0m\n",$1,$2,$3,$4,$5,$6,$7,$8,$9)}'
    tail -n +2 tmp/$f1 | awk -F ',' '{printf("\t%-20s %-20s %-20s %-20s %-18s %-10s %-8s %-10s %-10s\n",$1,$2,$3,$4,$5,$6,$7,$8,$9)}'
}

# deprecated ! This installed mongodb_exporter on the host machine; since
# mongodb_exporter still has bugs (connection leaks / memory blow-up) it is
# not safe to use.
deploy_exporter() {
    # Example invocations:
    # cmg stop-exporter -c groupCollectionFalse2 -r sh_test -i 1
    # cmg deploy-exporter -c groupCollectionFalse2 -r sh_test -i 1 -f bin/mongodb_exporter -p 9616
    # cmg start-exporter -c groupCollectionFalse2 -r sh_test -i 1 -p 9616

    check_non_empty "$id" "container number must be given"
    check_non_empty "$File" "exporter file not given"
    check_non_empty "$Exporter_port" "exporter port not given"
    Type=exporter
    # stop any old exporter, push the new binary, then start it
    do_stop_exporter
    do_copy
    do_start_exporter
    # keep a record of which hosts ever had an exporter deployed
    echo "$(date '+%Y%m%d %H:%M:%S') exporter deployed on $HOST in $cluster" >> which-node-setup-exporter
}

# ==================================
# ---- entry point: command-line handling starts here ----

# With no arguments at all, print the usage text and exit.
if [ $# -lt 1 ]; then
    help
    exit 0
fi

# The first positional argument is the sub-command; the rest are options.
cmd=$1
shift

# Parse the options after the sub-command. Each option stores its value in a
# global variable consumed by the sub-command handlers below.
while [ $# -gt 0 ]; do
    case $1 in
    -n)
        # optional value: coop, idc, finance
        shift; network=$1; shift
        ES=${region_es_map[$network]}
        ;;
    -c)
        # cluster name
        shift; cluster=$1; shift
        ;;
    -r)
        # If ES reports a wrong region (e.g. a Shanghai-test cluster reported
        # as Shanghai), override it manually with -r
        shift; region_given=$1; shift
        ;;
    -i)
        # container ordinal number
        shift; id=$1; shift
        ;;
    -job)
        shift; job_id=$1; shift
        ;;
    -d)
        shift; date1=$1; shift
        ;;
    -T)
        shift; perf_interval=$1; shift
        ;;
    -I)
        # mongostat interactive flag
        shift; interactive=true
        ;;
    -f)
        shift; File=$1; shift
        ;;
    -fd)
        # path of the mongod binary used by the copy sub-command
        shift; File_mongod=$1; shift
        ;;
    -fs)
        # path of the mongos / proxy binary used by the copy sub-command
        shift; File_mongos=$1; shift
        ;;
    --ip)
        # used in scp
        shift; ip_given="$1"; shift
        ;;
    --lines)
        # number of lines to tail from the log file
        shift; lines=$1; shift
        ;;
    --etcd_cmd)
        shift; etcd_cmd="$1"; shift
        ;;
    --no-pri)
        # DONOT get_primary which may take time
        shift; no_pri=true
        ;;
    --no-conn)
        # DONOT get connections which may take time
        shift; no_conn=true
        ;;
    -t)
        # mongod, mongos, proxy
        shift; Type=$1; shift
        ;;

    --override)
        # When stopping a container, overwrite the original executable with
        # the file copied earlier. Best given every time: right after the
        # kill the worker may restart the process, and copying the new
        # binary into bin would then fail with "file busy".
        shift; Override=true
        ;;

    -p)
        # exporter port
        shift; Exporter_port=$1; shift
        ;;

    -cpu)
        shift; cpu_quota=$1; shift
        ;;
    -mem)
        shift; mem_quota=$1; shift
        ;;
    -pwd)
        shift; PASSWD=$1; shift
        ;;

    *)
        error "invalid option: $1"
    esac
done

# Currently only the master / etcdctl sub-commands may omit -c (cluster)
#if [ "$cmd" != "master" -o "$cmd" = "etcdctl" ]; then
#    check_non_empty "$cluster" "cluster must be given via -c"
#fi
# Required credentials come from the environment (or -pwd for PASSWD).
# Message typos fixed: "PASSWRD" -> "PASSWD", "passwod" -> "password".
check_non_empty "$PASSWD" "ENV PASSWD (ssh password) must be set"
check_non_empty "$USER_SALT" "ENV USER_SALT (mongo passwd string) must be set"
check_non_empty "$MD5_SALT" "ENV MD5_SALT (mongo passwd string) must be set"

# Every time cluster is given, get its region immediately for later use
if [ x$cluster != x ]; then
    which jq > /dev/null || error "jq not in PATH, please install jq, maybe including its dependency package oniguruma"
    which etcdctl > /dev/null || error "etcdctl not in PATH"

    # Pad/quote $network: it is empty unless -n was given, and the bare
    # [ $network = "idc" ] degenerated to the syntax error [ = "idc" ].
    if [ x"$network" = x"idc" ]; then
        #get_region_from_ES $cluster
        get_region_from_mongo $cluster
    elif [ x"$network" = x"coop" ]; then
        get_region_from_mongo $cluster
    fi
    # map the Chinese region name to its pinyin abbreviation
    region=${region_map[$region_chinese]}
fi

# If the region reported by ES is wrong, prefer the one given on the command line
if [ x$region_given != x ]; then
    region=$region_given
fi

# Once the cluster's region is known, resolve the ETCD (and thus MASTER)
# address from the network-specific region->etcd maps.
if [ x$network = x"coop" -o x$network = x"finance" ]; then
    if [ x$region != x ]; then
        ETCD=${coop_region_etcd_map[$region]}
    fi
elif [ x$network = x"idc" ]; then
    ETCD=${idc_region_etcd_map[$region]}
fi
echo_yellow "ETCD: $ETCD"

# The current primary master is registered in etcd under /master/primary
MASTER_HOST=$(etcdctl --endpoint http://$ETCD:2379 get /master/primary)
MASTER=${MASTER_HOST%:*}

# When a cluster was given, fetch and print its base info plus the primary
# master's pid / start time / version, then collect per-host details unless
# the sub-command does not need them.
if [ x$cluster != x ]; then
    # special-case cluster with its own password
    if [ $cluster = "music_bi_song_profile_new" ]; then
        PASSWD=$MUSIC_PASSWD_1
    fi
    get_cluster_info

    # TODO: should also print the master / worker versions here !!
    cat tmp/$cluster-info.json | jq --compact-output '.base_info'
    custom_data=`cat tmp/$cluster-info.json | jq -r '.extra_info.custom_data'`
    echo_with_color "$cluster is in $region_chinese, $region, status: $Cluster_status, ES: $ES, $custom_data"

    # Masters like K-song's are only reachable via a jump host, so this ssh
    # can hang; such hosts should be filtered out or given a timeout.
    # Remote variable assignments need their $ escaped:
    # https://stackoverflow.com/questions/13032409/ssh-remote-variable-assignment
    pid_ver=$(ssh_cmd -T user_00@${MASTER} << eof || echo_red "Master SSH Timeout"
    ps_res=\$(ps aux | grep '[/]master' | grep -v bash)
    p=\$(echo "\$ps_res" | awk '{print \$2}')
    # https://stackoverflow.com/questions/5731234/how-to-get-the-start-time-of-a-long-running-linux-process
    _master_start_time="\033[31m\$(ps -p "\$p" -o lstart=)\033[0m"
    abs_master=\`readlink -e /proc/\$p/exe\`

    # see https://git.code.oa.com/sirchen/cmg-tool/issues/6
    cd "\$(dirname \${abs_master})"
    ver=\$(eval "./master --version")
    echo "\$p, \$_master_start_time, \$ver"
eof
)
    # print master's last failover time
    echo -e "etcd: $ETCD, primary master: $MASTER_HOST, pid+startTime+version : $pid_ver"

    echo "$cluster's clusterInfo in tmp/$cluster-info.json"

    # Some sub-commands do not need the per-node host details
    if [ $cmd != job -a $cmd != master ]; then
        get_cluster_host
    fi
    print_when_in_READONLY_ISOLATION
fi

# When a container ordinal (-i) was given, resolve its host/port/pid and
# print the details; otherwise fall back to a bare --ip when provided.
if [ x$id != x ]; then
    con_id=${Containers_array[$id]}
    # con_id layout: ip:port:container_number
    HOST=${con_id%:*}
    IP=${HOST%:*}
    PORT=${HOST#*:}
    container_num=${con_id##*:}
    pid_info=$(ssh_cmd user_00@$IP "$(declare -f get_pid_info); get_pid_info $con_id" || echo "$IP-timeout")

    if [ x"$pid_info" != x"$IP-timeout" ]; then
        # pid_info fields are '&'-separated: pid, root_dir, working_dir
        # (presumably produced by get_pid_info — defined earlier in the file)
        pid_info_arr=($(echo "$pid_info" | tr '&' ' '))
        pid=${pid_info_arr[0]}

        if [ x$pid = x ]; then
            echo_red "$con_id may have been crashed. Check !"
        else
            root_dir=${pid_info_arr[1]}
            working_dir=${pid_info_arr[2]}
            echo_with_color "$con_id on $cluster info:"
            echo -e "  pid=$pid, root_dir=$root_dir, working_dir=$working_dir\n"
        fi
    fi

elif [ x"$ip_given" != x ]; then
    IP="$ip_given"
fi

case $cmd in
ssh)
    check_non_empty "$id" "container ordering number should be given when ssh to remote host"
    do_ssh $IP
    ;;

scp)
    check_non_empty "$IP" "container ordering number or --ip should be given when using scp"
    check_non_empty "$File" "File must be given when scp"
    do_scp $IP $File
    ;;

perf)
    # 版本满足后,再执行: cmg perf -c xxx -i 6
    check_non_empty "$IP" "container ordering number not given"
    File="tmp/perf-4.14.105-1.0009.tl2.x86_64.rpm tmp/python-perf-4.14.105-1.0009.tl2.x86_64.rpm"

    # 硬编码要安装的文件, 在自己的tmp/ 准备好这两个文件
    if [ ! -e "tmp/perf-4.14.105-1.0009.tl2.x86_64.rpm" ]; then
        error "$File and tmp/FlameGraph.tar.bz2 not existed. Please download from http://mirrors.tencent.com/os/tlinux/Tlinux-Kernel-RPMs, and download FlameGraph.tar.bz2 from github, put them into tmp/"
    fi
    do_perf 
    ;;

top)
    # 会造成 ctrl c 后 bash 进程仍然存留,暂不用!
    check_non_empty "$IP" "container ordering number not given"
    ;;

customer)
    check_non_empty "$IP" "container ordering number not given"
    get_customers_on_host $IP
    ;;

shell|sh)
    check_non_empty "$id" "container ordering number should be given when connecting remote via mongo"
    do_shell
    ;;

rpm)
    do_install_rpm
    ;;

getquota)
    check_non_empty "$id" "container ordering number not given"
    ssh_cmd -T user_00@${IP} << eof
    # 如果是外部的变量,不用加 \$ 转义
    quota=\$(cat /sys/fs/cgroup/cpu/container-$container_num/cpu.cfs_quota_us)
    base=\$(cat /sys/fs/cgroup/cpu/container-$container_num/cpu.cfs_period_us)
    echo -e "\033[34m cpu core num for container $container_num: \$(echo "\$quota/\$base" | bc)\033[0m"
    echo "cpu cgroup.procs: \`cat /sys/fs/cgroup/cpu/container-$container_num/cgroup.procs\`"

    mem_size=\$(cat /sys/fs/cgroup/memory/container-$container_num/memory.limit_in_bytes)
    echo -e "\033[34m memory limits for container $container_num: \$(echo "\$mem_size/1024/1024" | bc) MB\033[0m"
    echo "mem cgroup.procs: \`cat /sys/fs/cgroup/memory/container-$container_num/cgroup.procs\`"
eof
    ;;

setquota)
    # cmg setquota -c mk-test-progress2 -i 8 -r sh_test -cpu 2 -mem 2

    check_non_empty "$id" "container ordering number not given"
    ssh_cmd -T user_00@${IP} << eof
    # 如果是外部的变量,不用加 \$ 转义
    if [ x$cpu_quota != "x" ]; then
        echo -e "\033[34madjusting cpu core num to $cpu_quota for container: $container_num\033[0m"
        if [ ! -e /sys/fs/cgroup/cpu/container-$container_num ]; then
            mkdir /sys/fs/cgroup/cpu/container-$container_num
            echo $pid > /sys/fs/cgroup/cpu/container-$container_num/cgroup.procs
        fi

        echo 100000 > /sys/fs/cgroup/cpu/container-$container_num/cpu.cfs_period_us
        echo "\$(($cpu_quota*100000))" > /sys/fs/cgroup/cpu/container-$container_num/cpu.cfs_quota_us
    fi

    # 单位为G
    if [ x$mem_quota != "x" ]; then
        echo -e "\033[34madjusting memory limits to $mem_quota for container: $container_num\033[0m"
        if [ ! -e /sys/fs/cgroup/memory/container-$container_num ]; then
            mkdir /sys/fs/cgroup/memory/container-$container_num
            echo $pid > /sys/fs/cgroup/memory/container-$container_num/cgroup.procs
        fi

        echo "\$(($mem_quota*1024*1024*1024))" > /sys/fs/cgroup/memory/container-$container_num/memory.limit_in_bytes
    fi
eof
    ;;

log|l)
    # 查看 $lines 内有没有错误日志
    check_non_empty "$id" "container ordering number should be given when checking container's log"
    # 由于给空的file失效,所以加个x保证传过去的字符串非空
    #ssh_cmd -t user_00@${IP} \"$(declare -f tail_latest_error); tail_latest_error \"$root_dir\" \"x$file\" \"$lines\"\"
    f=mongod.log
    if [[ "$Mongos_container" =~ "$con_id" ]]; then
        f=proxy.log
    fi
    if [ x$File != x ]; then f=$File; fi

    echo_yellow "getting log info from $con_id into tmp/$f.simplified ..."

    _date=""
    if [ x$date1 != x ]; then _date="-$date1"; fi

    ssh_cmd -T user_00@$IP << eof > tmp/$f.simplified
    cd "$root_dir"
    # 动作执行的命令及其结果变量需要转义
    fsize=\$(ls -l log/$f | awk '{print \$5}')
    if [ "\$fsize" -le 1073741824 ]; then
        # 怎么样只让这个定向到 $f.simplified,而其它命令不用呢?
        egrep -v 'start connection from| accepted from|Successfully authenticated as|end connection|FlowManager::dump_inlock cost|CMongoMongodStatsManager::dump_inlock|WARN:FlowManager::dump_inlock too many|received client metadata from|so no filter info| Starting new replica set monitor|task: UnusedLockCleaner|CmongoMongodStatsManager|Starting new replica set monitor' log/$f
    else
        echo "$f larger than 1G"
    fi

    # grep 没找到会返回 1
    egrep -i "$var_log_messages_keywords" /var/log/messages${_date}

    ls -lh log/
eof
    grep 'writeConflicts:[1-9]' tmp/$f.simplified | head -n10
    grep 'W:1' tmp/$f.simplified | head -n10

    echo_with_color "extracting slow log into tmp/$f.slowlog ..."
    grep 'protocol.*ms$' tmp/$f.simplified > tmp/$f.slowlog || true
    echo_with_color "analyzing slow log (last 2k lines) ..."
    tail -n 2000 tmp/$f.slowlog > a
    analyze_slowlog a
    echo
    tail -n 10 tmp/$f.slowlog
    echo
    echo_yellow "getting some key event from tmp/$f.simplified ..."
    egrep 'RESTART|git version|db version|transition to|W:|too many' tmp/$f.simplified

    echo
    echo_red "You can see more log details in tmp/$f.simplified"
    ;;

mongostat|mstat)
    check_non_empty "$id" "container ordering number should be given when using mongostat"
    which mongostat > /dev/null || error "mongostat is not in PATH"
    # 这样写会导致所有节点都显示 PRI,不显示 SEC,不知是为何
    #stmt="mongostat --uri mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${HOST}/admin?authSource=admin --discover"
    if [ x$interactive = x"true" ]; then
        stmt="mongostat --username $cluster --password $cluster-$USER_SALT-$MD5_SALT -h ${HOST} --authenticationDatabase admin --discover --interactive"
    else
        stmt="mongostat --username $cluster --password $cluster-$USER_SALT-$MD5_SALT -h ${HOST} --authenticationDatabase admin --discover"
    fi
    # -n 可指定输出 row 数量

    echo "$stmt"
    eval "$stmt"
    ;;

master|m)
    # ssh进入master/etcd机器
    echo_with_color "ssh to master: $MASTER on $region"
    # 搜索 grpc 是为了顺便打出master是何时启动的
    ssh_cmd -t user_00@${MASTER} "ps aux | egrep '[m]aster|[e]tcd'; cd /usr/local/services/master-1.0; egrep 'grpc' log/master1.log ; bash -l"
    ;;

etcdctl)
    check_non_empty "$etcd_cmd" "no etcd command given"
    echo_with_color "etcdctl connecting to etcd: $ETCD on $region"
    etcdctl --endpoint http://$ETCD:2379 $etcd_cmd
    ;;

stats)
    # Dump replica-set health for every RS of the cluster into
    # tmp/$cluster-RS-stats.json: rs.conf, rs.status, replication info,
    # optional read-only DR sync latency, and a 'show dbs' sample taken
    # through the first mongos.
    file=tmp/$cluster-RS-stats.json
    if [ -e $file ]; then
        mv $file $file.bak
    fi
    > $file
    oplog_status_file=tmp/$cluster-oplog-status.log
    for ((i=0; i<$Rs_num; i++)); do
        rs_name=${Rs_name_ar[$i]}
        rs_cons=(`echo "$Clusterinfo" | jq -r ".rs_list[$i].mongod_list[].container_name"`)
        P=${Primary_ar[$i]}

        if [ x$P = x ]; then
            # Primary not found: fall back to the first member of the RS.
            c=${rs_cons[0]}
            P=${c%:*}
        fi

        mongo --quiet mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${P}/admin << eof >> $file
        print("--- rs.conf")
        rs.conf()
        print("--- rs.status")
        rs.status()
        print("--- db.printSlaveReplicationInfo & db.printReplicationInfo")
        db.printSlaveReplicationInfo()
        db.printReplicationInfo()

        //print("--- db.serverStatus")
        //db.serverStatus()

eof

        # For a read-only disaster-recovery instance, also print the sync
        # info from its source instance and compute sync progress / lag.
        # Only single-shard DR instances are supported for now.
        if [ x$Sync_src_node_host != x ]; then
            echo_with_color ">>> showing sync info from $Sync_src_node_host in $sync_src_cluster ..." | tee -a $file
            show_sync_delay $Sync_src_node_host $sync_src_cluster >> $file

            echo_with_color ">>> get oplog event time from $Sync_src_node_host in $sync_src_cluster, calculate sync latency and write data into $oplog_status_file" | tee -a $file
            # getReplicationInfo could return this too, but its time format
            # is awkward to convert into a raw timestamp.
            oplog_first_json=$(mongo mongodb://$sync_src_cluster:$sync_src_cluster-$USER_SALT-$MD5_SALT@$Sync_src_node_host/admin << eof | grep '"ts"'
            use local
            db.oplog.rs.find({}, {ts:1}).sort({\$natural:1}).limit(1)
eof
)
            oplog_last_json=$(mongo mongodb://$sync_src_cluster:$sync_src_cluster-$USER_SALT-$MD5_SALT@$Sync_src_node_host/admin << eof | grep '"ts"'
            use local
            db.oplog.rs.find({}, {ts:1}).sort({\$natural:-1}).limit(1)
eof
)
            # First numeric run on the Timestamp(...) line = epoch seconds.
            oplog_first=$(echo "$oplog_first_json" | egrep -o '[0-9]*' | head -n 1)
            oplog_last=$(echo "$oplog_last_json" | egrep -o '[0-9]*' | head -n 1)

            # NOTE(review): oplog_start_ts is not set in this block —
            # presumably exported by show_sync_delay or earlier worker
            # parsing; confirm before relying on these numbers.
            rollback_gap=$((oplog_start_ts-oplog_first))
            lag=$((oplog_last-oplog_start_ts))

            echo_red "$sync_src_cluster oplog first time: $oplog_first, oplog last time: $oplog_last, worker oplog_start_ts: $oplog_start_ts , rollback_gap: $(calculate_time $rollback_gap) , lag: $(calculate_time $lag)" | tee -a $file
            # Keep a history of the measurements for trend analysis.
            echo "$(date '+%Y-%m-%d %H:%M:%S') $oplog_first $oplog_last $oplog_start_ts $rollback_gap $lag" >> $oplog_status_file
        fi

    done
    # Surface hidden members (with 4 lines of leading context per match).
    grep -B 4 '"hidden" : true' $file || echo

    # With this format one could emit a direct barad monitoring URL per
    # shard, handy for jumping straight there in a browser:
    # http://barad.isd.com/web/view.html?area=gz&namespace=qce/cmongo&groupName=replicaset&dimension={%22replicaset%22:%22cmgo-2s6xrsuh_0%22}

    echo_with_color ">>> show dbs from $Mongos1_host ..." | tee -a $file
    # if possible, show collections whose size larger than a certain amount
    if [ x$Mongos1_host != x ]; then
        # The trailing egrep must stay on the heredoc start line; it cannot
        # be written after 'eof'.
        # Even if the connection fails, tee still succeeds, so $? = 0.
        dbs=$(mongo mongodb://$cluster:$cluster-$USER_SALT-$MD5_SALT@${Mongos1_host}/admin << eof | egrep -v 'shell|session|MongoDB|bye'
        show dbs
        //dbs=db.adminCommand({listDatabases:1})
eof
)
        echo "$dbs" >> $file
        dbs_lines=`echo "$dbs" | wc -l`
        if [ $dbs_lines -lt 10 ]; then
            echo "$dbs"
        else
            echo "see more in $file"
        fi
        echo "Total db number: $dbs_lines"
    fi
    # print oplog.rs stats
    echo_with_color "see more stats including rs.conf, rs.status, db.printSlaveReplicationInfo in $file"
    echo_gray "see mongod parameter in: $File_mongod_para"
    echo_gray "see mongos parameter in: $File_mongos_para"
    echo_gray "see mongod serverStatus in: $File_mongod_serverStatus"
    echo_gray "see mongos serverStatus in: $File_mongos_serverStatus"
    echo_gray "see mongod-info on all mongod nodes of $cluster in: $File_mongods_on_mongodhost"
    ;;

get_rs)
    # NOTE(review): the heredoc body is empty, so this only opens and
    # closes an ssh session to $IP — looks unfinished; confirm intent.
    check_non_empty "$ip_given" "ip not given"
    ssh_cmd -o LogLevel=error -T user_00@${IP} << eof
eof
    ;;

job)
    # Fetch master-log entries for a given job id, optionally restricted
    # to a date (-d YYYYMMDD).
    # Example: cmg job -r gz -job 2002223606 [-d 20211209]
    # cmg job -r shjr -job 2002890899
    check_non_empty "$job_id" "job id not given"
    # Default to a glob over the current month's logs; the literal '*'
    # is expanded later when matching the master log file names.
    jobdate=`date +%Y%m*`
    if [ x$date1 != x ]; then
        jobdate="${date1}*"
    fi
    get_masterlog_for_job $job_id $jobdate
    echo_with_color "If you cannot find the log, it may not be located in current Master: $MASTER"
    ;;

backup)
    # Show backup metadata for the cluster and list restorable time ranges.
    # To watch per-shard backup progress in the master log:
    #   egrep 'start to backup from candidate|try another|Backup cluster |backup job canceled'

    # Fix: jq needs a filter program; a bare 'jq -r' exits non-zero
    # ("no filter given"), which aborts this script under 'set -e'.
    # '.' is the identity filter and pretty-prints the value.
    etcdctl --endpoint http://$ETCD:2379 get /backup/$cluster | jq -r .
    # List the restorable time range.
    mastertest -master_address=$MASTER:8888 -cmd list_restore_ts -cluster $cluster
    # Check whether a given time can be restored to:
    #mastertest -cluster $cluster -cmd=checkrestore -time_stamp="2020-12-08 01:14:00"
    #mastertest -cluster $cluster -cmd=showcolls

    #mastertest -master_address xx -cmd cancel_task_job -job_to_be_cancelled 1234
    #mastertest -cmd backup -cluser $cluster -is_incremental true
    #mastertest -cmd hotBackup -cluser $cluster -is_incremental true
    #mastertest -cmd=restore -src_cluster sirchen-dev -dest_cluster sirchen-rocks40 -time_stamp="2020-12-08 00:00:00"
    ;;

version)
    # Version info is already printed by common startup code before the
    # dispatch reaches here.
    echo -e "version already output"
    ;;

start)
    # Start the cluster's mongod/mongos/proxy processes.
    do_start
    ;;

stop)
    # Stop the cluster's mongod/mongos/proxy processes.
    do_stop
    ;;

stop-worker)
    # Stop the worker process.
    stop_worker
    ;;

copy)
    # copy mongos, mongod or proxy to host
    do_copy
    ;;

copy-worker)
    # Copy the worker binary to the target host(s).
    copy_worker
    ;;

update)
    # cmg update -c noauth40 --override -fs ./tmp/36/mongos -fd ./tmp/36/mongod
    # cmg update -c sirchen-dev --override -fd ./tmp/40/mongod -n idc_test -t mongod
    #
    # First copy mongod/mongos/proxy into /tmp on every component host of
    # the cluster, then stop + override so the new executable lands in the
    # container's bin directory.
    # Note: upgrade within the same major version only (a 4.0 cluster gets
    # 4.0 executables); config files under conf/ may be incompatible and
    # the process would then fail to start.

    # Fix: the original '[ $Override != true ]' is a shell syntax error
    # when $Override is empty; use the x-prefix idiom used elsewhere here.
    if [ "x$Override" != "xtrue" ]; then error "override not true"; fi

    # Safety: only allowed on test-environment regions.
    # (quote $region and avoid the obsolescent '[ ... -a ... ]' form)
    if [ "$region" != "sh_test" ] && [ "$region" != "xian_test" ]; then
        error "update mustn't be run on $region, only on test environment !!!"
    fi

    do_copy
    do_stop
    #do_start
    echo_with_color 'update done. If worker does not start them, use `start` manually.'
    ;;

update-worker)
    # cmg update-worker -c icmgo-bkb4qrht8b -n idc_test -f ./worker --override -t mongod
    # Safety: only allowed on test-environment regions.
    # Fix: quote $region (empty value broke the test) and avoid the
    # obsolescent '[ ... -a ... ]' form; the error message also wrongly
    # said "stop" — this is the update-worker arm.
    if [ "$region" != "sh_test" ] && [ "$region" != "xian_test" ]; then
        error "update-worker mustn't be run on $region, only on test environment !!!"
    fi

    copy_worker
    stop_worker
    ;;

deploy-exporter)
    # Deprecated
    #deploy_exporter
    ;;

start-node-exporter)
    # Start node_exporter on the cluster hosts.
    do_start_node_exporter
    ;;

stop-node-exporter)
    # Stop node_exporter on the cluster hosts.
    do_stop_node_exporter
    ;;

deploy-node-exporter)
    # Push the node_exporter binary to every host, then (re)start it.
    check_non_empty "$File" "exporter file not given"
    check_non_empty "$Exporter_port" "node_exporter port not given"
    # NOTE(review): do_copy presumably dispatches on $Type and copies
    # $File when Type=exporter — confirm against do_copy's definition.
    Type=exporter
    do_stop_node_exporter
    do_copy
    do_start_node_exporter
    ;;

ping)
    # Print the ping-latency matrix between all nodes of the cluster.
    # cmg ping -c icmgo-uwr7oy94wc
    # If ICMP permissions were changed on a node you may see:
    #   ping: socket: Operation not permitted
    file=tmp/$cluster-ping-matrix.csv

    # CSV header: one column per cluster IP ...
    echo -n "$Ip_str" | tr ' ' ',' > $file
    # ... plus an extra column marking what kind of node the row is.
    echo ",type" >> $file

    # Helper shared by the configsvr/mongos/mongod loops below (the
    # original had this stanza copy-pasted three times): ssh to host $1,
    # ping every cluster IP from there, and append one CSV row labelled
    # with node type $2.  ${Ip_array[*]} expands locally before ssh; the
    # escaped \$v / \$delay / backticks run on the remote side.
    ping_row_from() {
        local _ip=$1 _label=$2
        echo -n "$_ip," >> $file
        ssh_cmd -T user_00@$_ip << eof >> $file 2>err || true
            for v in ${Ip_array[*]}; do
                delay=\`ping -c1 \$v | grep time= | awk -F 'time=' '{print \$2}'\`
                echo -n "\$delay,"
            done
eof
        # If ssh complained about a changed host key, drop the stale
        # known_hosts entry so the next run does not hit it again.
        res=`grep 'get rid of' err || echo NOT-FOUND`
        if [ "$res" != "NOT-FOUND" ]; then
            sed -i "/$_ip/d" ~/.ssh/known_hosts
        fi
        echo "$_label" >> $file
    }

    if [ $has_config = "true" ]; then
        for v in $Config_container; do
            _ip=${v%%:*}
            echo_with_color "checking configsvr $_ip ping to others ..."
            ping_row_from $_ip configsvr
        done
    fi

    if [ $g_has_mongos = "true" ]; then
        for v in $Mongos_container; do
            _ip=${v%%:*}
            echo_with_color "checking mongos $_ip ping to others ..."
            ping_row_from $_ip mongos
        done
    fi

    # mongod nodes: one row per replica-set member, labelled with the RS name
    for rs_idx in $(seq 0 $((Rs_num-1))); do
        rs_cons=(`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list[].container_name"`)
        # number of members in this replica set
        rs_conns_len=`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].mongod_list | length"`
        rs_name=`echo "$Clusterinfo" | jq -r ".rs_list[$rs_idx].rsName"`

        for rs_mongod_idx in $(seq 0 $((rs_conns_len-1))); do
            c=${rs_cons[$rs_mongod_idx]}
            _ip=${c%%:*}
            echo_with_color "checking $_ip in $rs_name ping to others ..."
            ping_row_from $_ip "$rs_name"
        done
    done

    # pretty output
    column -s, -t $file
    ;;

scan)
    # Iterate all regions (or just the one given with -r) and export
    # per-cluster info (versions, conn limit, status, ...) into a CSV
    # under tmp/.
    # cmg scan -r shjr
    echo "Begin: $(date)"

    echo_with_color "coop region etcd map size: ${#coop_region_etcd_map[@]}"
    echo_with_color "region map size: ${#region_map[@]}"
    #for k in ${!coop_region_etcd_map[@]}; do

    f1="all-clusters.csv"
    if [ -e tmp/$f1 ]; then mv tmp/$f1 tmp/$f1.bak; fi
    _header="cluster,mongod_version,config_version,proxy_version,max_client_conns,status,rs_num,region,set"
    echo "$_header" > tmp/$f1

    #scan_region_with_ES

    for k in ${!region_English_2_Chinese_map[@]}; do
        # Pick the stats database by cloud type (domestic/overseas/finance).
        usedb="tencent_cloud_domestic"
        region_chinese=${region_English_2_Chinese_map[$k]}
        if [ x$region_given != x ]; then
            # A single region was requested: write to a region-prefixed
            # CSV; the loop breaks after this first iteration (see below).
            region_chinese=${region_English_2_Chinese_map[$region_given]}
            f1="$region_given-$f1"
            if [ -e tmp/$f1 ]; then mv tmp/$f1 tmp/$f1.bak; fi
            echo "$_header" > tmp/$f1

            in_this_region=${overseas_region_English_2_Chinese_map[$region_given]}
            if [ x$in_this_region != x ]; then usedb="tencent_cloud_overseas"; fi

            in_this_region=${finance_region_English_2_Chinese_map[$region_given]}
            if [ x$in_this_region != x ]; then usedb="finance_cloud"; fi
        else
            in_this_region=${overseas_region_English_2_Chinese_map[$k]}
            if [ x$in_this_region != x ]; then usedb="tencent_cloud_overseas"; fi

            in_this_region=${finance_region_English_2_Chinese_map[$k]}
            if [ x$in_this_region != x ]; then usedb="finance_cloud"; fi
        fi
        echo "usedb=$usedb"

        # NOTE(review): hard-coded mongouser credentials in the URIs below
        # — consider moving them into configuration.
        tmpfile="tmp/region.tmp"
        _cluster_count=$(mongo --quiet mongodb://mongouser:2021%40CMongo@9.220.29.45:27017,9.220.29.114:27017,9.220.30.96:27017/admin?authSource=admin << eof | tail -n1
            // 不同的集群有使用不同的db 
            use $usedb
            db.cluster_stats.data.find({"regionname": "$region_chinese"}).itcount()
eof
)

        echo "begin to iterate $_cluster_count clusters in $region_chinese ..."
        > $tmpfile
        mongo --quiet mongodb://mongouser:2021%40CMongo@9.220.29.45:27017,9.220.29.114:27017,9.220.30.96:27017/admin?authSource=admin << eof >> $tmpfile
            use $usedb
            var cursor = db.cluster_stats.data.find({"regionname": "$region_chinese"})
            // https://stackoverflow.com/questions/12969604/hasnext-not-working-on-collection-in-javascript
            cursor.forEach(function(doc) {
                printjson(doc)
            })
eof
        sed -i '/switched to/ d' $tmpfile
        # Replace the types jq cannot parse (NumberLong/ISODate) with dummy 1s.
        sed -i 's/NumberLong.*,/1,/' $tmpfile
        sed -i 's/NumberLong.*/1/' $tmpfile
        sed -i 's/ISODate.*,/1,/' $tmpfile
        sed -i 's/ISODate.*/1/' $tmpfile

        jq -c '.' $tmpfile > tmp/region_compact.tmp
        extract_fields_from_region_compacted $f1

        if [ x$region_given != x ]; then
            break
        fi
        echo_magenta "$region_chinese query done"
    done

    echo -e "\nEnd: $(date)\nAll results in tmp/$f1"
    ;;

help|h)
    # TODO: real help text; see the iwiki page referenced in the file header.
    echo_with_color "no help"

    ;;

*)
    # Unknown subcommand: report it and exit via error().
    #cat tmp/$Cluster-info.json | jq -r .
    error "no such command: $cmd"
esac

cheatsheet() {
    # Print a cheat sheet of frequently used mongo-shell / ops commands.
    # Fix: both heredoc delimiters are now quoted ('eof') so the sheet is
    # printed verbatim.  With the original unquoted delimiters, bash
    # expanded $vars ($ne, $natural, awk's $2, ...) to empty strings and
    # actually EXECUTED the `date ...` backticks and $(NF) substitutions
    # embedded in the text, mangling the output.
    cat << 'eof'
    echo_with_color "There're following common commands for references\n"
    # used by gdb breakpoint to avoid failover
    cfg=rs.conf(); cfg.settings.heartbeatTimeoutSecs=3600; cfg.settings.electionTimeoutMillis=3600000; rs.reconfig(cfg)
    cfg = rs.conf(); cfg.members[2].priority = 2;  rs.reconfig(cfg)
    cfg = rs.conf(); cfg.settings.chainingAllowed = false; rs.reconfig(cfg)
    rs.syncFrom("9.254.211.188:7003") # tempoarily change sync target

    db.runCommand({listShards:1}) / db.adminCommand({enablesharding:"sharddb"}) / db.runCommand({shardcollection: "sharddb.t", key : {id:1} }) / sh.shardCollection("foo.bar", { zip: 1 } ) / db.createCollection("t")
    db.adminCommand( { flushRouterConfig: 1 } )
    db.printShardingStatus() / db.tb2.getShardDistribution()
    db.runCommand({noAuth: true}) / db.runCommand({noAuth: true, persist: 1}) / db.runCommand({noAuthInfo: 1})
    db.runCommand({proxyStatus:1,dumpConnType:'aggregated'}) / db.runCommand({proxyStatus:1,dumpConnType:'all'})
    db.runCommand({mongodStatus:1,dumpConnType:'aggregated'}) / db.runCommand({mongodStatus:1,dumpConnType:'all'})
    db.getCollectionNames().length
    # 3.2 cmongo get shard key
    db.runCommand({getShardKeys:"new_user"})

    db.runCommand({"changeMongosConn":1,"maxshardConnectionPoolSize":1000,"token":NumberLong("12001087696442048")})
    db.runCommand({"changeMongosConn":1,"maxGlobalConnectionPoolSize":1000,"token":NumberLong("12001087696442048")})

    rs.printSecondaryReplicationInfo()

    # When using the mongo shell, see cursor.readPref() and Mongo.setReadPref()
    db.getMongo().getReadPrefMode() / db.getMongo().setReadPref("secondaryPreferred")
    db.getMongo().setCausalConsistency() / db.getMongo().isCausalConsistency()
    db.t.find({ }).readPref( "secondary")

    # tcmallocAggressiveMemoryDecommit:表示是否开启TcMalloc的aggressive decommit功能
    echo -e '\ndb.runCommand({"getParameter":1,"tcmallocAggressiveMemoryDecommit":1})'
    # tcmallocMaxTotalThreadCacheBytes:表示所有线程最大的cacheSize
    echo -e 'db.runCommand({"getParameter":1,"tcmallocMaxTotalThreadCacheBytes":1})'
    echo -e 'db.adminCommand({getParameter:1, featureCompatibilityVersion: 1})'
    db.adminCommand({"setParameter":1,"tcmallocMaxTotalThreadCacheBytes":NumberLong(1073741824)})
    echo 'db.serverStatus().tcmalloc.tcmalloc.formattedString'

    db.adminCommand( { movePrimary : "test_0", to : "shard1" } )

    echo_red "Tx:"

    echo_red "Backup/Restore:"
    echo 'mongodump --uri mongodb://localhost:50001/covid --oplog'  # 指定oplog 时不可指定--db, --collection
    echo 'mongodump --db covid --archive=covid.archive -h localhost:50001'
    echo -e 'mongorestore --db=test --collection=t1 dump/test/purchaseorders.bson'
    echo -e 'mongorestore --nsInclude=test.t1 dump/'
    # use pipe
    echo -e "mongodump --archive --db=test | mongorestore --archive --nsFrom='test.*' --nsTo='examples.*'"
    echo -e 'mongorestore --uri mongodb://admin:123456@9.134.5.74:30001/admin --dir /data/dump'

    echo_red "get/set parameter"
    # num-of-threads <=  20
    db.runCommand({setParameter:1, wiredTigerEngineRuntimeConfig:'eviction_dirty_target=4,eviction_dirty_trigger=50,eviction_target=60,eviction_trigger=97,eviction_checkpoint_target=5,eviction=(threads_max=16,threads_min=12)'})
    db.adminCommand( { "setParameter": 1, "wiredTigerEngineRuntimeConfig": "cache_size=16G"})
    db.adminCommand( { "getParameter": 1, "wiredTigerEngineRuntimeConfig":1}).wiredTigerEngineRuntimeConfig

    db.runCommand({"setParameter":1,"wiredTigerConcurrentReadTransactions":256,"wiredTigerConcurrentWriteTransactions":256})

    # https://jira.mongodb.org/browse/SERVER-37795
    # mongod --setParameter heapProfilingEnabled=true
    # --setParameter wiredTigerCursorCacheSize=0
    db.adminCommand({"getParameter":"*"})

    # mongo shell create many collections
    # for (i=49; i<59; i++) db.getCollection("col_" + i.toString()).insert({k:i, v:i+1})
    db.adminCommand({"setParameter":1,"syncdelay": 70})  # CP interval
    db.adminCommand({"setParameter":1,"ReadForkEnabled": true})  # CP interval

    echo_red "serverStatus "
    db.serverStatus().wiredTiger.concurrentTransactions  # the so-called tickets
    db.serverStatus().wiredTiger.transaction
    db.serverStatus().wiredTiger.cache
    db.serverStatus().wiredTiger.cache["maximum bytes configured"] # look up cacheSize
    db.serverStatus().wiredTiger["block-manager"]
    db.serverStatus().metrics.operation.writeConflicts
    db.serverStatus().globalLock
    db.serverStatus().storageEngine.supportsCommittedReads # 检查 enableMajorityReadConcern
    # print prettier
    db.runCommand({serverStatus:1}).rocksdb
    db.t.stats().wiredTiger.uri / db.t.stats({"indexDetails":true}).indexDetails._id_.uri  # 显示表名/index name 与文件名的对应

    echo_red "tricks for exploring log "
    # 找到排名前20的writeConflicts 日志行
    grep 'writeConflicts:[1-9][0-9]*' mongod.log | sed 's/.*writeConflicts:\([1-9][0-9]*\).*/\1\t\0/' | sort -nr | sed 's/^[1-9][0-9]*\t//' | head -n 20
    # 认证时间统计
    # 慢查询耗时统计
    tail -f log/mongod.log | grep 'protocol:.*ms$'
    # 连接数统计

    echo_red "profiling:"
    db.setProfilingLevel(2) / db.setProfilingLevel(1, { slowms: 20 })
    db.setProfilingLevel(1, { sampleRate: 0.42 }) # 随机采样slow op 的子集
    db.getProfilingStatus()
    db.system.profile.find().limit(10).sort( { ts : -1 } ).pretty()
    db.adminCommand( { logRotate : 1 } )

    echo_red "common oplog op:"
    db.oplog.rs.find({op: {$ne: "n"} }).sort({$natural:-1}).limit(10)
    db.oplog.rs.find({$and: [ {op: {$ne:"n"}}, {op: "i"} ]}).sort({$natural: -1}).limit(4)
    db.oplog.rs.find({$and: [ {op: {$ne:"n"}}, {op: {$ne:"i"}} ]}).sort({$natural: -1}).limit(4)
    db.oplog.rs.find( { "o.create" : { $exists: true }  }).sort({$natural:-1}).limit(3) # create table
    db.oplog.rs.find({op:"d"}).sort({$natural:-1})
    db.oplog.rs.find({"o._id": ObjectId("5fd3")}) # insert
    db.oplog.rs.find({"o2._id": ObjectId("5fd3")}) # update
    db.oplog.rs.find({"o.drop": { $exists: true } }) # drop collection
    db.oplog.rs.find({"op":"u", "ns":"yapi.project", "ts":{$gt:Timestamp(1618309740,0)}}).sort({$natural:1}).limit(1)
    db.oplog.rs.aggregate([ {$collStats: {latencyStats: { histograms: true}}}])

    db.getReplicationInfo()  # check oplogSize

    echo -e '\ndb.runCommand({ connPoolStats:1})'
    # print unsharded collections
    for(j = 0; j < leng; j++) { var name = db.runCommand( { listCollections: 1.0 }).cursor.firstBatch[j].name; var bool = db.getCollection(name).stats().sharded; if(!bool) { print(name) } }
    db.runCommand({getParameter:1, ttlMonitorEnabled: 1}) / db.runCommand({getParameter:1,"ttlDeleteBatch":1}) / db.runCommand({getParameter:1,"ttlMonitorSleepSecs":1})
    db.runCommand( { buildInfo: 1 } )

    db.t.createIndex( { "expireAtt": 1 }, { expireAfterSeconds: 30 } )
    db.runCommand({connectionStatus : 1})
    # 可以通过这种方式 setLogLevel调大level
    db.setLogLevel(1, "command")
    # 若省略component,则为所有component 设置 log level
    db.setLogLevel(2, "storage.journal" )

    echo_red "create user & role"
    db.createRole({ "role" : "restoreoplog", "privileges" : [ { "resource" : { "anyResource" : true }, "actions" : [ "anyAction" ] } ], "roles" : [ ] })
    db.createUser({user: "mongouser", pwd: "123456", "roles" : [ { "role" : "dbAdminAnyDatabase", "db" : "admin" }, { "role" : "readWriteAnyDatabase", "db" : "admin" }, { "role" : "backup", "db" : "admin" }, { "role" : "restore", "db" : "admin" }, { "role" : "clusterAdmin", "db" : "admin" }, { "role" : "restoreoplog", "db" : "admin" } ]})
    db.runCommand({createUser: 'sha1', pwd: 'sha1', roles: ['root'], mechanisms: ['SCRAM-SHA-1']})
    db.runCommand({createUser: 'sha256', pwd: 'sha256', roles: ['root'], mechanisms: ['SCRAM-SHA-256']})
    db.runCommand({createUser: 'both', pwd: 'both', roles: ['root'], mechanisms: ['SCRAM-SHA-1', 'SCRAM-SHA-256']})
    mongo mongodb://sha256:sha256@xxx/admin?authMechanism=SCRAM-SHA-256

    echo_red "balancer"
    sh.isBalancerRunning()
    sh.enableBalancing()
    sh.startBalancer()
    sh.getBalancerState() / db.adminCommand( { balancerStatus: 1 } )

    # 打印命令行和config 文件
    db.adminCommand({getCmdLineOpts: true})
    # 打印kernel version, cpu 指令集,memory size, hostname
    db.adminCommand({hostInfo: true})

    db.settings.find()  # 可查看balancer window
    db.groupCollection_3102.find({field1: '99'.repeat(8)})

    # 某个表发生迁移
    # 日志: moveChunk: "douyin.posts"

    # 特殊的登陆方式
    mongo mongodb://__system:keyfile@xx/local

    # 统计某个时间点 连接和断开 ip 汇总
    grep 'May 20 19:5' log/mongod.log | grep 'connection accept' | awk -F "from" '{print $2}' | awk -F ":" '{print $1}' | sort | uniq -c
    grep 'May 20 19:5' log/mongod.log | grep 'end connection' | awk -F "connection" '{print $2}' | awk -F ":" '{print $1}' | sort | uniq -c

    # 统计慢操作延时的次数/最大值/最小值/平均值
    cat b | awk -F "protocol:op_msg" '{print $2}' | awk -F "ms" '{print $1}' | awk 'BEGIN{total=0; count=0; min=20000; max=0;} { total+=$(NF); count+=1; if ($(NF) < min) { min=$(NF) }; if ($(NF) > max) {max=$(NF) } } END{print "total:" total "\ncount: "count  "\nmin(ms):" min "\nmax(ms):" max "\navg(ms):" total/count}'


    # 统计某个端口的客户端ip, 按照连接数大小排序
    ss -tnp src :7026 | grep 'pid=' | awk '{print $5}' | cut -d ':' -f1 | sort | uniq -c | sort -n

    # 当进入到container 目录时,可用以下命令过滤到冗余log
    egrep -v 'start connection from| accepted from|Successfully authenticated as|end connection|FlowManager::dump_inlock cost|CMongoMongodStatsManager::dump_inlock|WARN:FlowManager::dump_inlock too many|received client metadata from|so no filter info| Starting new replica set monitor|task: UnusedLockCleaner|CmongoMongodStatsManager|Starting new replica set monitor' log/mongod.log > a

    tail -f log/mongod.log | egrep -v 'start connection from| accepted from|Successfully authenticated as|end connection|FlowManager::dump_inlock cost|CMongoMongodStatsManager::dump_inlock|WARN:FlowManager::dump_inlock too many|received client metadata from|so no filter info| Starting new replica set monitor|task: UnusedLockCleaner|CmongoMongodStatsManager'

    # for 4.4
    tail -f log/mongod.log  | egrep -v 'Successful authentication|Connection accepted|Connection ended|done building|client metadata|createCollection|Index build: completed successfully|Index build: completed|createIndexes|Index build: '

    # get authencation time
    egrep "accepted from" log/proxy.log | grep -v "127.0" | awk "{print $12}" | while read l; do conn_num=${l#\#}; egrep " #${conn_num} |conn${conn_num}] Successfully auth" log/proxy.log ; done

    # exclude some unrelevant verbose log (standalone)
    tail -f st.log | egrep -v 'WTJournalFlusher|clientcursormon|AuditManagerMongodRefresh|ftdc|PeriodicTaskRunner|LogicalSessionCacheRefresh|TTLMonitor|startPeriodicThreadToAbortExpiredTransactions|free_mon'

    # exclude verbose log (replica set)
    tail -f 3k1.log | egrep -v 'PeriodicTaskRunner|AuditManagerMongodRefresh|NoAuthCacheUpdater|Scheduling heartbeat to|Callback was canceled|command: replSetHeartbeat|Received heartbeat request|Processed heartbeat from|replSetHeartbeat:|Received response to heartbeat|{ getMore:|command: replSetUpdatePosition|command: replSetUpdatePosition|hbmsg: |replSetUpdatePosition: 2|received notification that node|NoopWriter|startPeriodicThreadToAbortExpiredTransactions|Updating _lastCommittedOpTime to|Setting replication|oldest_timestamp set to|setting new oplogReadTimestamp|get command: isMaster|cmongo_vips| flush took |LogicalSessionCacheRefresh|replSetGetStatus'

    # 某个server 端口处于close-wait 的连接 (不能写成dst ,因为close-wait 是server 状态)
    ss -o state close-wait -tnp src :7026 | wc -l
    # 连续用top 采集某 thread 的CPU
    f=/tmp/top.log; > $f ; while true; do echo -n "`date '+%H:%M:%S'` " >> $f ; top -H -b -n1 | grep 16459 >> $f ; tail -n1 $f; done

    # ycsb
    bin/ycsb load mongodb -s -P w-load -p table=t6 -p -threads=64
eof
    # https://stackoverflow.com/questions/32642308/in-mongodb-how-do-i-print-all-collection-counts-and-indexes/32642321
    # https://stackoverflow.com/questions/23929235/multi-line-string-with-extra-space-preserved-indentation
    # Print the collection count of every db (also quoted so the snippets
    # stay literal; note <<- only strips leading tabs, these lines are
    # space-indented on purpose).
    a=$(cat <<- 'eof'
    var dbs = db.adminCommand('listDatabases'); dbs.databases.forEach(function(database) { db = db.getSiblingDB(database.name); print("----- Database: " + database.name);  var colCount = db.getCollectionNames().length; print("\tcollections: " + colCount); if (colCount > 30) return; db.getCollectionNames().forEach(function(col) {print("\t  " + col + " docs:" + db[col].count() + ", indexes:" + db[col].getIndexes().length); }); })

    var dbs = db.adminCommand('listDatabases'); dbs.databases.forEach(function(database) { db = db.getSiblingDB(database.name); print("----- Database: " + database.name);  var colCount = db.getCollectionNames().length; print("\tcollections: " + colCount);  db.getCollectionNames().forEach(function(col) {print("\t  " + col + " size:" + db[col].stats().size ); }); })

    var dbs = db.adminCommand('listDatabases'); dbs.databases.forEach(function(database) { var dbname =database.name; if (dbname == "admin" || dbname == "local" || dbname == "config") return;  print("dropping " + dbname + " ..."); db = db.getSiblingDB(dbname); db.dropDatabase(); })
eof
)

}

# possible kernel error in mongo log:
# No space left on device

    # 慢日志格式:
    # writeConflicts
    # Sun Oct  4 20:40:29.456 I COMMAND  [conn1181433] command matrix_raw_data.Trace_StartUp.20201004.0x27001096 command: insert { insert: "Trace_StartUp.20201004.0x27001096", ordered: true, $db: "matrix_raw_data" } ninserted:88 keysInserted:450 writeConflicts:9 numYields:0 reslen:415 locks:{ Global: { acquireCount: { r: 103, w: 103 } }, Database: { acquireCount: { w: 103 } }, Collection: { acquireCount: { w: 13 } }, oplog: { acquireCount: { w: 90 } } } protocol:op_query 21565ms

    # insert
    # Sat Oct  3 12:14:01.305 I COMMAND  [conn1082982] command matrix_stat_data.Trace_EvilMethod.detail.0x27000ec6 command: insert { insert: "Trace_EvilMethod.detail.0x27000ec6", ordered: false, $db: "matrix_stat_data" } ninserted:9717 keysInserted:19434 numYields:0 reslen:415 locks:{ Global: { acquireCount: { r: 304, w: 304 } }, Database: { acquireCount: { w: 304 } }, Collection: { acquireCount: { w: 152 } }, oplog: { acquireCount: { w: 152 } } } protocol:op_query 133ms

    # find
    # Wed Jun  9 16:55:53.990 I COMMAND  [conn3817] command test_0.groupCollection_32775 command: find { find: "groupCollection_32775", filter: { field1: "8888888888888888" }, limit: 1, singleBatch: true, lsid: { id: UUID("b3c17455-0fe5-4b3b-a56b-a32e13b95fc5") }, $clusterTime: { clusterTime: Timestamp(1623228947, 1), signature: { hash: BinData(0, 0000000000000000000000000000000000000000), keyId: 0 } }, $db: "test_0", $readPreference: { mode: "primary" } } planSummary: COLLSCAN keysExamined:0 docsExamined:7 cursorExhausted:1 numYields:1 nreturned:1 reslen:317 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_msg 111ms

    # update
    # Sun Jun  6 23:31:51.852 I WRITE    [conn195691] update config.system.sessions command: { q: { _id: { id: UUID("19c04c10-39da-4681-8613-6889a23a1b1d"), uid: BinData(0, 407AADAF22760292AACA78CE8A9E8695FF4F9F0C3D0E610E0B2F1545435ECA24) } }, u: { $currentDate: { lastUse: true }, $setOnInsert: { user: { name: "groupCollectionFalse2@admin" } } }, multi: false, upsert: true } planSummary: IDHACK keysExamined:1 docsExamined:1 nMatched:1 nModified:1 keysInserted:1 keysDeleted:1 numYields:1 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 1 } } } 837ms

    # remove
    # Mon Jun  7 10:41:33.625 I WRITE    [LogicalSessionCacheReap] remove config.transactions command: { q: { _id: { id: UUID("e9c76049-91ce-4fa9-889f-4075df5522c7"), uid: BinData(0, 407AADAF22760292AACA78CE8A9E8695FF4F9F0C3D0E610E0B2F1545435ECA24) } }, limit: 0 } planSummary: IDHACK keysExamined:1 docsExamined:1 ndeleted:1 keysDeleted:1 numYields:1 locks:{ Global: { acquireCount: { r: 13, w: 7 } }, Database: { acquireCount: { r: 4, w: 7 } }, Collection: { acquireCount: { r: 4, w: 5 } }, oplog: { acquireCount: { w: 1 } } } 26499ms


# omsESAddrs 是有cmongo_config index 的,代表元数据ES,从中可找到region 的etcd地址
# operationES 是运营ES
# master.conf 中的esAddr 是慢日志ES

# mplotqueries mongod.log --group namespace --group-limit 10 --type scatter --output-file a.png


