#!/bin/bash
##################################################################################################################################################
# Description:获取各种自定义监控数据,然后将数据传递给pushgateway                                                                                     #
# date: 2023-07-20                                                                                                                                #
# Email: lintiany@outlook.com                                                                                                                     #
# keynote：yum install -y sysstat                                                                                                                 #
# For example：*/1 * * * * cd /app/shell;/bin/bash  get_monitor_date.sh system_check;sleep 1;/bin/bash  get_monitor_date.sh server_nginxlog_check #
###################################################################################################################################################
# Resolve the script's own directory and run from there so relative paths work.
# (fixed: quoted $0 / $CURRENT_DIR so paths with spaces don't break, and the
# script aborts if the cd fails instead of running in the wrong directory)
CURRENT_DIR=$(cd "$(dirname "$0")" && pwd)
cd "$CURRENT_DIR" || exit 1
# First CLI argument selects which check function runs (see the case at the bottom).
select_check=$1

# Shared variables (pushgateway host/port, log file paths, ...) live in an env
# file so the script can be updated without touching site-specific settings.
# Some variables are kept inline below so the functions read at a glance.
source /app/shell/.monitor_date.env
logdir=/app/shell/log
#系统##########################################################################################################################################################################################
check_system(){
	# Collect host-level metrics (CPU, memory, network, TCP, per-disk usage/IO,
	# watched processes) into $system_checklog as "name:value" lines, push the
	# file via push_server, then start a background iostat sample that the
	# NEXT invocation reads back through $extra_system_log.
	#cpu count, 5-minute load average, cpu usage
	cpu_num=`cat /proc/cpuinfo| grep "processor"| wc -l`
	cpu_load=`/usr/bin/uptime | awk -F ',' '{print $(NF-1)}' | sed -e 's/^[ ]*//g'`
	#cpu usage: average of the last 10 iostat samples written by the previous run
	cpu_use=`cat $extra_system_log |  awk '{print $1}'  |  grep '[0-9]' | tail -n 10 |  awk '{sum+=$1}END{print sum/NR}'`

	#memory size and usage, units converted to MB
	mem_total=`cat /proc/meminfo | grep -Ew 'MemTotal' | awk '{printf "%.2f\n",$2/1024}'`
	mem_use=`cat /proc/meminfo | grep -Ew 'MemTotal|MemFree|Cached|Buffers' | awk '{print $2}' |tr "\n" "," |sed -e 's/,$/\n/' | awk -F ',' '{print (1-($2+$3+$4)/$1)*100}'`
	#NIC RX/TX cumulative byte counters in MB for the first real interface
	# NOTE(review): these are lifetime totals from /proc/net/dev; the per-minute
	# delta mentioned in the original comment is presumably derived downstream — confirm.
	bind_rx=`cat /proc/net/dev | grep -Ev 'lo|docker|Transmit|compressed' |head -n 1 | awk '{printf "%.2f\n",$2/1024/1024}'`
	bind_tx=`cat /proc/net/dev | grep -Ev 'lo|docker|Transmit|compressed' |head -n 1 | awk '{printf "%.2f\n",$10/1024/1024}'`

	#tcp connection counts ($ssname is expected from the sourced env file)
	net_all=`$ssname state ALL| grep -v Send-Q | wc -l`
	net_listen=`$ssname state LISTENING | grep -v Send-Q | wc -l`
	#system_name=`cat /etc/redhat-release  | awk '{print $1"_"$4}'` 

	#write the collected values into the check file (<<- strips the leading tabs)
	cat > $system_checklog <<- EOF
	cpu_num:$cpu_num
	cpu_load:$cpu_load
	cpu_use:$cpu_use
	mem_total:$mem_total
	mem_use:$mem_use
	bind_rx:$bind_rx
	bind_tx:$bind_tx
	net_all:$net_all
	net_listen:$net_listen
	EOF

	#updating every consumer was too much work, so the system_ prefix was dropped
	# cat > $system_checklog <<- EOF
	# system_cpu_num:$cpu_num
	# system_cpu_load:$cpu_load
	# system_cpu_use:$cpu_use
	# system_mem_total:$mem_total
	# system_mem_use:$mem_use
	# system_bind_rx:$bind_rx
	# system_bind_tx:$bind_tx
	# system_net_all:$net_all
	# system_net_listen:$net_listen
	# EOF

	#disk metrics
	#discover disk device names
	disk_info=`sudo /usr/sbin/fdisk -l |  grep sectors |   grep Disk |  grep -v mapper | awk -F '[ |:]' '{print $2}'`
	#hard-code the list if detection returns nothing
	#disk_info="sda sdb"
	index=0
	for devname in $disk_info
	do		
		let index+=1
		# pairs the Nth fdisk disk with the Nth non-special fstab mount point —
		# assumes both lists are in the same order; TODO confirm per host
		dfname=`cat /etc/fstab  |  grep -Ev '#|^$|swap|nfs|boot' | awk '{print $2}' | sed -n "$index"p`
		diskname=`echo $devname | awk -F '/' '{print $NF}'`
		#disk_data_total=`df | grep -Ev 'tmpfs|文件系统|Filesystem|^$|/boot' | head -n 2 |  tail -n 1 | awk '{printf "%.2f\n",$2/1024/1024}'`
		disk_total=`/usr/bin/df | grep -w $dfname  | grep -v resource |awk '{printf "%.2f\n",$2/1024/1024}' `
		disk_use=`/usr/bin/df | grep -w $dfname | grep -v resource | awk '{print $5}'| awk -F '%' '{print $1}'`
		#IO usage from iostat; the first sample is a since-boot figure, so only the last 10 are averaged
		disk_read=`cat $extra_system_log |  grep $diskname | awk '{print $6}'  | tail -n 10   | awk '{sum+=$1}END{print sum/NR}'`
		disk_write=`cat $extra_system_log |  grep $diskname | awk '{print $7}' | tail -n 10  | awk '{sum+=$1}END{print sum/NR}'`
		iops_use=`cat $extra_system_log |  grep $diskname | awk '{print $14}'  | tail -n 10   | awk '{sum+=$1}END{print sum/NR}'`
		echo "disk_total:$disk_total:$diskname" >>$system_checklog
		echo "disk_use:$disk_use:$diskname" >>$system_checklog
		echo "disk_read:$disk_read:$diskname" >>$system_checklog
		echo "disk_write:$disk_write:$diskname" >>$system_checklog
		echo "iops_use:$iops_use:$diskname" >>$system_checklog
	done

	#check whether watched processes have died
	#names may only use underscores and must stay unique so the ps match is exact
	course_info=""
	for course_name in $course_info
	do
		cource_num=`ps -ef | grep -v grep |  grep $course_name| wc -l`
		#course_name=`echo  course_$course_name | awk -F '_' '{print $1"_"$2"_"$3}'`
		course_name=`echo  course_$course_name`
		echo "$course_name:$cource_num" >>$system_checklog
	done

	#push the values to the pushgateway
	push_server $system_checklog 
	sleep 1
	#sample disk and cpu usage in the background for the next run to read
	/usr/bin/iostat -x 2 11 >$extra_system_log &
}


#各类服务###################################################################################################################################################################
#监控nginx日志,我这边是自定义了一下日志,这样看起来更直观
#log_format  wethink '$time_local|$status|$request_time|$body_bytes_sent|$remote_addr|$http_referer|$request_uri';
check_nginxlog(){
    # Scan nginx access logs incrementally: count 4xx/5xx status codes in the
    # lines added since the previous run and push the counts to the pushgateway.
    # Expects the custom log format:
    #   log_format wethink '$time_local|$status|$request_time|$body_bytes_sent|$remote_addr|$http_referer|$request_uri';
    #log directory
    nginx_logpwd="/var/log/nginx"
    #logs to monitor
    for logname in access.log
    do
        project_name=`echo $logname | awk -F '.' '{print $1}'`
        recore_lognum="$project_name"_wc.log
        #seed the line-count marker file with 1 if it does not exist yet
        [ ! -f "$logdir/$recore_lognum" ] && echo 1 > $logdir/$recore_lognum
        #line number reached on the previous run
        last_count=`cat $logdir/$recore_lognum`
        #current number of lines in the log
        current_count=`cat $nginx_logpwd/$logname | wc -l`
        #nothing new since last run -> skip this log
        #(fixed: message referenced undefined $logfile, now $logname)
        [ $last_count -eq $current_count ] && echo "`date` $logname no change" && continue
        #the log is truncated daily; a smaller current count means rollover, restart at line 1
        [ $last_count -gt $current_count ] && last_count=1
        #count 4xx / 5xx status codes (field 2) in the new slice of the log
        nginx_4xx=`sed -n "$last_count,$current_count p" $nginx_logpwd/$logname |   awk -F '|'  '{if ($2 ~ /^4/)print $2}'   | sort | uniq -c |  awk '{sum +=$1}END{print sum}'`
        nginx_5xx=`sed -n "$last_count,$current_count p" $nginx_logpwd/$logname |   awk -F '|' '{if ($2 ~ /^5/)print $2}'   | sort | uniq -c |  awk '{sum +=$1}END{print sum}'`
        #pushgateway only accepts numeric samples; default each counter independently
        #(fixed: the original if/elif could only ever default ONE of the two)
        if [[ $nginx_5xx == "" ]];then
            nginx_5xx=1
        fi
        if [[ $nginx_4xx == ""  ]];then
            nginx_4xx=1
        fi
        echo "nginx_4xx $nginx_4xx" | $pushurl
        echo "nginx_5xx $nginx_5xx" | $pushurl
        echo $current_count > $logdir/$recore_lognum
        #record per-minute line counts to help troubleshooting
        echo "`date +%F-%H-%M`,$last_count,$current_count" >>$nginx_logpwd/nginx_record.log
    done
}

nginx_blackip(){
	# Incremental nginx access-log scan like check_nginxlog, plus: when the new
	# slice contains more than 100 4xx responses, find the worst-offending IP in
	# the last ~5 minutes and, when it crosses the thresholds, append it to the
	# nginx deny list, reload nginx and send a notification.
	#log directory
	nginx_logpwd="/var/log/nginx"
	black_conf="/etc/nginx/conf.d/blackip.conf"
	#timestamp command matching the nginx $time_local prefix, minute precision
	nginx_time="date +%d/%b/%Y:%H:%M"
	#logs to monitor
	for logname in access.log
	do
		project_name=`echo $logname | awk -F '.' '{print $1}'`
		recore_lognum="$project_name"_wc.log
		#seed the line-count marker file with 1 if it does not exist yet
		[ ! -f "$logdir/$recore_lognum" ] && echo 1 > $logdir/$recore_lognum
		#line number reached on the previous run
		last_count=`cat $logdir/$recore_lognum`
		#current number of lines in the log
		current_count=`cat $nginx_logpwd/$logname | wc -l` 
		#nothing new since last run -> skip (fixed: message used undefined $logfile)
		[ $last_count -eq $current_count ] && echo "`date` $logname no change" && continue
		#the log is truncated daily; a smaller count means rollover, restart at line 1
		[ $last_count -gt $current_count ] && last_count=1
		#count 4xx (except 403) and 5xx in the new slice; average request time and body size
		nginx_4xx=`sed -n "$last_count,$current_count p" $nginx_logpwd/$logname |   awk -F '|' '{if ($2 ~ /^4/)print $2}' | grep -v 403  | sort | uniq -c |  awk '{sum +=$1}END{print sum}'`
		nginx_5xx=`sed -n "$last_count,$current_count p" $nginx_logpwd/$logname |   awk -F '|' '{if ($2 ~ /^5/)print $2}' | sort | uniq -c |  awk '{sum +=$1}END{print sum}'`
		nginx_api_time=`sed -n "$last_count,$current_count p" $nginx_logpwd/$logname | grep /app | awk  -F '|'  '{sum+=$3} END {print sum/NR}'`
		nginx_size=`sed -n "$last_count,$current_count p" $nginx_logpwd/$logname  | awk  -F '|'  '{sum+=$4} END {print sum/NR}'`

		#pushgateway only accepts numeric samples; default empty counters to 1
		if [[ $nginx_5xx == "" ]];then
			nginx_5xx=1
		fi
		if  [[ $nginx_4xx == "" ]];then
			nginx_4xx=1
		fi
		#more than 100 4xx responses in the slice -> look for an offending IP
		if [ $nginx_4xx  -gt 100 ];then
			min_log="$logdir/min_nginx.log"
			#line number recorded ~5 minutes ago in nginx_record.log
			min_count=$(grep  `$nginx_time -d '-5 min'`  $nginx_logpwd/nginx_record.log  |  awk -F ',' '{print $2}')
			[ $min_count -gt $last_count ] && min_count=1
			sed -n "$min_count,$last_count p" $nginx_logpwd/$logname >$min_log
			#filter the 5-minute slice and find the IP with the most 4xx responses
			#grep -E "`$nginx_time`|`$nginx_time -d '-1 min'`|`$nginx_time -d '-2 min'`|`$nginx_time -d '-3 min'`|`$nginx_time -d '-4 min'`"  $nginx_logpwd/$logname > $min_log
			blackip=`cat $min_log | awk -F '|' '{if ($2 ~ /^4/) print $5}' | sort | uniq -c | sort -nr | head -n 1  | awk '{print $2}'`
			count=0
			# NOTE(review): `$nginx_time` prints a single word, so this loop only
			# runs once — it looks like a per-minute check was intended; confirm
			for i in `$nginx_time`  
			do
				if [  `cat $min_log  |  grep $blackip  |   awk -F '|' '{if ($2 ~ /^4/) print $5}' | wc -l` -gt 100 ];then
					#(fixed: was the nonsensical `seq count +=1`, which never incremented)
					count=$((count+1))
				fi
			done 	
			blackip_4xx_total=`cat   $min_log  |  grep $blackip  |  awk -F '|' '{if ($2 ~ /^4/)print $2}' | wc -l`
			blackip_5min_total=`cat  $min_log  |  grep $blackip  |  wc -l`
			# cat   $min_log | awk -F '|' '{if ($2 ~ /^4/)print $1":"$5}' | awk -F ':' '{print $1":"$2":"$3,$5}'  |  awk '{a[$1" "$2]+=1} END {for (i in a) {print i,a[i]}}'  | sort -k 3 -nr |head -n 20
			#blacklist when the IP has >1000 4xx in 5 minutes AND tripped the per-minute check
			if [  $blackip_4xx_total -gt 1000  ]  &&   [  $count -gt 3  ];then
				#echo $blackip_4xx_total,$blackip_5min_total
				echo "deny $blackip;" >>$black_conf
				/usr/sbin/nginx -t 
				/usr/sbin/nginx -s reload
				python3 /app/shell/send.py  "$blackip",5分钟内共请求"$blackip_5min_total"次,4xx错误共"$blackip_4xx_total"次	   wethink,黑名单通知  
			fi
		fi
		echo "nginx_4xx $nginx_4xx" | $pushurl
		echo "nginx_5xx $nginx_5xx" | $pushurl
		#average response time and body size
		#(fixed: pushed undefined $request_api_time/$request_size instead of the computed values)
		echo "request_api_time $nginx_api_time" | $pushurl
		echo "request_size $nginx_size" | $pushurl
		echo $current_count > $logdir/$recore_lognum
		#record per-minute line counts to help troubleshooting
		echo "`$nginx_time`,$last_count,$current_count" >>$nginx_logpwd/nginx_record.log
	done
}


check_tomcatlog(){
	# Incrementally scan catalina.out for error lines; restart tomcat when any
	# are found and push the error count to the pushgateway.
	# Relies on $logpwd and $last_num being set (e.g. by the sourced env file).
	tomcatlog=catalina.out
	logfilemarkfile=catalina.log
	error_wc=error_wc.log
	#seed the line-count marker file with 1 if it does not exist yet
	[ ! -f "$last_num/$logfilemarkfile" ] && echo 1 > $last_num/$logfilemarkfile
	#line number reached on the previous run
	last_count=`cat $last_num/$logfilemarkfile`
	#current number of lines in the log
	current_count=`cat $logpwd/$tomcatlog | wc -l` 
	#nothing new since last run -> done
	#(fixed: 'continue' is illegal outside a loop and would abort the function
	# with an error; also the message referenced undefined $logfile)
	[ $last_count -eq $current_count ] && echo "`date` $tomcatlog no change" && return 0
	#the log is truncated daily; a smaller count means rollover, restart at line 1
	[ $last_count -gt $current_count ] && last_count=1
	#count error lines in the new slice
	# NOTE(review): the pattern "登录异常|" has an empty alternative and therefore
	# matches EVERY line — confirm the intended error pattern
	tomcat_error=`sed -n "$last_count,$current_count p" $logpwd/$tomcatlog | grep -E "登录异常|" |wc -l`
	#restart tomcat when errors were found
	if [[  $tomcat_error > 1   ]];then 
		tomcat_dir=/app/tomcat/xxx
		pkill -f $tomcat_dir
		$tomcat_dir/bin/startup.sh
	fi
	#(fixed: pushed undefined $login_log instead of the computed $tomcat_error)
	echo "tomcat_error $tomcat_error" | curl --data-binary @- http://$pushgateway_ip:$pushgateway_port/metrics/job/system/project/$project/bindip/$bindip/servicename/$servicename
	echo $current_count > $last_num/$logfilemarkfile
}


#监控mysql
check_mysql(){
	# Collect MySQL global status counters, max_connections and the locked-table
	# count into $mysql_infolog, extract the watched counters as "name:value"
	# lines into $mysql_checklog and push them via push_server.
	client_mysql="/usr/bin/mysql -uuser -p'passwd' -hxxip -P 3306"
	$client_mysql -e "show global status" >$mysql_infolog
	#max connections
	$client_mysql -e "show variables" |  grep -w max_connections >>$mysql_infolog
	#whether any tables are locked
	$client_mysql -e  "show OPEN TABLES where In_use > 0;" |wc -l >>$mysql_infolog
	#truncate the check log (fixed: was '>mysql_checklog', a literal file name)
	>$mysql_checklog
	#select,update,insert,delete,commit,rollback,bytes sent/received,current connections
	mysql_check_info="Com_select Com_update Com_insert Com_delete Com_commit Com_rollback Bytes_sent Bytes_received Threads_connected"
	#(fixed: loop iterated over undefined $mysql_info, so nothing was ever pushed)
	for mysql_check_name in $mysql_check_info
	do
		monitor_name=`echo $mysql_check_name | awk -F '_' '{print "mysql_"$0}'`
		value=`cat $mysql_infolog |  grep -w $mysql_check_name |awk  '{print $2}'`
		echo "$monitor_name:$value">>  $mysql_checklog
	done
	#push the values
	push_server $mysql_checklog
}

#监控redis
check_redis(){
	# Collect redis INFO metrics into $redis_infolog, extract the watched fields
	# as "redis_name:value" lines into $redis_checklog and push via push_server.
	client_redis="/usr/bin/redis-cli -a xx -p 6379 -h xx"
	$client_redis info >$redis_infolog
	>$redis_checklog
	redis_check_info="connected_clients blocked_clients keyspace_hits keyspace_misses  used_cpu_sys used_memory used_memory_rss"
	#(fixed: loop iterated over undefined $redis_info, and `$$redis_infolog`
	# expanded to "<PID>redis_infolog" instead of the info file)
	for redisinfo in $redis_check_info
	do
		monitor_name=`cat  $redis_infolog |  grep -w $redisinfo | awk -F ':' '{print "redis_"$1}'`
		value=`cat  $redis_infolog |  grep  -w $redisinfo | awk -F ':' '{print $2}'`
		echo "$monitor_name:$value"  >>$redis_checklog
	done
	push_server $redis_checklog
}

#监控mongo
check_mongo(){
	# Collect mongod serverStatus metrics into $mongo_infolog, extract the
	# watched fields as "mongo_name:value" lines into $mongo_checklog and push.
	client_mongo="mongo  --host $host --port $port --quiet"
	echo "db.serverStatus()" | $client_mongo >$mongo_infolog
	#(fixed: truncated $mongo_infolog right after writing it; truncate the check log instead)
	>$mongo_checklog
	mongo_info="available current page_faults myState currentQueue.readers currentQueue.writers"
	for mongoinfo in $mongo_info
	do
		#(fixed: read $redis_infolog / `$$redis_infolog` and used a "redis_"
		# metric prefix — copy/paste leftovers from check_redis)
		monitor_name=`cat  $mongo_infolog |  grep -w $mongoinfo | awk -F ':' '{print "mongo_"$1}'`
		value=`cat  $mongo_infolog |  grep  -w $mongoinfo | awk -F ':' '{print $2}'`
		echo "$monitor_name:$value"  >>$mongo_checklog
	done
	push_server $mongo_checklog
}

#监控kafka
check_kafka(){
	# Aggregate kafka consumer lag for warningsTopic per channel (1..16) and
	# write the result as a node_exporter textfile-collector metric.
	/data/kafka/bin/kafka-consumer-groups.sh --describe --bootstrap-server xx,xx,xx --all-groups |  grep warningsTopic |  awk -F ' ' 'BEGIN{sum[$1]=0}{sum[$1]+=$6}END{for(i=1; i<=16; i++)print i","sum[i]}' > $logdir/kafka.log
	#(fixed: loop read undefined $tmplog; iterate over the file just written)
	for i in `cat $logdir/kafka.log`
	do
		channel=`echo $i | awk -F ',' '{print $1}'`
		#missing sums default to 0 so the metric stays numeric
		value=`echo $i   | awk -F ','  '{if ($2 == "") {print 0}else {print $2}}'`
		echo KAFKA{Channel=\"$channel\"} $value >>/usr/local/node_exporter/metrics/kafka.prom
	done
	#(removed: leftover `push_server $redis_checklog` that re-pushed stale redis data)
}

#监控app接口状态,判断进程状态是否正常
check_app_status(){
	# Probe the app health endpoint and push the HTTP status code it answers with.
	#app_url
	app_status_url="http://ip:port/xxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
	app_status=$(curl -Is --connect-timeout 5 $app_status_url | head -n 1 | awk '{print $2}')
	#pushgateway only accepts numeric samples, so an unreachable endpoint pushes 1
	if [[ -z $app_status ]];then
		app_status=1
	fi
	echo  "app_status $app_status" | $pushurl
	#restart on non-200 if desired
	#systemctl restart tomcat
}

#监控线程情况
check_process(){
	# Fetch scheduler/task status as CSV from the app, map the Chinese status
	# strings to numeric codes (pushgateway only accepts numbers) and push.
	#scheduler endpoint
	process_url="http://ip:port/xxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
	curl  $process_url  |   jq '.[]  | {name:.schedulerInfo.name,status:.status}' | jq -r '[.[]] | @csv' |  sed 's/\"//g' >$process_checklog
	#status string -> code: 0 success, 1 not enabled, 2 not run, 3 timed out, 4 failed
	sed -i \
		-e 's/任务执行成功/0/g' \
		-e 's/任务未开启/1/g' \
		-e 's/任务未执行/2/g' \
		-e 's/任务执行超时/3/g' \
		-e 's/任务执行失败/4/g' $process_checklog
	#hand the file to the pushgateway
	push_server $process_checklog
}

#监控在线人数,通过登陆接口获取cookie在获取对应值
check_online(){
	# Read per-project credentials from wethink_online.csv, fetch each project's
	# online-user count through its API (login cookie cached on disk, re-login
	# once when the cached cookie no longer works) and push the count.
	date_pwd=/app/shell/data
	online_file=$date_pwd/wethink_online.csv
	pushgateway_ip=192.168.2.200
	instance=192.168.2.200
	pushgateway_port=60003
	select_job=server
	servicename=monitor
	#CSV columns: project,domain,usrSteUsrId,userPassword (header row filtered out)
	for  online_info  in   `cat $online_file | grep -vw usrSteUsrId`
	do
		project=`echo $online_info | awk -F ',' '{print $1}'`
		domian_name=`echo $online_info | awk -F ',' '{print $2}'`
		usrSteUsrId=`echo $online_info | awk -F ',' '{print $3}'`
		userPassword=`echo $online_info | awk -F ',' '{print $4}'`
		#login endpoint
		wethink_login=$domian_name/app/user/single/userlogin/login
		#online-count endpoint
		wethink_useronline=$domian_name/app/api/home/user/statistics
		
		cookie_file=$date_pwd/.$project.cookie
		online_number=`curl  -b $cookie_file  "$wethink_useronline"| jq .onlineUserStatistics`
		#cached cookie expired (the jq pipeline failed) -> log in again and retry once
		if [[ $? != 0 ]];then
			curl  -c $cookie_file -d '{"usrSteUsrId":"'$usrSteUsrId'", "userPassword":"'$userPassword'"}' -H "Content-Type: application/json" -X POST "$wethink_login"
			online_number=`curl  -b $cookie_file  "$wethink_useronline"| jq .onlineUserStatistics`
		fi
		pushurl="curl --data-binary @- http://$pushgateway_ip:$pushgateway_port/metrics/job/$select_job/project/$project/instance/$instance/servicename/$servicename"
		echo  "online_number  $online_number"|  $pushurl
	done
}

#云服务监控,######################################################################################################################################################################
#阿里,aws,腾讯,谷歌都是类似方式,看下文档,稍微修改即可
cloud_env(){
	# Shared settings for the aliyun CMS metric queries below.
	# The window ends 2 minutes in the past because the newest datapoints are
	# not available immediately from the cloud monitoring API.
	Periodtime=120
	endtime=$(date +%F" "%H:%M:%S -d '2 minute ago')
	startime=$(date +%F" "%H:%M:%S -d '4 minute ago')
	alicms="/usr/sbin/aliyun cms DescribeMetricList --MetricName"
}

#监控阿里云ecs
check_ecs(){
	# Pull aliyun ECS metrics (public-network traffic, disk usage) for the last
	# monitoring window and push one value per mapped instance to the pushgateway.
	cloud_env
	#mainly public-network traffic
	ecs_info="VPC_PublicIP_InternetInRate VPC_PublicIP_InternetOutRate  diskusage_utilization"
	#loop over the watched metrics
	for cloud_name in $ecs_info  
	do
	    monitor_name=`echo $cloud_name | awk -F '_' '{print "ECS_"$NF}'`
	    # Datapoints comes back as a JSON-encoded string: strip the outer quotes
	    # and backslash escapes, then extract "instanceId Average" pairs.
	    $alicms $cloud_name  --Namespace acs_ecs_dashboard --Period $Periodtime --StartTime "$startime" --EndTime "$endtime"  | jq '.Datapoints' |sed 's/.\(.*\)/\1/' | sed 's/\(.*\)./\1/' | sed 's/\\//g' | jq '.[]| "\(.instanceId) \(.Average)"'>$ecs_log
	    #instance-id to service-name map (format: id:project_service)
		ecs_insname="i-xxx:青岛银行_app i-uf680ksjxwusmvtlkbog:沃尔沃_App01 i-xxxuf6ehfmokiqtwhhbsmpe:沃尔沃_App02"
		#resolve each instance id to its instance/service name
	    for instance_name in $ecs_insname 
	    do
	        instance_id=`echo $instance_name | awk -F ':' '{print $1}'`
	        servicename=`echo $instance_name | awk -F ':' '{print $2}'`
	        project=`echo $servicename | awk -F '_' '{print $1}'`
	        value=`cat $ecs_log  |  grep $instance_id | awk -F '"| ' '{print $3}'|head -n 1`
	        pushurl="curl --data-binary @- http://$pushgateway_ip:$pushgateway_port/metrics/job/ECS/project/$project/servicename/$servicename"
	        echo  "$monitor_name  $value"|  $pushurl
	        #echo "$project,$monitor_name,$value" >>$cloudpwd/ecs.log
	    done 
	done
}

#监控rds
check_rds(){	    
	# Pull aliyun RDS metrics for the last monitoring window and push each
	# mapped instance's value to the pushgateway.
	cloud_env
	#metrics to collect
	rds_info="CpuUsage  IOPSUsage MemoryUsage  ConnectionUsage DiskUsage  MySQL_QPS MySQL_TPS  MySQL_SlowQueries MySQL_NetworkInNew  MySQL_NetworkOutNew"
	for cloud_name in  $rds_info
	do
	    monitor_name=`echo $cloud_name | awk -F '_' '{print "RDS_"$NF}'`
	    #Datapoints is a JSON-encoded string: strip quotes/escapes, extract "instanceId Average"
	    $alicms $cloud_name --Namespace acs_rds_dashboard --Period $Periodtime --StartTime "$startime" --EndTime "$endtime" | jq '.Datapoints' | sed 's/.\(.*\)/\1/' | sed 's/\(.*\)./\1/' | sed 's/\\//g' | jq '.[]| "\(.instanceId) \(.Average)"' >$mysqlpwd/tmp.log 
	    #instance-id to project map (format: id:project)
	    rds_name="rm-xxx:xx1  rm-xxx:xx2"
		for instance_name in   $rds_name
	    do
	        instance_id=`echo $instance_name | awk -F ':' '{print $1}'`
	        project=`echo $instance_name | awk -F ':' '{print $2}'`
	        #(fixed: $servicename was never set in this function — copy/paste
	        # leftover from check_ecs — leaving an empty label segment in the
	        # push URL; reuse the project name as the service name)
	        servicename=$project
	        value=`cat  $mysqlpwd/tmp.log | grep  $instance_id | awk -F '"| ' '{print $3}'`
	        pushurl="curl --data-binary @- http://$pushgateway_ip:$pushgateway_port/metrics/job/RDS/project/$project/servicename/$servicename"
	        echo  "$monitor_name  $value" |  $pushurl 
	        #append to a log when debugging
	        #echo  "$project $monitor_name  $value" >>$mysqlpwd/"$project"_rds.log
	    done
	done
}

#只需要1台即可,可以拆分脚本#############################################################################################################################################
#监控域名
check_domain(){
	# Probe every domain listed in $domainlog (format: project,url), push its
	# HTTP status code, and alert via send.py when the answer is not 200.
	for info in `cat $domainlog  | grep -v '#'`
	do
		project=`echo $info | awk -F ',' '{print $1}'`
		domain=`echo $info | awk -F ',' '{print $2}'`
		domainpush=`echo $domain | awk -F '/' '{print $3}'`
		#empty unless the first response line contains 200
		status=`curl -Is --connect-timeout 10 $domain --insecure | head -n 1 |   grep "200" | awk '{print $2}'`
		#(fixed: default the status BEFORE pushing — the original pushed first,
		# sending a malformed empty sample for unreachable domains and then
		# comparing an empty string with -ne)
		if [[ ! -n $status ]];then
			status=1
		fi
		pushurl="curl --data-binary @- http://$pushgateway_ip:$pushgateway_port/metrics/job/http/project/$project/domain/$domainpush"
		echo "http_status $status"  | $pushurl
		if [[ $status -ne 200 ]];then
			python3 /app/shell/send.py "项目 $project 可能存在问题,请点击域名进行验证  $domain" wethink
		fi
	done	
}

#监控证书到期
check_ssl(){
	# Check TLS certificate expiry for every domain listed in ssl.log (format:
	# project,url) and alert via send.py when fewer than 30 days remain.
	domainlog=/app/shell/log/ssl.log
	for i in `cat $domainlog| grep -v '#'` #file listing the domains to check
	do
	    projectname=`echo $i | awk -F ',' '{print $1}'`
	    projectdomain=`echo $i | awk -F ',' '{print $2}'|  awk -F '/' '{print $NF}'`
	    #domains not served on 443 need their real port
	    if [[ $projectdomain == elearning.hyham.com ]];then
	        #(fixed: assigned openapi_END_TIME, so $END_TIME below was stale/unset for this domain)
	        END_TIME=`echo | openssl s_client -servername $projectdomain  -connect $projectdomain:10443 2>/dev/null | openssl x509 -noout -dates |grep 'After'| awk -F '=' '{print $2}'| awk -F ' +' '{print $1,$2,$4 }'`
	    # elif [[ $projectdomain == elearning.classykiss.cn ]]; then
	    #     END_TIME=`echo | openssl s_client -servername $projectdomain  -connect $projectdomain:8688 2>/dev/null | openssl x509 -noout -dates |grep 'After'| awk -F '=' '{print $2}'| awk -F ' +' '{print $1,$2,$4 }'`  
	    else
	        END_TIME=`echo | openssl s_client -servername $projectdomain  -connect $projectdomain:443 2>/dev/null | openssl x509 -noout -dates |grep 'After'| awk -F '=' '{print $2}'| awk -F ' +' '{print $1,$2,$4 }'`
	    fi
	    #convert the certificate's notAfter date to epoch seconds and a printable date
	    END_TIME1=`date +%s -d "$END_TIME"`
	    END_TIME2=`date +%F -d "$END_TIME"`
	    NOW_TIME=`date +%s`	
	    #only one resolved IP is kept, for tidy output
	    #domainip=`dig $projectdomain  |grep  "$projectdomain"  |grep -v ';' | awk '{print $NF}' | head -n 1`

	    #days until expiry: (expiry epoch - now) converted to days
	    expire_time=$(($(($END_TIME1 - $NOW_TIME))/(60*60*24)))
	    #alert when fewer than 30 days remain
	    if [[ $expire_time -lt 30 ]];then
	       python3 send.py  项目:$projectname,域名:$projectdomain,将于$expire_time天之后到期,到期日期为$END_TIME2 证书到期告警
	       #echo "项目:$projectname 域名:$projectdomain 将于$expire_time天之后到期,到期日期为$END_TIME2"       
	    fi
	done
}

#调用##################################################################################################################################################
#通过定时任务选择对应函数去监控对应的服务
#the cron-supplied first argument picks which check function runs
case $select_check in
	system_check)          #host system metrics
		check_system
	;;
	server_redis_check)    #redis
		check_redis
	;;
	server_mysql_check)    #mysql
		check_mysql
	;;
	check_app_status)      #app health endpoint
		check_app_status
	;;
	server_porcess_check)  #scheduler/task status (note: "porcess" typo is the established cron key)
		check_process
	;;
	server_nginxlog_check) #nginx log counters
		check_nginxlog
	;;
	server_nginx_blackip) #nginx log counters plus auto-blacklisting
		nginx_blackip
	;;
	online_check)          #online user count
		check_online
	;;
	cloud_rds_check)       #aliyun RDS
		check_rds
	;;
	cloud_ecs_check)       #aliyun ECS
		check_ecs
	;;
 	other_push)            #forward other servers' data to the pushgateway
		#NOTE(review): push_other is not defined in this file — presumably it
		#comes from /app/shell/.monitor_date.env; confirm before relying on it
		push_other
	;;
	*)                     #unknown argument: show usage and the example crontab line
		echo "$1 选择对应的服务"
		echo "*/1 * * * * cd /app/shell;/bin/bash  get_monitor_date.sh system_check;sleep 1;/bin/bash  get_monitor_date.sh server_nginxlog_check;/bin/bash  get_monitor_date.sh check_app_status"
	;;
esac