# ~*~ coding: utf-8 ~*~
# author: haochenxiao

import requests
import datetime
requests.adapters.DEFAULT_RETRIES = 5
# Optionally disable redundant keep-alive connections (left disabled):
#s = requests.session()
#s.keep_alive = False

import collections
import socket
import os
import redis
import json
import pymysql
from devops.paramiko_api import *
from confs.Configs import *
from alert.models import AlertHistory
from devops.models import MonitorConfig,ChartHistory
from resources.models import NewServer,ServerUser,ServerGroup
from confs.Log import logger
from devops.db_api import MySQLdb,OracleDB,DbLink
from dbmonitor.models import MySQLConfig,MySQLHistory,OracleConfig,OracleHistory


#redis
redis_db = 2
redisPool = redis.ConnectionPool(host=redis_host,port=redis_port,db=redis_db,password=redis_password)
client = redis.Redis(connection_pool=redisPool)
flag = 'icbc'

def send_redis(sv_ip_list,alarm,item,status,detail='空'):
	"""Push an alarm payload onto the Redis self-healing queue.

	*sv_ip_list* and *item* are joined into comma-separated strings, the
	message is logged, serialized to JSON and LPUSHed under the module-level
	``flag`` key for the downstream self-healing consumer.
	"""
	message = {
		'IP': ','.join(sv_ip_list),
		'告警项': alarm,
		'自愈项': ','.join(item),
		'当前状态': status,
		'详情': detail
	}
	logger.info(message)
	client.lpush(flag, json.dumps(message))


def as_host(ip):
	"""Build the asset dict expected by SSHConnection for the server at *ip*.

	Looks the server up by its inner IP and pulls the SSH credentials from
	the linked server_user record.
	"""
	server = NewServer.objects.get(ip_inner=ip)
	user = server.server_user
	return {
		'hostname': server.hostname,
		'ip': ip,
		'port': server.port,
		'username': user.username,
		'password': user.password,
		'private_key': user.privatekey,
	}




def process_status(ip,process,alarm,item,check_status,sv_ip_list):
	"""Count processes matching *process* on *ip* over SSH and report health.

	When check_status == 0, only the process count is returned. Otherwise the
	alert history / MonitorConfig state is transitioned (open an AlertHistory
	row on failure, close it on recovery) and the result is pushed to Redis.
	"""
	asset = as_host(ip)
	sm = SSHConnection(asset)
	# NOTE(review): *process* is interpolated into a shell command; it must
	# come from trusted configuration, never from user input.
	cmd = "ps -ef|grep {0}|grep -v grep|wc -l ".format(process)
	result = sm.run_cmd(cmd)
	logger.info(result)
	num = int(result['stdout'].strip())
	detail = '无'
	if check_status == 0:
		return num

	status = 'BAD' if num == 0 else 'OK'
	try:
		# Look up the previously recorded alert state for this item.
		mc_object = MonitorConfig.objects.get(name=alarm)
		alert_status = mc_object.alert_status
		alert_type = mc_object.type  # renamed: `type` shadowed the builtin

		if status == 'OK':
			# Recovered: close the open alert record, if any, exactly once.
			if alert_status != 2:
				endtime = datetime.datetime.now()
				ah = AlertHistory.objects.filter(item=alarm, type=alert_type, status=0).first()
				if ah is not None:  # bug fix: original crashed on ah.id when no open record existed
					AlertHistory.objects.filter(id=ah.id).update(status=1, endtime=endtime)
				mc_object.alert_status = 2
				mc_object.save()
		else:
			# Failing: open a new alert record once per incident.
			if alert_status != 1:
				startime = datetime.datetime.now()
				# bug fix: logger.info(a, b, c, d) treats extra args as %-format
				# params; with no placeholders the original call raised a
				# logging formatting error instead of logging the values.
				logger.info('%s %s %s %s', startime, alert_status, item, alert_type)
				AlertHistory.objects.create(item=alarm, type=alert_type, status=0, startime=startime)
				mc_object.alert_status = 1
				mc_object.save()
	except Exception as e:
		logger.info(e)

	send_redis(sv_ip_list, alarm, item, status, detail)



def web_monitor_test(url):
	"""Fetch *url* (10s timeout, TLS verification off) and return the HTTP status code."""
	response = requests.get(url, timeout=10, verify=False)
	return response.status_code

def web_monitor(url,alarm,item,sv_ip_list):
	"""Probe *url*, record alert-state transitions, push the result to Redis.

	Health rule: OK when the HTTP status is < 400 and the request took less
	than 10 seconds; otherwise BAD (request failures count as status 500).
	Returns the HTTP status code.
	"""
	try:
		res = requests.get(url, timeout=10, verify=False)
		status_code = res.status_code
		usetime = res.elapsed.total_seconds()
	except requests.RequestException:  # bug fix: bare except hid unrelated errors
		status_code = 500
		usetime = 0

	# Decide health before touching the DB so *status* is always defined —
	# the original raised NameError at send_redis() when the MonitorConfig
	# lookup below failed before *status* was assigned.
	status = 'OK' if status_code < 400 and usetime < 10 else 'BAD'

	try:
		# Previously recorded alert state for this item.
		mc_object = MonitorConfig.objects.get(name=alarm)
		alert_status = mc_object.alert_status
		alert_type = mc_object.type  # renamed: `type` shadowed the builtin

		if status == 'OK':
			# Recovered: close the open alert record, if any.
			if alert_status != 2:
				endtime = datetime.datetime.now()
				ah = AlertHistory.objects.filter(item=alarm, type=alert_type, status=0).first()
				if ah is not None:  # guard: nothing open to close
					AlertHistory.objects.filter(id=ah.id).update(status=1, endtime=endtime)
				mc_object.alert_status = 2
				mc_object.save()
		else:
			# Failing: open a new alert record once per incident.
			if alert_status != 1:
				startime = datetime.datetime.now()
				AlertHistory.objects.create(item=alarm, type=alert_type, status=0, startime=startime)
				mc_object.alert_status = 1
				mc_object.save()
	except Exception as e:
		logger.info(e)

	detail = '状态码:%s,耗时:%s' % (status_code, usetime)
	send_redis(sv_ip_list, alarm, item, status, detail)
	return status_code
	

def check_port_test(address, port):
	"""TCP-probe (address, port) with a 2s timeout.

	Returns the connect_ex() error code: 0 when the port accepted the
	connection, a non-zero errno otherwise.
	"""
	s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
	try:
		s.settimeout(2)
		return s.connect_ex((address, int(port)))
	finally:
		# bug fix: the original never closed the socket, leaking one file
		# descriptor per probe.
		s.close()


def check_port(address, port, alarm, item,sv_ip):
	"""TCP-probe (address, port), record alert transitions, push to Redis.

	Returns the connect_ex() code (0 = port open). On unexpected errors the
	probe is reported as BAD — the original raised NameError because
	*status*/*result* were only assigned inside the try block.
	"""
	# Defaults guarantee *status*/*result* are defined on every path.
	result = 1
	status = 'BAD'
	detail = '无'
	try:
		# Previously recorded alert state for this item.
		mc_object = MonitorConfig.objects.get(name=alarm)
		alert_status = mc_object.alert_status
		alert_type = mc_object.type  # renamed: `type` shadowed the builtin
		# Reuse the shared probe helper instead of duplicating socket code
		# (also fixes the socket leak the inline version had).
		result = check_port_test(address, port)
		if result == 0:
			status = 'OK'
			# Recovered: close the open alert record, if any.
			if alert_status != 2:
				endtime = datetime.datetime.now()
				ah = AlertHistory.objects.filter(item=alarm, type=alert_type, status=0).first()
				if ah is not None:  # guard: nothing open to close
					AlertHistory.objects.filter(id=ah.id).update(status=1, endtime=endtime)
				mc_object.alert_status = 2
				mc_object.save()
		else:
			status = 'BAD'
			# Failing: open a new alert record once per incident.
			if alert_status != 1:
				startime = datetime.datetime.now()
				AlertHistory.objects.create(item=alarm, type=alert_type, status=0, startime=startime)
				mc_object.alert_status = 1
				mc_object.save()
	except Exception as e:
		logger.info(e)
	send_redis(sv_ip, alarm, item, status, detail)
	return result

def devicemsg(id):
	"""Poll the BaseMonitor agent on server *id* and persist a ChartHistory row.

	Network counters reported by the agent are cumulative, so per-interval
	flow is derived by diffing against the previous sample cached in Redis;
	the first poll only seeds the cache and records an empty flow dict.
	"""
	server = NewServer.objects.get(id=id)
	monitor_ip = server.ip_inner
	url = 'http://%s:6789/BaseMonitor/all' % monitor_ip
	res = requests.get(url, timeout=10, verify=False)
	all_mo = json.loads(res.text)
	getLoadInfo = all_mo["getLoadInfo"]
	getMemInfo = all_mo["getMemInfo"]
	getDiskInfo = all_mo["getDiskInfo"]
	getNetInfo = all_mo["getNetInfo"]
	getProcess = all_mo["getProcess"]

	# 1-minute load average, one decimal place.
	load_average = round(float(getLoadInfo["load1"]), 1)
	# Process count.
	process = getProcess
	# Memory usage percentage.
	mem_rate = round(getMemInfo["usedPercent"], 2)

	# Disk usage, skipping pseudo/system filesystems (sys/dev/proc/boot/run).
	skip_tokens = ('sys', 'dev', 'proc', 'boot', 'run')
	disk_root = collections.OrderedDict()
	for d in getDiskInfo:
		path = d["path"]
		if not any(tok in path for tok in skip_tokens):
			disk_root[path] = d["UsedPercent"]
	disk_root = json.dumps(disk_root)

	# Network flow: diff current cumulative counters against the cached
	# sample, then cache the current sample for the next run.
	# (renamed from `flag`, which shadowed the module-level Redis queue key)
	cache_key = 'netflow_' + str(id)
	last_raw = client.get(cache_key)
	client.set(cache_key, json.dumps(getNetInfo))

	counter_keys = ("bytesSent", "bytesRecv", "packetsSent", "packetsRecv",
					"errin", "errout", "dropin", "dropout")
	nflow = {}
	if last_raw is not None:
		lrs = json.loads(last_raw)
		for m, current in getNetInfo.items():
			if m == "lo":
				# bug fix: the original used `pass` here, which did NOT skip
				# the loopback interface as the comment/intent implied.
				continue
			if m not in lrs:
				# Interface appeared since the last sample; no delta yet
				# (the original raised KeyError in this case).
				continue
			prev = lrs[m]
			nflow[m] = {k: current[k] - prev[k] for k in counter_keys}
	# bug fix: always store JSON — the original stored a raw Python dict on
	# the first run and a JSON string afterwards.
	nflow = json.dumps(nflow)

	ChartHistory.objects.create(hostid=id, cpu=load_average, mem=mem_rate,
								process=process, disk=disk_root, nflow=nflow)

def _counter_rate(last_sample, current_sample, key, frequency):
	"""Per-second growth of cumulative status counter *key*; 0 when it did not grow."""
	last = int(last_sample[key])
	now = int(current_sample[key])
	return (now - last) / frequency if now > last else 0


def mysql_job(id):
	"""Sample MySQL status for config *id* and persist a MySQLHistory row.

	Cumulative SHOW STATUS counters are diffed against the previous sample
	cached in Redis to produce per-second QPS/TPS figures. The first run
	only seeds the cache; a zero-length interval is skipped entirely.
	"""
	my = MySQLConfig.objects.get(id=id)
	mysqldb = MySQLdb(username=my.username, password=my.password, ip=my.ip,
					  port=my.port, charset=my.charset)
	result = mysqldb.GetStatus()

	# (renamed from `flag`, which shadowed the module-level Redis queue key)
	cache_key = 'mysql_' + str(id)
	last_raw = client.get(cache_key)
	client.set(cache_key, json.dumps(result))
	if last_raw is None:
		# First sample: nothing to diff against yet.
		return
	lrs = json.loads(last_raw)

	l_uptime = int(lrs['Uptime'])
	n_uptime = int(result['Uptime'])
	if n_uptime == l_uptime:
		# No time elapsed between samples; a zero interval would divide by
		# zero in every rate computation below.
		return
	frequency = n_uptime - l_uptime

	# Per-second rates derived from cumulative counters (copy-paste blocks
	# in the original collapsed into one helper).
	total_qps = _counter_rate(lrs, result, 'Questions', frequency)
	insert_qps = _counter_rate(lrs, result, 'Com_insert', frequency)
	update_qps = _counter_rate(lrs, result, 'Com_update', frequency)
	select_qps = _counter_rate(lrs, result, 'Com_select', frequency)
	delete_qps = _counter_rate(lrs, result, 'Com_delete', frequency)
	commit_tps = _counter_rate(lrs, result, 'Com_commit', frequency)
	rollback_tps = _counter_rate(lrs, result, 'Com_rollback', frequency)

	# Thread/connection gauges (absolute values, not deltas).
	threads_running = int(result['Threads_running'])
	threads_cached = int(result['Threads_cached'])
	threads_connected = int(result['Threads_connected'])
	threads_created = int(result['Threads_created'])

	# Traffic since the last sample, in KiB.
	bytes_sent = (int(result['Bytes_sent']) - int(lrs['Bytes_sent'])) / 1024
	bytes_received = (int(result['Bytes_received']) - int(lrs['Bytes_received'])) / 1024

	# InnoDB buffer-pool I/O deltas since the last sample.
	innodb_buffer_pool_reads = (int(result['Innodb_buffer_pool_reads'])
								- int(lrs['Innodb_buffer_pool_reads']))
	Innodb_buffer_pool_pages_flushed = (int(result['Innodb_buffer_pool_pages_flushed'])
										- int(lrs['Innodb_buffer_pool_pages_flushed']))

	# MyISAM key-buffer hit/usage rates.
	key_reads = int(result['Key_reads'])
	key_read_requests = int(result['Key_read_requests'])
	key_writes = int(result['Key_writes'])
	key_write_requests = int(result['Key_write_requests'])
	key_blocks_unused = int(result['Key_blocks_unused'])
	key_blocks_used = int(result['Key_blocks_used'])

	try:
		key_buffer_read_rate = round(float(key_read_requests / (key_read_requests + key_reads)) * 100, 2)
		key_buffer_write_rate = round(float(key_write_requests / (key_write_requests + key_writes)) * 100, 2)
	except ZeroDivisionError:
		key_buffer_read_rate = 0
		key_buffer_write_rate = 0

	try:
		key_blocks_used_rate = round(float(key_blocks_used / (key_blocks_used + key_blocks_unused)) * 100, 2)
	except ZeroDivisionError:
		# bug fix: this division was unguarded in the original and crashed
		# when both counters were zero (key buffer disabled).
		key_blocks_used_rate = 0

	# Replication status flags, persisted only on change (slaves only).
	if my.mysql_master_slave == 2:
		slave = mysqldb.SlaveStatus()
		Slave_IO_Running = 1 if slave['Slave_IO_Running'] == 'YES' else 0
		Slave_SQL_Running = 1 if slave['Slave_SQL_Running'] == 'YES' else 0
		if my.Slave_IO_Running != Slave_IO_Running or my.Slave_SQL_Running != Slave_SQL_Running:
			my.Slave_IO_Running = Slave_IO_Running
			my.Slave_SQL_Running = Slave_SQL_Running
			my.save()

	MySQLHistory.objects.create(mysqlid=id,total_qps=total_qps,insert_qps=insert_qps,update_qps=update_qps,select_qps=select_qps,delete_qps=delete_qps,
								commit_tps=commit_tps,rollback_tps=rollback_tps,threads_running=threads_running,threads_cached=threads_cached,
								threads_connected=threads_connected,threads_created=threads_created,bytes_sent=bytes_sent,bytes_received=bytes_received,
								Innodb_buffer_pool_pages_flushed=Innodb_buffer_pool_pages_flushed,innodb_buffer_pool_reads=innodb_buffer_pool_reads,
								key_buffer_read_rate=key_buffer_read_rate,key_buffer_write_rate=key_buffer_write_rate,key_blocks_used_rate=key_blocks_used_rate)



def oracle_job(id):
	"""Snapshot Oracle tablespace usage for config *id* into OracleHistory."""
	conf = OracleConfig.objects.get(id=id)
	oracledb = OracleDB(conf.username, conf.password, conf.ip,
						port=conf.port, case=conf.case)
	tablespace = json.dumps(oracledb.TableSpace())
	OracleHistory.objects.create(oracleid=id, tablespace=tablespace)










