package com.alinesno.cloud.monitor.agent.collect;

import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;
import com.alinesno.cloud.compoment.kafka.KafkaProducer;
import com.alinesno.cloud.monitor.agent.aspect.BaseAspect;
import com.alinesno.cloud.monitor.agent.tool.IPUtils;

/**
 * Custom Druid data-source stat logger: flattens pool- and SQL-level statistics
 * into an ordered map, serializes it to JSON, and publishes it to Kafka so the
 * monitor service can persist it.
 * 
 * @author WeiXiaoJin
 * @since 2020年1月25日 下午19:45:08
 */
@Component("watcherSqlStatLogger")
public class WatcherSQLStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger  {

	private static final Logger log = LoggerFactory.getLogger(WatcherSQLStatLogger.class);

	// Spring application name tagged onto every stat payload; defaults to "-1" when unset.
	@Value("${spring.application.name:-1}")
	protected String applicationName ; 

	// May be null when the no-arg constructor is used (e.g. reflective instantiation by Druid).
	protected KafkaProducer kafkaProducer; 

	public WatcherSQLStatLogger(){
	        this.configFromProperties(System.getProperties());
	    }

	public WatcherSQLStatLogger(KafkaProducer kafkaProducer) {
		// FIX: chain to the no-arg constructor so configFromProperties() runs on this
		// path too; previously druid.stat.loggerName was silently ignored here.
		this();
		this.kafkaProducer = kafkaProducer ; 
	}

	/**
	 * Reads the optional {@code druid.stat.loggerName} property and applies it.
	 *
	 * @param properties source of configuration, typically {@link System#getProperties()}
	 * @since 0.2.21
	 */
	@Override
	public void configFromProperties(Properties properties) {
		String property = properties.getProperty("druid.stat.loggerName");
		if (property != null && property.length() > 0) {
			setLoggerName(property);
		}
	}

	/**
	 * Serializes the given pool statistics snapshot to JSON and publishes it to
	 * the monitor Kafka topic. Zero-valued counters are omitted from the payload.
	 *
	 * @param statValue snapshot produced by Druid's stat timer
	 */
	@Override
	public void log(DruidDataSourceStatValue statValue) {
		String text = JSONUtils.toJSONString(buildStatMap(statValue));

		if (kafkaProducer == null) {
			// FIX: previously this dereference threw an NPE whenever the instance was
			// created via the no-arg constructor; drop the message with a warning instead.
			log.warn("kafkaProducer is not configured; dropping SQL stat message: {}", text);
			return;
		}
		kafkaProducer.asyncSendMessage(BaseAspect.WATCHER_TOPIC_SQL , text);
	}

	// Flattens the pool-level statistics into an insertion-ordered map (order is kept
	// stable for readable JSON); zero-valued counters are omitted.
	private Map<String, Object> buildStatMap(DruidDataSourceStatValue statValue) {
		Map<String, Object> map = new LinkedHashMap<String, Object>();

		map.put("url", statValue.getUrl());
		map.put("dbType", statValue.getDbType());
		map.put("name", statValue.getName());
		map.put("activeCount", statValue.getActiveCount());

		// Peak values and their timestamps must appear together or not at all.
		if (statValue.getActivePeak() > 0) {
			map.put("activePeak", statValue.getActivePeak());
			map.put("activePeakTime", statValue.getActivePeakTime());
		}
		map.put("poolingCount", statValue.getPoolingCount());
		if (statValue.getPoolingPeak() > 0) {
			map.put("poolingPeak", statValue.getPoolingPeak());
			map.put("poolingPeakTime", statValue.getPoolingPeakTime());
		}
		map.put("connectCount", statValue.getConnectCount());
		map.put("closeCount", statValue.getCloseCount());

		putIfPositive(map, "waitThreadCount", statValue.getWaitThreadCount());
		putIfPositive(map, "notEmptyWaitCount", statValue.getNotEmptyWaitCount());
		putIfPositive(map, "notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
		putIfPositive(map, "logicConnectErrorCount", statValue.getLogicConnectErrorCount());
		putIfPositive(map, "physicalConnectCount", statValue.getPhysicalConnectCount());
		putIfPositive(map, "physicalCloseCount", statValue.getPhysicalCloseCount());
		putIfPositive(map, "physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
		putIfPositive(map, "executeCount", statValue.getExecuteCount());
		putIfPositive(map, "errorCount", statValue.getErrorCount());
		putIfPositive(map, "commitCount", statValue.getCommitCount());
		putIfPositive(map, "rollbackCount", statValue.getRollbackCount());
		putIfPositive(map, "pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
		putIfPositive(map, "pstmtCacheMissCount", statValue.getPstmtCacheMissCount());

		if (statValue.getStartTransactionCount() > 0) {
			map.put("startTransactionCount", statValue.getStartTransactionCount());
			map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
		}

		if (statValue.getConnectCount() > 0) {
			map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
		}

		putIfPositive(map, "clobOpenCount", statValue.getClobOpenCount());
		putIfPositive(map, "blobOpenCount", statValue.getBlobOpenCount());
		putIfPositive(map, "sqlSkipCount", statValue.getSqlSkipCount());

		if (statValue.getSqlList().size() > 0) {
			ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
			for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
				sqlList.add(buildSqlStatMap(sqlStat));
			}
			map.put("sqlList", sqlList);
		}

		putIfPositive(map, "keepAliveCheckCount", statValue.getKeepAliveCheckCount());

		// Tag the payload so the monitor side can attribute it to a service instance.
		map.put("applicationName", applicationName) ; 
		map.put("hostname", IPUtils.HOSTNAME) ;
		map.put("ip", IPUtils.IP) ; 

		return map;
	}

	// Flattens one SQL statement's statistics; zero-valued counters are omitted.
	private Map<String, Object> buildSqlStatMap(JdbcSqlStatValue sqlStat) {
		Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
		sqlStatMap.put("sql", sqlStat.getSql());

		if (sqlStat.getExecuteCount() > 0) {
			sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
			sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
			sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());

			sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
			sqlStatMap.put("executeAndResultHoldHistogram", rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
		}

		putIfPositive(sqlStatMap, "executeErrorCount", sqlStat.getExecuteErrorCount());
		putIfPositive(sqlStatMap, "runningCount", sqlStat.getRunningCount());
		putIfPositive(sqlStatMap, "concurrentMax", sqlStat.getConcurrentMax());

		if (sqlStat.getFetchRowCount() > 0) {
			sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
			sqlStatMap.put("fetchRowCountMax", sqlStat.getFetchRowCountMax());
			sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
		}

		if (sqlStat.getUpdateCount() > 0) {
			sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
			sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
			sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
		}

		putIfPositive(sqlStatMap, "inTransactionCount", sqlStat.getInTransactionCount());
		putIfPositive(sqlStatMap, "clobOpenCount", sqlStat.getClobOpenCount());
		putIfPositive(sqlStatMap, "blobOpenCount", sqlStat.getBlobOpenCount());

		return sqlStatMap;
	}

	// Puts value under key only when it is strictly positive, matching the
	// original "omit zero counters" convention.
	private static void putIfPositive(Map<String, Object> map, String key, long value) {
		if (value > 0) {
			map.put(key, value);
		}
	}

}
