/**
 * gislin 2017年7月20日
 */
package com.btcode.db.connection;

import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerImpl;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.pool.vendor.OracleExceptionSorter;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;
import com.alibaba.druid.util.Utils;
import com.btcode.common.MyLog;
import com.btcode.db.config.DataBaseConfig;
import com.btcode.exception.MsgException;
import com.btcode.log.ILog;

/**
 * @author gislin 2017年7月20日
 */
/**
 * Default {@link IConnectionGetter}: hands out one JDBC {@link Connection} per
 * thread via a {@link ThreadLocal}. When the config enables pooling the
 * connections come from a Druid pool (with fatal-exception discarding and
 * periodic stat logging); otherwise each thread gets a raw
 * {@link DriverManager} connection.
 *
 * <p>Thread-safety: the ThreadLocal confines each connection to its owning
 * thread; the shared counter is an {@link AtomicInteger}.
 *
 * @author gislin 2017年7月20日
 */
public class DefaultConnectionGetter implements IConnectionGetter {

    private final ILog log = MyLog.getInstance().getLogger(getClass());

    private DruidDataSource dataSource;
    private final DataBaseConfig config;
    // Diagnostic count of connections handed out (appears in log lines only).
    // AtomicInteger replaces the former "volatile int": volatile alone does not
    // make ++/-- atomic, so the old counter could drift under concurrency.
    private final AtomicInteger threadNum = new AtomicInteger();

    /**
     * Builds the getter; when pooling is enabled the Druid pool is initialized
     * eagerly so configuration errors surface at construction time.
     *
     * @param config database settings (driver class, URL, credentials, pool
     *               and logging switches)
     * @throws RuntimeException wrapping any pool initialization failure
     */
    public DefaultConnectionGetter(DataBaseConfig config) {
        this.config = config;
        if (config.isLog()) {
            log.logOn();
        }
        else {
            log.logOff();
        }
        try {
            if (config.isUsePool()) {
                // NOTE(review): getDataSource is overridable and invoked from
                // the constructor; subclasses overriding it must not depend on
                // their own fields, which are not yet initialized here.
                dataSource = getDataSource(config);
                dataSource.init();
                log.info(config.getConnectionString() + " dataSource初始化成功" + config.getUserName());
            }
        }
        catch (Exception e) {
            log.error(config.getUserName() + " dataSource init", e);
            throw new RuntimeException(e);
        }
    }

    /**
     * Creates and configures the Druid pool. Other database implementations
     * may override this method to customize pooling.
     *
     * @param config database settings used to build the pool
     * @return the configured (not yet initialized) data source
     * @throws SQLException if pool construction fails
     */
    protected DruidDataSource getDataSource(DataBaseConfig config) throws SQLException {
        dataSource = DruidDataSourceHelp.getDefault(config);

        // Discard connections on unrecoverable database exceptions.
        dataSource.setExceptionSorter(new OracleExceptionSorter() {
            @Override
            public boolean isExceptionFatal(SQLException e) {
                log.error(config.getUserName(), e);
                return super.isExceptionFatal(e);
            }
        });

        // Periodic pool monitoring: dump pool/SQL statistics as JSON.
        dataSource.setStatLogger(new DruidDataSourceStatLoggerImpl() {
            @Override
            public void log(DruidDataSourceStatValue statValue) {
                log.info(config.getUserName() + getDruidDataSourceStatInfo(statValue));
            }
        });

        log.info(config.getConnectionString() + " dataSource初始化成功" + config.getUserName());
        return dataSource;
    }

    /**
     * Returns the calling thread's connection, creating or replacing it as
     * needed (see {@code localDbConn}).
     */
    @Override
    public Connection getConnection() {
        return localDbConn.get();
    }

    /**
     * Per-thread connection holder. {@code get()} validates the cached
     * connection and transparently replaces it when it is closed or invalid;
     * {@code remove()} closes the connection before unbinding it.
     */
    private ThreadLocal<Connection> localDbConn = new ThreadLocal<Connection>() {
        @Override
        public Connection get() {
            try {
                Connection conn = super.get();
                // isValid(30): allow the driver up to 30s to verify liveness.
                if (conn != null && !conn.isClosed() && conn.isValid(30)) {
                    return conn;
                }
                log.info(config.getUserName() + "获取到的ThreadLocal连接失效，将清除并重新获取");
                if (conn != null) {
                    threadNum.decrementAndGet();
                    conn.close();
                }
            }
            catch (Exception e) {
                log.error(config.getUserName() + "ThreadLocal Exception", e);
                throw new MsgException("获取数据库连接出错", e);
            }
            // Stale or missing connection: build a fresh one and cache it.
            Connection connection = initialValue();
            set(connection);
            return connection;
        }

        @Override
        protected Connection initialValue() {
            return getUnsafeConnection();
        }

        @Override
        public void remove() {
            Connection conn = super.get();
            if (conn != null) {
                try {
                    // Keep the diagnostic counter consistent with get(),
                    // which also decrements before closing.
                    threadNum.decrementAndGet();
                    conn.close();
                }
                catch (SQLException e) {
                    log.error(config.getUserName() + "close Exception", e);
                }
            }
            super.remove();
        }
    };

    /**
     * Obtains a brand-new connection, from the pool when one exists or
     * directly from {@link DriverManager} otherwise. "Unsafe" because the
     * result is not yet bound to a thread.
     *
     * @return a live auto-commit connection, never {@code null}
     * @throws MsgException if the connection cannot be obtained — the
     *         previous version logged and returned {@code null}, deferring an
     *         NPE to the caller; now we fail fast with the same exception type
     *         that {@code localDbConn.get()} already throws
     */
    private Connection getUnsafeConnection() {
        try {
            Connection conn;
            if (dataSource != null) {
                conn = dataSource.getConnection();
                log.info(config.getUserName() + "从DataSource获取连接 " + threadNum.get());
            }
            else {
                // JDBC drivers self-register in their static initializer, so
                // loading the class is enough; the deprecated newInstance()
                // call served no purpose.
                Class.forName(config.getDirverClass());
                conn = DriverManager.getConnection(config.getConnectionString(),
                        config.getUserName(), config.getPassword());
                log.info(config.getUserName() + "新建连接 " + threadNum.get());
            }
            conn.setAutoCommit(true);

            threadNum.incrementAndGet();
            return conn;
        }
        catch (Exception e) {
            log.error(config.getUserName() + "getUnsafeConnection Exception", e);
            throw new MsgException("获取数据库连接出错", e);
        }
    }

    /**
     * Renders pool and per-SQL statistics as a JSON string for the periodic
     * monitor log. Zero-valued metrics are omitted to keep the output compact.
     *
     * @param statValue snapshot produced by Druid's stat logger
     * @return JSON string of the non-zero metrics
     */
    protected String getDruidDataSourceStatInfo(DruidDataSourceStatValue statValue) {
        Map<String, Object> map = new LinkedHashMap<>();

        map.put("url", statValue.getUrl());
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());

        if (statValue.getActivePeak() > 0) {
            map.put("activePeak", statValue.getActivePeak());
            map.put("activePeakTime", statValue.getActivePeakTime());
        }
        map.put("poolingCount", statValue.getPoolingCount());
        if (statValue.getPoolingPeak() > 0) {
            map.put("poolingPeak", statValue.getPoolingPeak());
            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
        }
        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());

        if (statValue.getWaitThreadCount() > 0) {
            map.put("waitThreadCount", statValue.getWaitThreadCount());
        }
        if (statValue.getNotEmptyWaitCount() > 0) {
            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
        }
        if (statValue.getNotEmptyWaitMillis() > 0) {
            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
        }
        if (statValue.getLogicConnectErrorCount() > 0) {
            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
        }
        if (statValue.getPhysicalConnectCount() > 0) {
            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
        }
        if (statValue.getPhysicalCloseCount() > 0) {
            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
        }
        if (statValue.getPhysicalConnectErrorCount() > 0) {
            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
        }
        if (statValue.getExecuteCount() > 0) {
            map.put("executeCount", statValue.getExecuteCount());
        }
        if (statValue.getErrorCount() > 0) {
            map.put("errorCount", statValue.getErrorCount());
        }
        if (statValue.getCommitCount() > 0) {
            map.put("commitCount", statValue.getCommitCount());
        }
        if (statValue.getRollbackCount() > 0) {
            map.put("rollbackCount", statValue.getRollbackCount());
        }
        if (statValue.getPstmtCacheHitCount() > 0) {
            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
        }
        if (statValue.getPstmtCacheMissCount() > 0) {
            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
        }
        if (statValue.getStartTransactionCount() > 0) {
            map.put("startTransactionCount", statValue.getStartTransactionCount());
            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
        }
        if (statValue.getConnectCount() > 0) {
            map.put("connectionHoldTimeHistogram",
                    rtrim(statValue.getConnectionHoldTimeHistogram()));
        }
        if (statValue.getClobOpenCount() > 0) {
            map.put("clobOpenCount", statValue.getClobOpenCount());
        }
        if (statValue.getBlobOpenCount() > 0) {
            map.put("blobOpenCount", statValue.getBlobOpenCount());
        }
        if (statValue.getSqlSkipCount() > 0) {
            map.put("sqlSkipCount", statValue.getSqlSkipCount());
        }
        if (statValue.getKeepAliveCheckCount() > 0) {
            map.put("keepAliveCheckCount", statValue.getKeepAliveCheckCount());
        }
        ArrayList<Map<String, Object>> sqlList = new ArrayList<>();
        if (statValue.getSqlList().size() > 0) {
            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                Map<String, Object> sqlStatMap = new LinkedHashMap<>();
                sqlStatMap.put("sql", sqlStat.getSql());
                if (sqlStat.getExecuteCount() > 0) {
                    sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
                    sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
                    sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());
                    sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
                    sqlStatMap.put("executeAndResultHoldHistogram",
                            rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
                }
                // Error details were previously emitted from two separate
                // blocks, putting "executeErrorCount" twice; merged here.
                long executeErrorCount = sqlStat.getExecuteErrorCount();
                if (executeErrorCount > 0) {
                    sqlStatMap.put("executeErrorCount", executeErrorCount);
                    sqlStatMap.put("executeErrorLastTime", sqlStat.getExecuteErrorLastTime());
                    if (sqlStat.getExecuteErrorLast() != null) {
                        sqlStatMap.put("executeErrorLast", Utils.getStackTrace(sqlStat.getExecuteErrorLast()));
                    }
                }
                int runningCount = sqlStat.getRunningCount();
                if (runningCount > 0) {
                    sqlStatMap.put("runningCount", runningCount);
                }
                int concurrentMax = sqlStat.getConcurrentMax();
                if (concurrentMax > 0) {
                    sqlStatMap.put("concurrentMax", concurrentMax);
                }
                if (sqlStat.getFetchRowCount() > 0) {
                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
                    sqlStatMap.put("fetchRowCountMax", sqlStat.getFetchRowCountMax());
                    sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
                }
                if (sqlStat.getUpdateCount() > 0) {
                    sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
                    sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
                    sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
                }
                if (sqlStat.getInTransactionCount() > 0) {
                    sqlStatMap.put("inTransactionCount", sqlStat.getInTransactionCount());
                }
                if (sqlStat.getClobOpenCount() > 0) {
                    sqlStatMap.put("clobOpenCount", sqlStat.getClobOpenCount());
                }
                if (sqlStat.getBlobOpenCount() > 0) {
                    sqlStatMap.put("blobOpenCount", sqlStat.getBlobOpenCount());
                }
                sqlList.add(sqlStatMap);
            }
            map.put("sqlList", sqlList);
        }

        return JSONUtils.toJSONString(map);
    }

    /**
     * Shuts down the pool. NOTE(review): dropping the ThreadLocal does not
     * close connections still held by other threads; pooled ones are reclaimed
     * by {@code dataSource.close()}, but non-pooled connections of other
     * threads are left for the driver/GC. Callers must not use this instance
     * after close().
     */
    @Override
    public void close() {
        localDbConn = null;
        if (dataSource != null) {
            dataSource.close();
            log.info(config.getUserName() + " dataSource close");
        }
    }
}
