/*
 * Copyright 1999-2018 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.druid.pool;

import static com.alibaba.druid.util.Utils.getBoolean;

import java.io.Closeable;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import javax.management.JMException;
import javax.management.MBeanRegistration;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.naming.NamingException;
import javax.naming.Reference;
import javax.naming.Referenceable;
import javax.naming.StringRefAddr;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.ConnectionPoolDataSource;
import javax.sql.PooledConnection;

import com.alibaba.druid.Constants;
import com.alibaba.druid.TransactionTimeoutException;
import com.alibaba.druid.VERSION;
import com.alibaba.druid.filter.AutoLoad;
import com.alibaba.druid.filter.Filter;
import com.alibaba.druid.filter.FilterChainImpl;
import com.alibaba.druid.mock.MockDriver;
import com.alibaba.druid.pool.DruidPooledPreparedStatement.PreparedStatementKey;
import com.alibaba.druid.pool.vendor.DB2ExceptionSorter;
import com.alibaba.druid.pool.vendor.InformixExceptionSorter;
import com.alibaba.druid.pool.vendor.MSSQLValidConnectionChecker;
import com.alibaba.druid.pool.vendor.MockExceptionSorter;
import com.alibaba.druid.pool.vendor.MySqlExceptionSorter;
import com.alibaba.druid.pool.vendor.MySqlValidConnectionChecker;
import com.alibaba.druid.pool.vendor.NullExceptionSorter;
import com.alibaba.druid.pool.vendor.OracleExceptionSorter;
import com.alibaba.druid.pool.vendor.OracleValidConnectionChecker;
import com.alibaba.druid.pool.vendor.PGExceptionSorter;
import com.alibaba.druid.pool.vendor.PGValidConnectionChecker;
import com.alibaba.druid.pool.vendor.SybaseExceptionSorter;
import com.alibaba.druid.proxy.DruidDriver;
import com.alibaba.druid.proxy.jdbc.DataSourceProxyConfig;
import com.alibaba.druid.proxy.jdbc.TransactionInfo;
import com.alibaba.druid.sql.ast.SQLStatement;
import com.alibaba.druid.sql.ast.statement.SQLSelectQuery;
import com.alibaba.druid.sql.ast.statement.SQLSelectQueryBlock;
import com.alibaba.druid.sql.ast.statement.SQLSelectStatement;
import com.alibaba.druid.sql.parser.SQLParserUtils;
import com.alibaba.druid.sql.parser.SQLStatementParser;
import com.alibaba.druid.stat.DruidDataSourceStatManager;
import com.alibaba.druid.stat.JdbcDataSourceStat;
import com.alibaba.druid.stat.JdbcSqlStat;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;
import com.alibaba.druid.util.*;
import com.alibaba.druid.wall.WallFilter;
import com.alibaba.druid.wall.WallProviderStatValue;

/**
 * @author ljw [ljw2083@alibaba-inc.com]
 * @author wenshao [szujobs@hotmail.com]
 */
public class DruidDataSource extends DruidAbstractDataSource implements DruidDataSourceMBean, ManagedDataSource,
		Referenceable, Closeable, Cloneable, ConnectionPoolDataSource, MBeanRegistration {

	private final static Log LOG = LogFactory.getLog(DruidDataSource.class);
	private static final long serialVersionUID = 1L;
	// stats — counters below are mutated either under `lock` or via the field updaters at the bottom
	private volatile long recycleErrorCount = 0L;
	private long connectCount = 0L;
	private long closeCount = 0L;
	private volatile long connectErrorCount = 0L;
	private long recycleCount = 0L;
	private long removeAbandonedCount = 0L;
	private long notEmptyWaitCount = 0L;
	private long notEmptySignalCount = 0L;
	private long notEmptyWaitNanos = 0L;
	private int keepAliveCheckCount = 0;
	private int activePeak = 0;
	private long activePeakTime = 0;
	private int poolingPeak = 0;
	private long poolingPeakTime = 0;
	// store
	private volatile DruidConnectionHolder[] connections;// the actual pooled database connections
	private int poolingCount = 0;
	private int activeCount = 0;
	private long discardCount = 0;
	private int notEmptyWaitThreadCount = 0;
	private int notEmptyWaitThreadPeak = 0;
	// scratch arrays used by the destroy/shrink cycle: connections selected for eviction
	// and connections selected for a keep-alive check (sized to maxActive)
	private DruidConnectionHolder[] evictConnections;
	private DruidConnectionHolder[] keepAliveConnections;

	// threads
	private volatile ScheduledFuture<?> destroySchedulerFuture;
	private DestroyTask destroyTask;

	private volatile Future<?> createSchedulerFuture;

	private CreateConnectionThread createConnectionThread;
	private DestroyConnectionThread destroyConnectionThread;
	private LogStatsThread logStatsThread;
	private int createTaskCount;

	// init() waits on this latch until the creator and destroyer have started (2 parties)
	private final CountDownLatch initedLatch = new CountDownLatch(2);

	private volatile boolean enable = true;

	private boolean resetStatEnable = true;
	private volatile long resetCount = 0L;

	// stack trace captured at init() time, for diagnostics
	private String initStackTrace;

	private volatile boolean closing = false;
	private volatile boolean closed = false;
	private long closeTimeMillis = -1L;

	protected JdbcDataSourceStat dataSourceStat;

	private boolean useGlobalDataSourceStat = false;
	private boolean mbeanRegistered = false;
	public static ThreadLocal<Long> waitNanosLocal = new ThreadLocal<Long>();
	private boolean logDifferentThread = true;
	private volatile boolean keepAlive = false;
	private boolean asyncInit = false;
	protected boolean killWhenSocketReadTimeout = false;

	// atomic access to the volatile counters above without allocating Atomic* objects per instance
	protected static final AtomicLongFieldUpdater<DruidDataSource> recycleErrorCountUpdater = AtomicLongFieldUpdater
			.newUpdater(DruidDataSource.class, "recycleErrorCount");
	protected static final AtomicLongFieldUpdater<DruidDataSource> connectErrorCountUpdater = AtomicLongFieldUpdater
			.newUpdater(DruidDataSource.class, "connectErrorCount");
	protected static final AtomicLongFieldUpdater<DruidDataSource> resetCountUpdater = AtomicLongFieldUpdater
			.newUpdater(DruidDataSource.class, "resetCount");

	/** Creates a data source with an unfair (default) borrow lock. */
	public DruidDataSource() {
		this(false);
	}

	/**
	 * Creates a data source.
	 *
	 * @param fairLock whether the internal borrow lock uses fair ordering
	 */
	public DruidDataSource(boolean fairLock) {
		super(fairLock);
		// seed configuration from JVM system properties (-Ddruid.* entries)
		configFromPropety(System.getProperties());
	}

	/** @return whether init() skips eager creation of the initial connections */
	public boolean isAsyncInit() {
		return asyncInit;
	}

	public void setAsyncInit(boolean asyncInit) {
		this.asyncInit = asyncInit;
	}
	
	/**
	 * Applies every recognized <code>druid.*</code> entry from the given properties
	 * (typically the JVM system properties, set via <code>-Ddruid.maxActive=20</code>
	 * and read back with <code>System.getProperty</code>) to this data source.
	 * Unknown keys are ignored; malformed numeric values are logged and skipped.
	 * Note: the method name keeps its original spelling ("Propety") because it is
	 * part of the public API and is called by {@link #setConnectProperties}.
	 *
	 * @param properties the source of configuration entries; never null
	 */
	public void configFromPropety(Properties properties) {
		{
			String property = properties.getProperty("druid.name");
			if (property != null) {
				this.setName(property);
			}
		}
		{
			String property = properties.getProperty("druid.url");
			if (property != null) {
				this.setUrl(property);
			}
		}
		{
			String property = properties.getProperty("druid.username");
			if (property != null) {
				this.setUsername(property);
			}
		}
		{
			String property = properties.getProperty("druid.password");
			if (property != null) {
				this.setPassword(property);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.testWhileIdle");
			if (value != null) {
				this.testWhileIdle = value;
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.testOnBorrow");
			if (value != null) {
				this.testOnBorrow = value;
			}
		}
		{
			String property = properties.getProperty("druid.validationQuery");
			if (property != null && property.length() > 0) {
				this.setValidationQuery(property);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.useGlobalDataSourceStat");
			if (value != null) {
				this.setUseGlobalDataSourceStat(value);
			}
		}
		{
			// misspelled key ("Gloal") kept for compatibility with early versions
			Boolean value = getBoolean(properties, "druid.useGloalDataSourceStat");
			if (value != null) {
				this.setUseGlobalDataSourceStat(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.asyncInit");
			if (value != null) {
				this.setAsyncInit(value);
			}
		}
		{
			String property = properties.getProperty("druid.filters");
			if (property != null && property.length() > 0) {
				try {
					this.setFilters(property);
				} catch (SQLException e) {
					LOG.error("setFilters error", e);
				}
			}
		}
		{
			Long value = getLongProperty(properties, Constants.DRUID_TIME_BETWEEN_LOG_STATS_MILLIS);
			if (value != null) {
				this.setTimeBetweenLogStatsMillis(value);
			}
		}
		{
			// only applied when a stat object already exists, as in the original code
			Integer value = getIntProperty(properties, Constants.DRUID_STAT_SQL_MAX_SIZE);
			if (value != null && dataSourceStat != null) {
				dataSourceStat.setMaxSqlSize(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.clearFiltersEnable");
			if (value != null) {
				this.setClearFiltersEnable(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.resetStatEnable");
			if (value != null) {
				this.setResetStatEnable(value);
			}
		}
		{
			Integer value = getIntProperty(properties, "druid.notFullTimeoutRetryCount");
			if (value != null) {
				this.setNotFullTimeoutRetryCount(value);
			}
		}
		{
			Long value = getLongProperty(properties, "druid.timeBetweenEvictionRunsMillis");
			if (value != null) {
				this.setTimeBetweenEvictionRunsMillis(value);
			}
		}
		{
			Integer value = getIntProperty(properties, "druid.maxWaitThreadCount");
			if (value != null) {
				this.setMaxWaitThreadCount(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.failFast");
			if (value != null) {
				this.setFailFast(value);
			}
		}
		{
			Long value = getLongProperty(properties, "druid.phyTimeoutMillis");
			if (value != null) {
				this.setPhyTimeoutMillis(value);
			}
		}
		{
			Long value = getLongProperty(properties, "druid.minEvictableIdleTimeMillis");
			if (value != null) {
				this.setMinEvictableIdleTimeMillis(value);
			}
		}
		{
			Long value = getLongProperty(properties, "druid.maxEvictableIdleTimeMillis");
			if (value != null) {
				this.setMaxEvictableIdleTimeMillis(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.keepAlive");
			if (value != null) {
				this.setKeepAlive(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.poolPreparedStatements");
			if (value != null) {
				this.setPoolPreparedStatements0(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.initVariants");
			if (value != null) {
				this.setInitVariants(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.initGlobalVariants");
			if (value != null) {
				this.setInitGlobalVariants(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.useUnfairLock");
			if (value != null) {
				this.setUseUnfairLock(value);
			}
		}
		{
			String property = properties.getProperty("druid.driverClassName");
			if (property != null) {
				this.setDriverClassName(property);
			}
		}
		{
			Integer value = getIntProperty(properties, "druid.initialSize");
			if (value != null) {
				this.setInitialSize(value);
			}
		}
		{
			Integer value = getIntProperty(properties, "druid.minIdle");
			if (value != null) {
				this.setMinIdle(value);
			}
		}
		{
			Integer value = getIntProperty(properties, "druid.maxActive");
			if (value != null) {
				this.setMaxActive(value);
			}
		}
		{
			Boolean value = getBoolean(properties, "druid.killWhenSocketReadTimeout");
			if (value != null) {
				setKillWhenSocketReadTimeout(value);
			}
		}
		{
			String property = properties.getProperty("druid.connectProperties");
			if (property != null) {
				this.setConnectionProperties(property);
			}
		}
		{
			Integer value = getIntProperty(properties, "druid.maxPoolPreparedStatementPerConnectionSize");
			if (value != null) {
				this.setMaxPoolPreparedStatementPerConnectionSize(value);
			}
		}
		{
			String property = properties.getProperty("druid.initConnectionSqls");
			if (property != null && property.length() > 0) {
				// split on ';' — StringTokenizer/Collections.list cannot throw
				// NumberFormatException, so the original dead catch block was removed
				StringTokenizer tokenizer = new StringTokenizer(property, ";");
				setConnectionInitSqls(Collections.list(tokenizer));
			}
		}
	}

	/**
	 * Parses the named property as a long.
	 *
	 * @return the parsed value, or null when the property is absent, empty or
	 *         malformed (malformed values are logged with the original message format)
	 */
	private Long getLongProperty(Properties properties, String key) {
		String property = properties.getProperty(key);
		if (property == null || property.length() == 0) {
			return null;
		}
		try {
			return Long.parseLong(property);
		} catch (NumberFormatException e) {
			LOG.error("illegal property '" + key + "'", e);
			return null;
		}
	}

	/**
	 * Parses the named property as an int.
	 *
	 * @return the parsed value, or null when the property is absent, empty or
	 *         malformed (malformed values are logged with the original message format)
	 */
	private Integer getIntProperty(Properties properties, String key) {
		String property = properties.getProperty(key);
		if (property == null || property.length() == 0) {
			return null;
		}
		try {
			return Integer.parseInt(property);
		} catch (NumberFormatException e) {
			LOG.error("illegal property '" + key + "'", e);
			return null;
		}
	}

	/** @return whether a connection whose socket read timed out is killed on discard */
	public boolean isKillWhenSocketReadTimeout() {
		return killWhenSocketReadTimeout;
	}

	public void setKillWhenSocketReadTimeout(boolean killWhenSocketTimeOut) {
		this.killWhenSocketReadTimeout = killWhenSocketTimeOut;
	}

	/** @return whether this data source shares the JVM-global {@link JdbcDataSourceStat} */
	public boolean isUseGlobalDataSourceStat() {
		return useGlobalDataSourceStat;
	}

	public void setUseGlobalDataSourceStat(boolean useGlobalDataSourceStat) {
		this.useGlobalDataSourceStat = useGlobalDataSourceStat;
	}

	/** @return whether idle connections are kept alive down to minIdle */
	public boolean isKeepAlive() {
		return keepAlive;
	}

	public void setKeepAlive(boolean keepAlive) {
		this.keepAlive = keepAlive;
	}

	/** @return the stack trace captured when init() ran, for diagnostics */
	public String getInitStackTrace() {
		return initStackTrace;
	}

	public boolean isResetStatEnable() {
		return resetStatEnable;
	}

	/** Enables/disables stat resets, propagating the flag to the stat object if one exists. */
	public void setResetStatEnable(boolean resetStatEnable) {
		this.resetStatEnable = resetStatEnable;
		if (dataSourceStat != null) {
			dataSourceStat.setResetStatEnable(resetStatEnable);
		}
	}

	/** @return number of connections discarded as broken/expired */
	public long getDiscardCount() {
		return discardCount;
	}

	/**
	 * Closes the pool and clears the init/closed flags so the next borrow re-runs init().
	 * Refuses to restart while any connection is checked out.
	 *
	 * @throws SQLException if activeCount is non-zero
	 */
	public void restart() throws SQLException {
		lock.lock();
		try {
			if (activeCount > 0) {
				throw new SQLException("can not restart, activeCount not zero. " + activeCount);
			}
			if (LOG.isInfoEnabled()) {
				LOG.info("{dataSource-" + this.getID() + "} restart");
			}

			// order matters: close first, then reset stats, then clear the flags
			// that gate re-initialization
			this.close();
			this.resetStat();
			this.inited = false;
			this.enable = true;
			this.closed = false;
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Zeroes all statistics counters (no-op unless resetStatEnable is true).
	 * Plain fields are reset under the main lock; the volatile counters are
	 * reset afterwards through their atomic field updaters.
	 */
	public void resetStat() {
		if (!isResetStatEnable()) {
			return;
		}

		lock.lock();
		try {
			connectCount = 0;
			closeCount = 0;
			discardCount = 0;
			recycleCount = 0;
			createCount = 0L;
			directCreateCount = 0;
			destroyCount = 0L;
			removeAbandonedCount = 0;
			notEmptyWaitCount = 0;
			notEmptySignalCount = 0L;
			notEmptyWaitNanos = 0;

			// the active peak restarts from the current activeCount, not from zero
			activePeak = activeCount;
			activePeakTime = 0;
			poolingPeak = 0;
			createTimespan = 0;
			lastError = null;
			lastErrorTimeMillis = 0;
			lastCreateError = null;
			lastCreateErrorTimeMillis = 0;
		} finally {
			lock.unlock();
		}

		connectErrorCountUpdater.set(this, 0);
		errorCountUpdater.set(this, 0);
		commitCountUpdater.set(this, 0);
		rollbackCountUpdater.set(this, 0);
		startTransactionCountUpdater.set(this, 0);
		cachedPreparedStatementHitCountUpdater.set(this, 0);
		closedPreparedStatementCountUpdater.set(this, 0);
		preparedStatementCountUpdater.set(this, 0);
		transactionHistogram.reset();
		cachedPreparedStatementDeleteCountUpdater.set(this, 0);
		recycleErrorCountUpdater.set(this, 0);

		resetCountUpdater.incrementAndGet(this);
	}

	/** @return how many times resetStat() has completed */
	public long getResetCount() {
		return this.resetCount;
	}

	public boolean isEnable() {
		return enable;
	}

	/**
	 * Enables or disables the pool. Disabling wakes every thread blocked waiting
	 * for a connection (they will observe enable == false and fail fast).
	 */
	public void setEnable(boolean enable) {
		lock.lock();
		try {
			this.enable = enable;
			if (!enable) {
				notEmpty.signalAll();
				notEmptySignalCount++;
			}
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Enables or disables the per-connection PreparedStatement cache.
	 * When disabled on a running pool, all cached statements of idle
	 * connections are closed and discarded.
	 */
	public void setPoolPreparedStatements(boolean value) {
		setPoolPreparedStatements0(value);
	}

	private void setPoolPreparedStatements0(boolean value) {
		if (this.poolPreparedStatements == value) {
			return;
		}

		// capture the previous value before overwriting it; the original code
		// assigned first and then logged "newValue -> newValue"
		boolean oldValue = this.poolPreparedStatements;
		this.poolPreparedStatements = value;

		if (!inited) {
			return;
		}

		if (LOG.isInfoEnabled()) {
			LOG.info("set poolPreparedStatements " + oldValue + " -> " + value);
		}

		if (!value) {
			// cache turned off: close and drop every cached statement of the idle connections
			lock.lock();
			try {
				for (int i = 0; i < poolingCount; ++i) {
					DruidConnectionHolder connection = connections[i];

					for (PreparedStatementHolder holder : connection.getStatementPool().getMap().values()) {
						closePreapredStatement(holder);
					}

					connection.getStatementPool().getMap().clear();
				}
			} finally {
				lock.unlock();
			}
		}
	}

	/**
	 * Changes the maximum number of active connections. On a running pool the
	 * internal arrays are resized; they never shrink below the number of
	 * connections currently pooled plus checked out.
	 *
	 * @throws IllegalArgumentException if maxActive is 0, or (once inited) below minIdle
	 */
	public void setMaxActive(int maxActive) {
		if (this.maxActive == maxActive) {
			return;
		}

		if (maxActive == 0) {
			// original message read "maxActive can't not set zero"
			throw new IllegalArgumentException("maxActive can't be set to zero");
		}

		if (!inited) {
			this.maxActive = maxActive;
			return;
		}

		if (maxActive < this.minIdle) {
			throw new IllegalArgumentException("maxActive less than minIdle, " + maxActive + " < " + this.minIdle);
		}

		if (LOG.isInfoEnabled()) {
			LOG.info("maxActive changed : " + this.maxActive + " -> " + maxActive);
		}

		lock.lock();
		try {
			int allCount = this.poolingCount + this.activeCount;
			// never shrink below the connections we are currently tracking
			int capacity = Math.max(maxActive, allCount);

			this.connections = Arrays.copyOf(this.connections, capacity);
			evictConnections = new DruidConnectionHolder[capacity];
			keepAliveConnections = new DruidConnectionHolder[capacity];

			this.maxActive = maxActive;
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Replaces the JDBC connect properties. If the new set differs from the
	 * current one, the change is re-applied through {@link #configFromPropety}
	 * and propagated to filters, the exception sorter, the connection checker
	 * and the stat logger.
	 *
	 * @param properties new properties; null is treated as an empty set
	 */
	@SuppressWarnings("rawtypes")
	public void setConnectProperties(Properties properties) {
		if (properties == null) {
			properties = new Properties();
		}

		boolean equals;
		if (properties.size() == this.connectProperties.size()) {
			equals = true;
			for (Map.Entry entry : properties.entrySet()) {
				Object value = this.connectProperties.get(entry.getKey());
				Object entryValue = entry.getValue();

				// null-safe comparison; the original called value.equals(...) and
				// threw NPE when both sides were null
				if (value == null ? entryValue != null : !value.equals(entryValue)) {
					equals = false;
					break;
				}
			}
		} else {
			equals = false;
		}

		if (!equals) {
			if (inited && LOG.isInfoEnabled()) {
				LOG.info("connectProperties changed : " + this.connectProperties + " -> " + properties);
			}

			configFromPropety(properties);

			for (Filter filter : this.filters) {
				filter.configFromProperties(properties);
			}

			if (exceptionSorter != null) {
				exceptionSorter.configFromProperties(properties);
			}

			if (validConnectionChecker != null) {
				validConnectionChecker.configFromProperties(properties);
			}

			if (statLogger != null) {
				statLogger.configFromProperties(properties);
			}
		}

		this.connectProperties = properties;
	}

	/**
	 * Initializes the pool: resolves driver/dbType, runs filters and vendor checks,
	 * allocates the holder arrays, eagerly creates initialSize connections (unless
	 * asyncInit or a createScheduler is configured), and starts the log/create/destroy
	 * threads. Idempotent — called on every connection borrow; returns immediately
	 * once inited is set.
	 *
	 * @throws SQLException on connect/validation failure or interruption
	 */
	public void init() throws SQLException {
		// fast path: 'inited' is volatile; getConnection() calls init() every time
		if (inited) {
			return;
		}

		final ReentrantLock lock = this.lock;
		try {// interruptible lock acquisition
			lock.lockInterruptibly();
		} catch (InterruptedException e) {
			throw new SQLException("interrupt", e);
		}

		boolean init = false;// becomes true only after a fully successful init
		try {
			if (inited) {// double-check under the lock
				return;
			}
			// record which code path triggered initialization, for diagnostics
			initStackTrace = Utils.toString(Thread.currentThread().getStackTrace());

			this.id = DruidDriver.createDataSourceId();
			if (this.id > 1) {// not the first data source in this JVM: offset the id seeds
				long delta = (this.id - 1) * 100000;// each additional data source shifts the seeds by 100k
				this.connectionIdSeedUpdater.addAndGet(this, delta);// connection id seed
				this.statementIdSeedUpdater.addAndGet(this, delta);// statement id seed
				this.resultSetIdSeedUpdater.addAndGet(this, delta);// result-set id seed
				this.transactionIdSeedUpdater.addAndGet(this, delta);// transaction id seed
			}

			if (this.jdbcUrl != null) {
				this.jdbcUrl = this.jdbcUrl.trim();
				initFromWrapDriverUrl();// special handling for wrapped urls ("jdbc:wrap-jdbc:" prefix)
			}

			// Initialize filters. Filters hook into every stage (create/destroy
			// connection, commit/rollback, ...); e.g. ConfigFilter (password
			// decryption), StatFilter (slow-SQL monitoring), LogFilter. They come
			// from the wrapped jdbcUrl, setFilters(), and SPI, in that order.
			for (Filter filter : filters) {
				filter.init(this);
			}

			if (this.dbType == null || this.dbType.length() == 0) {
				this.dbType = JdbcUtils.getDbType(jdbcUrl, null);// infer dbType from the url when unset
			}

			if (JdbcConstants.MYSQL.equals(this.dbType) || JdbcConstants.MARIADB.equals(this.dbType)
					|| JdbcConstants.ALIYUN_ADS.equals(this.dbType)) {// mysql / mariadb / aliyun_ads
				boolean cacheServerConfigurationSet = false;
				if (this.connectProperties.containsKey("cacheServerConfiguration")) {
					cacheServerConfigurationSet = true;// property present in connectProperties
				} else if (this.jdbcUrl.indexOf("cacheServerConfiguration") != -1) {// or mentioned in the url
					cacheServerConfigurationSet = true;
				}
				if (cacheServerConfigurationSet) {
					// force-enable server configuration caching in the connect properties
					this.connectProperties.put("cacheServerConfiguration", "true");
				}
			}
			// parameter sanity checks
			if (maxActive <= 0) {
				throw new IllegalArgumentException("illegal maxActive " + maxActive);
			}
			if (maxActive < minIdle) {
				throw new IllegalArgumentException("illegal maxActive " + maxActive);
			}
			if (getInitialSize() > maxActive) {
				throw new IllegalArgumentException(
						"illegal initialSize " + this.initialSize + ", maxActive " + maxActive);
			}
			if (timeBetweenLogStatsMillis > 0 && useGlobalDataSourceStat) {
				throw new IllegalArgumentException(
						"timeBetweenLogStatsMillis not support useGlobalDataSourceStat=true");
			}
			if (maxEvictableIdleTimeMillis < minEvictableIdleTimeMillis) {
				throw new SQLException("maxEvictableIdleTimeMillis must be grater than minEvictableIdleTimeMillis");
			}
			if (this.driverClass != null) {// trim whitespace from the driver class name
				this.driverClass = driverClass.trim();
			}
			// Load filters via SPI ServiceLoader (skipped when
			// druid.load.spifilter.skip is set); duplicates are not re-added.
			initFromSPIServiceLoader();

			if (this.driver == null) {// resolve the Driver instance; derive driverClass from the url when absent
				if (this.driverClass == null || this.driverClass.isEmpty()) {
					this.driverClass = JdbcUtils.getDriverClassName(this.jdbcUrl);
				}
				// supports com.alibaba.druid.mock.MockDriver for testing
				if (MockDriver.class.getName().equals(driverClass)) {
					driver = MockDriver.instance;
				} else {// otherwise instantiate the driver class reflectively
					driver = JdbcUtils.createDriver(driverClassLoader, driverClass);
				}
			} else {
				if (this.driverClass == null) {
					this.driverClass = driver.getClass().getName();// record the class name of the supplied driver
				}
			}

			initCheck();// vendor-specific validation (oracle version, db2 query, mysql flag)

			// vendor-specific classification of fatal vs recoverable SQLExceptions
			initExceptionSorter();
			// vendor-specific connection liveness checker (e.g. mysql ping)
			initValidConnectionChecker();
			// warn if testOnBorrow/testOnReturn/testWhileIdle is set without a validationQuery
			validationQueryCheck();
			
			// set up the stat collector: either the JVM-global one or a private one
			if (isUseGlobalDataSourceStat()) {
				dataSourceStat = JdbcDataSourceStat.getGlobal();
				if (dataSourceStat == null) {
					dataSourceStat = new JdbcDataSourceStat("Global", "Global", this.dbType);
					JdbcDataSourceStat.setGlobal(dataSourceStat);
				}
				if (dataSourceStat.getDbType() == null) {
					dataSourceStat.setDbType(this.dbType);
				}
			} else {
				dataSourceStat = new JdbcDataSourceStat(this.name, this.jdbcUrl, this.dbType, this.connectProperties);
			}
			dataSourceStat.setResetStatEnable(this.resetStatEnable);
			
			// idle connections available for borrowing; poolingCount tracks how many
			// slots are filled, borrowers take from the tail
			connections = new DruidConnectionHolder[maxActive];
			// scratch array for connections selected for eviction
			evictConnections = new DruidConnectionHolder[maxActive];
			// scratch array for connections selected for a keep-alive check,
			// returned to 'connections' after the check
			keepAliveConnections = new DruidConnectionHolder[maxActive];

			SQLException connectError = null;

			// eager vs scheduled creation of the initial connections
			if (createScheduler != null) {
				for (int i = 0; i < initialSize; ++i) {
					createTaskCount++;
					CreateConnectionTask task = new CreateConnectionTask(true);
					this.createSchedulerFuture = createScheduler.submit(task);
				}
			} else if (!asyncInit) {
				try {
					// synchronously create initialSize physical connections
					for (int i = 0; i < initialSize; ++i) {
						PhysicalConnectionInfo pyConnectInfo = createPhysicalConnection(); // real database connection
						DruidConnectionHolder holder = new DruidConnectionHolder(this, pyConnectInfo);
						connections[poolingCount] = holder;
						incrementPoolingCount();// poolingCount tracks the number of pooled connections
					}

					if (poolingCount > 0) {
						poolingPeak = poolingCount;
						poolingPeakTime = System.currentTimeMillis();
					}
				} catch (SQLException ex) {
					LOG.error("init datasource error, url: " + this.getUrl(), ex);
					connectError = ex;
				}
			}

			// Periodic stats logging; off by default, enabled via
			// timeBetweenLogStatsMillis. Logging takes the main lock, so the
			// period should not be too short.
			createAndLogThread();
			// Connection creator: a single background thread by default, or the
			// createScheduler pool. With a scheduler, multi-threaded creation only
			// kicks in when keepAlive is true.
			createAndStartCreatorThread();// keeps creating while the pool is not full, waits otherwise
			// Destroyer: periodically evicts expired idle connections; with
			// removeAbandoned=true it also reclaims leaked checked-out connections.
			createAndStartDestroyThread();

			// wait until the creator and destroyer threads have started
			// (NOTE(review): the stray second ';' below is a harmless empty statement)
			initedLatch.await();;
			init = true;// init fully succeeded

			initedTime = new Date();
			registerMbean(); // expose this data source via JMX

			if (connectError != null && poolingCount == 0) {
				throw connectError;
			}

			// With a createScheduler, multiple creation tasks are only submitted
			// when keepAlive is true; otherwise only the notEmpty signal is sent.
			if (keepAlive) {
				// submit minIdle creation tasks up-front
				if (createScheduler != null) {
					for (int i = 0; i < minIdle; ++i) {
						createTaskCount++;
						CreateConnectionTask task = new CreateConnectionTask(true);
						this.createSchedulerFuture = createScheduler.submit(task);
					}
				} else {
					this.emptySignal();
				}
			}

		} catch (SQLException e) {
			LOG.error("{dataSource-" + this.getID() + "} init error", e);
			throw e;
		} catch (InterruptedException e) {
			throw new SQLException(e.getMessage(), e);
		} catch (RuntimeException e) {
			LOG.error("{dataSource-" + this.getID() + "} init error", e);
			throw e;
		} catch (Error e) {
			LOG.error("{dataSource-" + this.getID() + "} init error", e);
			throw e;

		} finally {
			// inited is set even on failure, matching the original behavior
			inited = true;
			lock.unlock();

			if (init && LOG.isInfoEnabled()) {
				String msg = "{dataSource-" + this.getID();

				if (this.name != null && !this.name.isEmpty()) {
					msg += ",";
					msg += this.name;
				}

				msg += "} inited";

				LOG.info(msg);
			}
		}
	}

	/**
	 * Starts the periodic stats-logging thread when timeBetweenLogStatsMillis
	 * is positive; stats logging and stat resetting are mutually exclusive,
	 * so resetStatEnable is switched off.
	 */
	private void createAndLogThread() {
		if (this.timeBetweenLogStatsMillis <= 0) {
			return;
		}

		logStatsThread = new LogStatsThread("Druid-ConnectionPool-Log-" + System.identityHashCode(this));
		logStatsThread.start();

		this.resetStatEnable = false;
	}

	/**
	 * Starts the eviction machinery: either a fixed-rate task on the configured
	 * destroyScheduler (counting down the init latch immediately), or a dedicated
	 * DestroyConnectionThread otherwise.
	 */
	protected void createAndStartDestroyThread() {
		destroyTask = new DestroyTask();

		if (destroyScheduler == null) {
			// no scheduler configured: run the task in a dedicated thread
			destroyConnectionThread = new DestroyConnectionThread(
					"Druid-ConnectionPool-Destroy-" + System.identityHashCode(this));
			destroyConnectionThread.start();
			return;
		}

		long period = timeBetweenEvictionRunsMillis;
		if (period <= 0) {
			period = 1000; // fall back to a 1s cycle for non-positive settings
		}
		destroySchedulerFuture = destroyScheduler.scheduleAtFixedRate(destroyTask, period, period,
				TimeUnit.MILLISECONDS);
		initedLatch.countDown();
	}

	/**
	 * Starts the connection-creator: when a createScheduler is configured the
	 * init latch is counted down right away (creation happens via submitted
	 * tasks); otherwise a dedicated CreateConnectionThread is started, whose
	 * run() performs the countdown itself.
	 */
	protected void createAndStartCreatorThread() {
		if (createScheduler != null) {
			initedLatch.countDown();
			return;
		}

		createConnectionThread = new CreateConnectionThread(
				"Druid-ConnectionPool-Create-" + System.identityHashCode(this));
		createConnectionThread.start();
	}

	/**
	 * Loads filters via the SPI {@link ServiceLoader} and registers every one
	 * annotated with {@code @AutoLoad(true)}. Can be disabled by setting the
	 * system property {@code druid.load.spifilter.skip}.
	 *
	 * @see ServiceLoader
	 */
	private void initFromSPIServiceLoader() {
		String property = System.getProperty("druid.load.spifilter.skip");
		// honor the flag's value: the original skipped whenever the property was
		// present, even for -Ddruid.load.spifilter.skip=false
		if (property != null && !"false".equals(property)) {
			return;
		}

		// discover Filter implementations on the classpath and add them to this data source
		ServiceLoader<Filter> druidAutoFilterLoader = ServiceLoader.load(Filter.class);

		for (Filter autoFilter : druidAutoFilterLoader) {
			AutoLoad autoLoad = autoFilter.getClass().getAnnotation(AutoLoad.class);
			if (autoLoad != null && autoLoad.value()) {
				if (LOG.isInfoEnabled()) {
					LOG.info("load filter from spi :" + autoFilter.getClass().getName());
				}
				addFilter(autoFilter);
			}
		}
	}

	/**
	 * Handles wrapped urls ("jdbc:wrap-jdbc:" prefix): parses the proxy config,
	 * unwraps the raw driver class, url and name, and registers the filters the
	 * wrapped url declares. No-op for ordinary urls.
	 */
	private void initFromWrapDriverUrl() throws SQLException {
		if (!jdbcUrl.startsWith(DruidDriver.DEFAULT_PREFIX)) {
			return;
		}
		// parse the proxy configuration out of the wrapped url
		DataSourceProxyConfig config = DruidDriver.parseConfig(jdbcUrl, null);
		this.driverClass = config.getRawDriverClassName();
		// NOTE(review): this logs at error level on every wrapped url, not only on
		// failure — looks like it nags users to configure the raw url; confirm intent
		LOG.error("error url : '" + jdbcUrl + "', it should be : '" + config.getRawUrl() + "'");
		this.jdbcUrl = config.getRawUrl();
		if (this.name == null) {
			this.name = config.getName();
		}
		for (Filter filter : config.getFilters()) {
			addFilter(filter);// duplicates (same class) are not re-added
		}
	}

	/**
	 * Registers a filter, de-duplicating by exact class: if a filter of the
	 * same class is already present, the new one is silently skipped.
	 *
	 * @param filter the filter to register (initialized with this data source)
	 */
	private void addFilter(Filter filter) {
		for (Filter registered : this.filters) {
			if (registered.getClass() == filter.getClass()) {
				return; // already registered, nothing to do
			}
		}

		filter.init(this);
		this.filters.add(filter);
	}

	/**
	 * Logs a configuration error when connection testing is enabled but
	 * neither a validationQuery nor a ValidConnectionChecker is configured.
	 */
	private void validationQueryCheck() {
		// A validation query only matters when some form of testing is on.
		boolean testingEnabled = testOnBorrow || testOnReturn || testWhileIdle;
		if (!testingEnabled) {
			return;
		}

		// A custom checker makes the query unnecessary.
		if (this.validConnectionChecker != null) {
			return;
		}

		// Query configured: everything is consistent.
		if (this.validationQuery != null && this.validationQuery.length() > 0) {
			return;
		}

		StringBuilder errorMessage = new StringBuilder();
		if (testOnBorrow) {
			errorMessage.append("testOnBorrow is true, ");
		}
		if (testOnReturn) {
			errorMessage.append("testOnReturn is true, ");
		}
		if (testWhileIdle) {
			errorMessage.append("testWhileIdle is true, ");
		}

		LOG.error(errorMessage + "validationQuery not set");
	}

	/**
	 * Database-specific initialization checks: rejects unsupported Oracle
	 * driver versions, verifies Oracle/DB2 validation queries, and records
	 * the MySQL flavor (MySQL needs no validation-query check).
	 */
	protected void initCheck() throws SQLException {
		if (JdbcUtils.ORACLE.equals(this.dbType)) {
			isOracle = true;

			// Oracle drivers older than version 10 are not supported.
			int majorVersion = driver.getMajorVersion();
			if (majorVersion < 10) {
				throw new SQLException("not support oracle driver " + majorVersion + "." + driver.getMinorVersion());
			}

			// 10.x implicit-cache memory workaround.
			if (majorVersion == 10 && isUseOracleImplicitCache()) {
				this.getConnectProperties().setProperty("oracle.jdbc.FreeMemoryOnEnterImplicitCache", "true");
			}

			oracleValidationQueryCheck();
		} else if (JdbcUtils.DB2.equals(dbType)) {
			db2ValidationQueryCheck();
		} else if (JdbcUtils.MYSQL.equals(this.dbType) || JdbcUtils.MYSQL_DRIVER_6.equals(this.dbType)) {
			isMySql = true;
		}
	}

	/**
	 * Warns when the configured validationQuery is a SELECT without a FROM
	 * clause, which is invalid on Oracle (should end with "FROM DUAL").
	 */
	private void oracleValidationQueryCheck() {
		// Nothing to verify without a configured validation query.
		if (validationQuery == null || validationQuery.length() == 0) {
			return;
		}

		SQLStatementParser sqlStmtParser = SQLParserUtils.createSQLStatementParser(validationQuery, this.dbType);
		List<SQLStatement> stmtList = sqlStmtParser.parseStatementList();

		// Only single-statement SELECT queries are checked.
		if (stmtList.size() != 1 || !(stmtList.get(0) instanceof SQLSelectStatement)) {
			return;
		}

		SQLSelectStatement selectStmt = (SQLSelectStatement) stmtList.get(0);
		SQLSelectQuery query = selectStmt.getSelect().getQuery();
		if (query instanceof SQLSelectQueryBlock && ((SQLSelectQueryBlock) query).getFrom() == null) {
			LOG.error("invalid oracle validationQuery. " + validationQuery + ", may should be : " + validationQuery
					+ " FROM DUAL");
		}
	}

	/**
	 * Warns when the configured validationQuery is a SELECT without a FROM
	 * clause, which is invalid on DB2 (should end with "FROM SYSDUMMY").
	 */
	private void db2ValidationQueryCheck() {
		// Nothing to verify without a configured validation query.
		if (validationQuery == null || validationQuery.length() == 0) {
			return;
		}

		SQLStatementParser sqlStmtParser = SQLParserUtils.createSQLStatementParser(validationQuery, this.dbType);
		List<SQLStatement> stmtList = sqlStmtParser.parseStatementList();

		// Only single-statement SELECT queries are checked.
		if (stmtList.size() != 1 || !(stmtList.get(0) instanceof SQLSelectStatement)) {
			return;
		}

		SQLSelectStatement selectStmt = (SQLSelectStatement) stmtList.get(0);
		SQLSelectQuery query = selectStmt.getSelect().getQuery();
		if (query instanceof SQLSelectQueryBlock && ((SQLSelectQueryBlock) query).getFrom() == null) {
			LOG.error("invalid db2 validationQuery. " + validationQuery + ", may should be : " + validationQuery
					+ " FROM SYSDUMMY");
		}
	}

	/**
	 * Selects a ValidConnectionChecker matching the concrete JDBC driver in
	 * use, unless one was configured explicitly. Unknown drivers get none.
	 */
	private void initValidConnectionChecker() {
		// Respect an explicitly configured checker.
		if (this.validConnectionChecker != null) {
			return;
		}

		String realDriverClassName = driver.getClass().getName();

		if (JdbcUtils.isMySqlDriver(realDriverClassName)) {
			this.validConnectionChecker = new MySqlValidConnectionChecker();
			return;
		}
		if (realDriverClassName.equals(JdbcConstants.ORACLE_DRIVER)
				|| realDriverClassName.equals(JdbcConstants.ORACLE_DRIVER2)) {
			this.validConnectionChecker = new OracleValidConnectionChecker();
			return;
		}
		if (realDriverClassName.equals(JdbcConstants.SQL_SERVER_DRIVER)
				|| realDriverClassName.equals(JdbcConstants.SQL_SERVER_DRIVER_SQLJDBC4)
				|| realDriverClassName.equals(JdbcConstants.SQL_SERVER_DRIVER_JTDS)) {
			this.validConnectionChecker = new MSSQLValidConnectionChecker();
			return;
		}
		if (realDriverClassName.equals(JdbcConstants.POSTGRESQL_DRIVER)
				|| realDriverClassName.equals(JdbcConstants.ENTERPRISEDB_DRIVER)) {
			this.validConnectionChecker = new PGValidConnectionChecker();
		}
	}

	/**
	 * Selects an ExceptionSorter matching the driver, walking up the driver's
	 * class hierarchy until a known vendor class is found. A NullExceptionSorter
	 * paired with a MockDriver is replaced; any other non-null sorter was
	 * configured explicitly and wins.
	 */
	private void initExceptionSorter() {
		if (exceptionSorter instanceof NullExceptionSorter) {
			if (driver instanceof MockDriver) {
				return;
			}
		} else if (this.exceptionSorter != null) {
			return;
		}

		Class<?> driverClass = driver.getClass();
		while (driverClass != null) {
			String realDriverClassName = driverClass.getName();
			if (realDriverClassName.equals(JdbcConstants.MYSQL_DRIVER)
					|| realDriverClassName.equals(JdbcConstants.MYSQL_DRIVER_6)) {
				this.exceptionSorter = new MySqlExceptionSorter();
				this.isMySql = true;
				return;
			}
			if (realDriverClassName.equals(JdbcConstants.ORACLE_DRIVER)
					|| realDriverClassName.equals(JdbcConstants.ORACLE_DRIVER2)) {
				this.exceptionSorter = new OracleExceptionSorter();
				return;
			}
			if (realDriverClassName.equals("com.informix.jdbc.IfxDriver")) {
				this.exceptionSorter = new InformixExceptionSorter();
				return;
			}
			if (realDriverClassName.equals("com.sybase.jdbc2.jdbc.SybDriver")) {
				this.exceptionSorter = new SybaseExceptionSorter();
				return;
			}
			if (realDriverClassName.equals(JdbcConstants.POSTGRESQL_DRIVER)
					|| realDriverClassName.equals(JdbcConstants.ENTERPRISEDB_DRIVER)) {
				this.exceptionSorter = new PGExceptionSorter();
				return;
			}
			if (realDriverClassName.equals("com.alibaba.druid.mock.MockDriver")) {
				this.exceptionSorter = new MockExceptionSorter();
				return;
			}
			if (realDriverClassName.contains("DB2")) {
				this.exceptionSorter = new DB2ExceptionSorter();
				return;
			}

			// Unknown name: try the superclass (drivers are often wrapped),
			// stopping at Object.
			Class<?> superClass = driverClass.getSuperclass();
			if (superClass == null || superClass == Object.class) {
				return;
			}
			driverClass = superClass;
		}
	}

	/**
	 * Obtains a pooled connection, waiting up to the configured
	 * {@code maxWait} milliseconds.
	 */
	@Override
	public DruidPooledConnection getConnection() throws SQLException {
		return getConnection(maxWait);
	}

	/**
	 * Obtains a pooled connection, waiting at most the given time. When
	 * filters are configured the call is routed through the filter chain so
	 * that filters (stat, wall, ...) can observe it; the chain itself is not
	 * bound to a particular data source — the target is passed per call.
	 *
	 * @param maxWaitMillis maximum wait for a connection, in milliseconds
	 */
	public DruidPooledConnection getConnection(long maxWaitMillis) throws SQLException {
		init();

		if (filters.size() == 0) {
			return getConnectionDirect(maxWaitMillis);
		}

		FilterChainImpl filterChain = new FilterChainImpl(this);
		return filterChain.dataSource_connect(this, maxWaitMillis);
	}

	/**
	 * {@inheritDoc} Delegates to the pooled {@link #getConnection()} path with
	 * the configured {@code maxWait}.
	 */
	@Override
	public PooledConnection getPooledConnection() throws SQLException {
		return getConnection(maxWait);
	}

	/**
	 * Not supported: DruidDataSource pools a single credential pair and does
	 * not accept per-call credentials here.
	 *
	 * @throws UnsupportedOperationException always
	 */
	@Override
	public PooledConnection getPooledConnection(String user, String password) throws SQLException {
		throw new UnsupportedOperationException("Not supported by DruidDataSource");
	}

	/**
	 * Obtains a connection without going through the filter chain. Handles
	 * timeout retry while the pool is not full, testOnBorrow / testWhileIdle
	 * validation, and removeAbandoned tracking before returning.
	 *
	 * @param maxWaitMillis maximum time to wait for a pooled connection
	 */
	public DruidPooledConnection getConnectionDirect(long maxWaitMillis) throws SQLException {
		int notFullTimeoutRetryCnt = 0;
		for (;;) {
			// handle notFullTimeoutRetry
			DruidPooledConnection poolableConnection;
			try {
				poolableConnection = getConnectionInternal(maxWaitMillis); // borrow from the pool
			} catch (GetConnectionTimeoutException ex) {
				// On timeout, retry while the pool still has capacity, at most
				// notFullTimeoutRetryCount times; otherwise propagate.
				if (notFullTimeoutRetryCnt <= this.notFullTimeoutRetryCount && !isFull()) {
					notFullTimeoutRetryCnt++;
					if (LOG.isWarnEnabled()) {
						LOG.warn("get connection timeout retry : " + notFullTimeoutRetryCnt);
					}
					continue;
				}
				throw ex;
			}

			if (testOnBorrow) {
				// testOnBorrow: validate before returning. This is more than a
				// closed-check — it delegates to the ValidConnectionChecker.
				// A failed check discards the connection and retries the loop.
				boolean validate = testConnectionInternal(poolableConnection.holder, poolableConnection.conn);
				if (!validate) {
					if (LOG.isDebugEnabled()) {
						LOG.debug("skip not validate connection.");
					}

					Connection realConnection = poolableConnection.conn;
					discardConnection(realConnection);
					continue;
				}
			} else {
				Connection realConnection = poolableConnection.conn;
				if (poolableConnection.conn.isClosed()) {
					discardConnection(null); // pass null to avoid closing twice
					continue;
				}

				// testWhileIdle: validate only when the connection has been idle
				// longer than timeBetweenEvictionRunsMillis.
				if (testWhileIdle) {
					long currentTimeMillis = System.currentTimeMillis();
					long lastActiveTimeMillis = poolableConnection.holder.lastActiveTimeMillis;
					long idleMillis = currentTimeMillis - lastActiveTimeMillis;

					long timeBetweenEvictionRunsMillis = this.timeBetweenEvictionRunsMillis;

					if (timeBetweenEvictionRunsMillis <= 0) {
						timeBetweenEvictionRunsMillis = DEFAULT_TIME_BETWEEN_EVICTION_RUNS_MILLIS;
					}

					if (idleMillis >= timeBetweenEvictionRunsMillis || idleMillis < 0 // unexpected branch (clock moved back)
					) {
						boolean validate = testConnectionInternal(poolableConnection.holder, poolableConnection.conn);
						if (!validate) {
							if (LOG.isDebugEnabled()) {
								LOG.debug("skip not validate connection.");
							}

							discardConnection(realConnection);
							continue;
						}
					}
				}
			}

			// removeAbandoned: remember the borrowed connection so the destroy
			// thread can reclaim it if the caller never returns it. Only with
			// removeAbandoned=true are out-of-pool connections ever cleaned up.
			if (removeAbandoned) {
				StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
				poolableConnection.connectStackTrace = stackTrace;
				poolableConnection.setConnectedTimeNano();
				poolableConnection.traceEnable = true;

				// activeConnections is an IdentityHashMap guarded by its own lock.
				activeConnectionLock.lock();
				try {
					activeConnections.put(poolableConnection, PRESENT);
				} finally {
					activeConnectionLock.unlock();
				}
			}

			if (!this.defaultAutoCommit) {
				poolableConnection.setAutoCommit(false);
			}

			return poolableConnection;
		}
	}

	/**
	 * Discards a connection instead of recycling it: the physical connection
	 * is closed and the pool counters are adjusted.
	 *
	 * NOTE(review): activeCount is decremented unconditionally, so callers
	 * must only pass connections previously counted as active — verify at
	 * each call site.
	 *
	 * @param realConnection the physical connection to close; may be null
	 *                       (counters are still updated)
	 */
	public void discardConnection(Connection realConnection) {
		JdbcUtils.close(realConnection);

		lock.lock();
		try {
			activeCount--;
			discardCount++;

			// Ask the creator to refill when the active count falls to minIdle.
			if (activeCount <= minIdle) {
				emptySignal();
			}
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Core borrow logic (the "consumer" side): takes a holder from the pool
	 * via pollLast(nanos)/takeLast(), or creates one directly when the
	 * configured createScheduler is backed up.
	 *
	 * @param maxWait maximum wait in milliseconds; <= 0 waits indefinitely
	 * @throws GetConnectionTimeoutException when no connection arrives in time
	 */
	private DruidPooledConnection getConnectionInternal(long maxWait) throws SQLException {
		if (closed) {
			connectErrorCountUpdater.incrementAndGet(this);
			throw new DataSourceClosedException("dataSource already closed at " + new Date(closeTimeMillis));
		}

		if (!enable) {
			connectErrorCountUpdater.incrementAndGet(this);
			throw new DataSourceDisableException();
		}

		final long nanos = TimeUnit.MILLISECONDS.toNanos(maxWait);
		final int maxWaitThreadCount = this.maxWaitThreadCount;

		DruidConnectionHolder holder; // the holder eventually handed to the caller

		for (boolean createDirect = false;;) {
			if (createDirect) { // direct-creation path, only used with a saturated createScheduler
				if (creatingCountUpdater.compareAndSet(this, 0, 1)) {
					PhysicalConnectionInfo pyConnInfo = DruidDataSource.this.createPhysicalConnection();
					holder = new DruidConnectionHolder(this, pyConnInfo);
					holder.lastActiveTimeMillis = System.currentTimeMillis();

					creatingCountUpdater.decrementAndGet(this);
					directCreateCountUpdater.incrementAndGet(this);

					if (LOG.isDebugEnabled()) {
						LOG.debug("conn-direct_create ");
					}

					boolean discard = false;
					lock.lock();
					try {
						if (activeCount < maxActive) {
							activeCount++;
							if (activeCount > activePeak) {
								activePeak = activeCount;
								activePeakTime = System.currentTimeMillis();
							}
							break;
						} else {
							// pool already at maxActive: throw the new
							// physical connection away
							discard = true;
						}
					} finally {
						lock.unlock();
					}

					if (discard) {
						JdbcUtils.close(pyConnInfo.getPhysicalConnection());
					}
				}
			}

			try { // acquire the main pool lock
				lock.lockInterruptibly();
			} catch (InterruptedException e) {
				connectErrorCountUpdater.incrementAndGet(this);
				throw new SQLException("interrupt", e);
			}

			try {
				// Reject when too many threads are already waiting;
				// notEmptyWaitThreadCount is maintained in pollLast()/takeLast().
				if (maxWaitThreadCount > 0 && notEmptyWaitThreadCount >= maxWaitThreadCount) {
					connectErrorCountUpdater.incrementAndGet(this);
					throw new SQLException("maxWaitThreadCount " + maxWaitThreadCount + ", current wait Thread count "
							+ lock.getQueueLength());
				}

				// While in fatal-error state, cap the number of active connections.
				if (onFatalError && onFatalErrorMaxActive > 0 && activeCount >= onFatalErrorMaxActive) {
					connectErrorCountUpdater.incrementAndGet(this);

					SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
					String errorMsg = "onFatalError, activeCount " + activeCount + ", onFatalErrorMaxActive "
							+ onFatalErrorMaxActive;

					if (lastFatalErrorTimeMillis > 0) {
						errorMsg += ", time '" + format.format(new Date(lastFatalErrorTimeMillis)) + "'";
					}

					if (lastFatalErrorSql != null) {
						errorMsg += ", sql \n" + lastFatalErrorSql;
					}

					throw new SQLException(errorMsg, lastFatalError);
				}

				connectCount++;

				// With a createScheduler: if the pool is empty, capacity
				// remains, nothing is being created and the scheduler queue is
				// already backed up, switch to the direct-creation path above.
				if (createScheduler != null && poolingCount == 0 && activeCount < maxActive
						&& creatingCountUpdater.get(this) == 0
						&& createScheduler instanceof ScheduledThreadPoolExecutor) {
					ScheduledThreadPoolExecutor executor = (ScheduledThreadPoolExecutor) createScheduler;
					if (executor.getQueue().size() > 0) {
						createDirect = true;
						continue;
					}
				}

				if (maxWait > 0) {
					holder = pollLast(nanos); // bounded wait for a pooled connection
				} else {
					holder = takeLast(); // unbounded wait for a pooled connection
				}

				if (holder != null) {
					activeCount++;
					if (activeCount > activePeak) {
						activePeak = activeCount;
						activePeakTime = System.currentTimeMillis();
					}
				}
			} catch (InterruptedException e) {
				connectErrorCountUpdater.incrementAndGet(this);
				throw new SQLException(e.getMessage(), e);
			} catch (SQLException e) {
				connectErrorCountUpdater.incrementAndGet(this);
				throw e;
			} finally {
				lock.unlock();
			}

			break;
		}

		if (holder == null) {
			// Timed out: build a diagnostic message including running SQL.
			long waitNanos = waitNanosLocal.get();

			StringBuilder buf = new StringBuilder();
			buf.append("wait millis ")//
					.append(waitNanos / (1000 * 1000))//
					.append(", active ").append(activeCount)//
					.append(", maxActive ").append(maxActive)//
					.append(", creating ").append(creatingCount)//
			;

			List<JdbcSqlStatValue> sqlList = this.getDataSourceStat().getRuningSqlList();
			for (int i = 0; i < sqlList.size(); ++i) {
				if (i != 0) {
					buf.append('\n');
				} else {
					buf.append(", ");
				}
				JdbcSqlStatValue sql = sqlList.get(i);
				buf.append("runningSqlCount ").append(sql.getRunningCount());
				buf.append(" : ");
				buf.append(sql.getSql());
			}

			String errorMessage = buf.toString();

			if (this.createError != null) {
				throw new GetConnectionTimeoutException(errorMessage, createError);
			} else {
				throw new GetConnectionTimeoutException(errorMessage);
			}
		}

		// Wrap the holder into a pooled connection for the caller.
		holder.incrementUseCount();

		DruidPooledConnection poolalbeConnection = new DruidPooledConnection(holder);
		return poolalbeConnection;
	}

	/**
	 * Records a connection error, notifies registered ConnectionEventListeners
	 * and, when the exceptionSorter classifies the error as fatal, discards
	 * the connection via handleFatalError. Always rethrows.
	 */
	public void handleConnectionException(DruidPooledConnection pooledConnection, Throwable t, String sql)
			throws SQLException {
		final DruidConnectionHolder holder = pooledConnection.getConnectionHolder();

		// Record for statistics / diagnostics.
		errorCountUpdater.incrementAndGet(this);
		lastError = t;
		lastErrorTimeMillis = System.currentTimeMillis();

		if (!(t instanceof SQLException)) {
			throw new SQLException("Error", t);
		}

		SQLException sqlEx = (SQLException) t;

		// Broadcast the error to all registered listeners.
		ConnectionEvent event = new ConnectionEvent(pooledConnection, sqlEx);
		for (ConnectionEventListener eventListener : holder.getConnectionEventListeners()) {
			eventListener.connectionErrorOccurred(event);
		}

		// Fatal errors cause the connection to be discarded.
		if (exceptionSorter != null && exceptionSorter.isExceptionFatal(sqlEx)) {
			handleFatalError(pooledConnection, sqlEx, sql);
		}

		throw sqlEx;
	}

	/**
	 * Marks a connection fatally broken: removes it from abandoned tracking,
	 * disables it, closes its traced statements and discards the physical
	 * connection. Also records the fatal-error state consulted by
	 * getConnectionInternal's onFatalErrorMaxActive gate.
	 */
	protected final void handleFatalError(DruidPooledConnection conn, SQLException error, String sql)
			throws SQLException {
		final DruidConnectionHolder holder = conn.holder;

		// Double-checked removal from the abandoned-tracking map.
		if (conn.isTraceEnable()) {
			activeConnectionLock.lock();
			try {
				if (conn.isTraceEnable()) {
					activeConnections.remove(conn);
					conn.setTraceEnable(false);
				}
			} finally {
				activeConnectionLock.unlock();
			}
		}

		long lastErrorTimeMillis = this.lastErrorTimeMillis;
		if (lastErrorTimeMillis == 0) {
			lastErrorTimeMillis = System.currentTimeMillis();
		}

		// Cap the SQL recorded with the fatal error at 1024 characters.
		if (sql != null && sql.length() > 1024) {
			sql = sql.substring(0, 1024);
		}

		boolean requireDiscard = false;
		final ReentrantLock lock = conn.lock;
		lock.lock();
		try {
			// NOTE(review): this uses '||' — the connection is disabled when it
			// is either not closed or not yet disabled. Confirm '&&' was not
			// intended.
			if ((!conn.isClosed()) || !conn.isDisable()) {
				holder.setDiscard(true);
				conn.disable(error);
				requireDiscard = true;
			}

			lastFatalErrorTimeMillis = lastErrorTimeMillis;
			onFatalError = true;
			lastFatalError = error;
			lastFatalErrorSql = sql;
		} finally {
			lock.unlock();
		}

		if (requireDiscard) {
			// Close any statements traced on this connection before discarding.
			if (holder.statementTrace != null) {
				for (Statement stmt : holder.statementTrace) {
					JdbcUtils.close(stmt);
				}
			}

			this.discardConnection(holder.getConnection());
			holder.setDiscard(true);
		}

		LOG.error("discard connection", error);
	}

	/**
	 * Returns a connection to the pool. Invoked when the caller closes a
	 * DruidPooledConnection (Connection.close() routes back here). Performs
	 * rollback if needed, resets the holder, optionally validates
	 * (testOnReturn) and appends the holder back to the pool via putLast.
	 */
	protected void recycle(DruidPooledConnection pooledConnection) throws SQLException {
		final DruidConnectionHolder holder = pooledConnection.holder;

		if (holder == null) {
			LOG.warn("connectionHolder is null");
			return;
		}

		// Warn when borrow and close happen on different threads
		// (unless asynchronous close is enabled).
		if (logDifferentThread //
				&& (!isAsyncCloseConnectionEnable()) //
				&& pooledConnection.ownerThread != Thread.currentThread()//
		) {
			LOG.warn("get/close not same thread");
		}

		final Connection physicalConnection = holder.conn;

		// Remove from abandoned tracking (double-checked under its own lock).
		if (pooledConnection.traceEnable) {
			Object oldInfo = null;
			activeConnectionLock.lock();
			try {
				if (pooledConnection.traceEnable) {
					oldInfo = activeConnections.remove(pooledConnection);
					pooledConnection.traceEnable = false;
				}
			} finally {
				activeConnectionLock.unlock();
			}
			if (oldInfo == null) {
				if (LOG.isWarnEnabled()) {
					LOG.warn("remove abandonded failed. activeConnections.size " + activeConnections.size());
				}
			}
		}

		final boolean isAutoCommit = holder.underlyingAutoCommit;
		final boolean isReadOnly = holder.underlyingReadOnly;
		final boolean testOnReturn = this.testOnReturn;

		try {
			// check need to rollback?
			if ((!isAutoCommit) && (!isReadOnly)) {
				pooledConnection.rollback();
			}

			// reset holder, restore default settings, clear warnings
			boolean isSameThread = pooledConnection.ownerThread == Thread.currentThread();
			if (!isSameThread) {
				final ReentrantLock lock = pooledConnection.lock;
				lock.lock();
				try {
					holder.reset();
				} finally {
					lock.unlock();
				}
			} else {
				holder.reset();
			}

			if (holder.discard) {
				return;
			}

			// Already closed physically: just adjust the counters.
			if (physicalConnection.isClosed()) {
				lock.lock();
				try {
					activeCount--;
					closeCount++;
				} finally {
					lock.unlock();
				}
				return;
			}

			// testOnReturn: validate before putting back; failed connections
			// are physically closed and counted as destroyed.
			if (testOnReturn) {
				boolean validate = testConnectionInternal(holder, physicalConnection);
				if (!validate) {
					JdbcUtils.close(physicalConnection);

					destroyCountUpdater.incrementAndGet(this);

					lock.lock();
					try {
						activeCount--;
						closeCount++;
					} finally {
						lock.unlock();
					}
					return;
				}
			}

			if (!enable) {
				discardConnection(holder.conn);
				return;
			}

			boolean result;
			final long lastActiveTimeMillis = System.currentTimeMillis();
			lock.lock();
			try {
				activeCount--;
				closeCount++;
				// append to the tail of the pool array
				result = putLast(holder, lastActiveTimeMillis); 
				recycleCount++;
			} finally {
				lock.unlock();
			}

			if (!result) {
				// pool full: close the physical connection instead
				JdbcUtils.close(holder.conn);
				LOG.info("connection recyle failed.");
			}
		} catch (Throwable e) {
			holder.clearStatementCache();

			if (!holder.discard) {
				this.discardConnection(physicalConnection);
				holder.discard = true;
			}

			LOG.error("recyle error", e);
			recycleErrorCountUpdater.incrementAndGet(this);
		}
	}

	/** Number of recycle operations that failed with an exception. */
	public long getRecycleErrorCount() {
		return recycleErrorCount;
	}

	/**
	 * Clears the prepared-statement cache of every idle connection currently
	 * held in the pool. Runs under the pool lock.
	 */
	public void clearStatementCache() throws SQLException {
		lock.lock();
		try {
			for (int i = 0; i < poolingCount; ++i) {
				DruidConnectionHolder holder = connections[i];
				if (holder.statementPool != null) {
					holder.statementPool.clear();
				}
			}
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Closes the data source: interrupts the worker threads, cancels the
	 * scheduled create/destroy tasks, closes every pooled physical connection
	 * and its cached statements, unregisters the MBean and destroys all
	 * filters. Idempotent; a never-initialized data source is a no-op.
	 */
	public void close() {
		lock.lock();
		try {
			if (this.closed) {
				return; // already closed
			}

			if (!this.inited) {
				return; // never initialized: nothing to release
			}

			this.closing = true;

			// Interrupt background workers.
			if (logStatsThread != null) {
				logStatsThread.interrupt();
			}

			if (createConnectionThread != null) {
				createConnectionThread.interrupt();
			}

			if (destroyConnectionThread != null) {
				destroyConnectionThread.interrupt();
			}

			// Cancel scheduler-based create/destroy tasks.
			if (createSchedulerFuture != null) {
				createSchedulerFuture.cancel(true);
			}

			if (destroySchedulerFuture != null) {
				destroySchedulerFuture.cancel(true);
			}

			// Close cached statements and the physical connection of every
			// idle pooled connection.
			for (int i = 0; i < poolingCount; ++i) {
				DruidConnectionHolder connHolder = connections[i];

				for (PreparedStatementHolder stmtHolder : connHolder.getStatementPool().getMap().values()) {
					connHolder.getStatementPool().closeRemovedStatement(stmtHolder);
				}
				connHolder.getStatementPool().getMap().clear();

				Connection physicalConnection = connHolder.getConnection();
				try {
					physicalConnection.close();
				} catch (Exception ex) {
					LOG.warn("close connection error", ex);
				}
				connections[i] = null;
				destroyCountUpdater.incrementAndGet(this);
			}
			poolingCount = 0;
			unregisterMbean();

			enable = false;
			// Wake all waiters so they observe the disabled state.
			notEmpty.signalAll();
			notEmptySignalCount++;

			this.closed = true;
			this.closeTimeMillis = System.currentTimeMillis();

			for (Filter filter : filters) {
				filter.destroy();
			}
		} finally {
			lock.unlock();
		}

		if (LOG.isInfoEnabled()) {
			LOG.info("{dataSource-" + this.getID() + "} closed");
		}
	}

	/**
	 * Registers this data source with the JMX stat manager (at most once) and
	 * stores the resulting ObjectName. Wrapped in a privileged action so it
	 * works under a security manager.
	 */
	public void registerMbean() {
		if (!mbeanRegistered) {
			AccessController.doPrivileged(new PrivilegedAction<Object>() {

				@Override
				public Object run() {
					ObjectName objectName = DruidDataSourceStatManager.addDataSource(DruidDataSource.this,
							DruidDataSource.this.name);

					DruidDataSource.this.setObjectName(objectName);
					DruidDataSource.this.mbeanRegistered = true;

					return null;
				}
			});
		}
	}

	/**
	 * Removes this data source from the JMX stat manager, if registered.
	 */
	public void unregisterMbean() {
		if (mbeanRegistered) {
			AccessController.doPrivileged(new PrivilegedAction<Object>() {

				@Override
				public Object run() {
					DruidDataSourceStatManager.removeDataSource(DruidDataSource.this);
					DruidDataSource.this.mbeanRegistered = false;
					return null;
				}
			});
		}
	}

	/** Whether this data source is currently registered as a JMX MBean. */
	public boolean isMbeanRegistered() {
		return mbeanRegistered;
	}

	/**
	 * Puts a recycled holder back at the tail of the pool array and wakes one
	 * waiting consumer. Caller must hold {@code lock}.
	 *
	 * @return false when the pool already holds maxActive connections
	 */
	boolean putLast(DruidConnectionHolder e, long lastActiveTimeMillis) {
		if (poolingCount >= maxActive) {
			return false;
		}

		e.lastActiveTimeMillis = lastActiveTimeMillis;
		connections[poolingCount] = e;
		incrementPoolingCount();

		// Track the pool-size peak for statistics.
		if (poolingCount > poolingPeak) {
			poolingPeak = poolingCount;
			poolingPeakTime = lastActiveTimeMillis;
		}

		// Wake one thread blocked in takeLast()/pollLast().
		notEmpty.signal();
		notEmptySignalCount++;

		return true;
	}

	/**
	 * Takes the tail holder from the pool, waiting indefinitely (bounded only
	 * by interruption or the pool being disabled) until one is available.
	 * Caller must hold {@code lock}; {@code notEmpty} is its condition.
	 */
	DruidConnectionHolder takeLast() throws InterruptedException, SQLException {
		try {
			while (poolingCount == 0) {
				emptySignal(); // ask the creator to produce a connection

				if (failFast && failContinuous.get()) {
					throw new DataSourceNotAvailableException(createError);
				}

				notEmptyWaitThreadCount++;
				if (notEmptyWaitThreadCount > notEmptyWaitThreadPeak) {
					notEmptyWaitThreadPeak = notEmptyWaitThreadCount;
				}
				try {
					notEmpty.await(); // pool is empty: wait until it is not
				} finally {
					notEmptyWaitThreadCount--;
				}
				notEmptyWaitCount++;

				if (!enable) {
					connectErrorCountUpdater.incrementAndGet(this);
					throw new DataSourceDisableException();
				}
			}
		} catch (InterruptedException ie) {
			notEmpty.signal(); // propagate to non-interrupted thread
			notEmptySignalCount++;
			throw ie;
		}

		decrementPoolingCount(); // one fewer pooled connection
		DruidConnectionHolder last = connections[poolingCount]; // take the tail element
		connections[poolingCount] = null; // clear the vacated slot

		return last;
	}

	/**
	 * Takes the tail holder from the pool, waiting at most {@code nanos}
	 * nanoseconds. Returns null on timeout; the waited time is stored in
	 * {@code waitNanosLocal} for the caller's error message.
	 * Caller must hold {@code lock}.
	 */
	private DruidConnectionHolder pollLast(long nanos) throws InterruptedException, SQLException {
		long estimate = nanos;

		for (;;) {
			if (poolingCount == 0) {
				emptySignal(); // pool empty: ask the creator to produce, then wait

				if (failFast && failContinuous.get()) {
					throw new DataSourceNotAvailableException(createError);
				}

				// Time budget already exhausted.
				if (estimate <= 0) {
					waitNanosLocal.set(nanos - estimate);
					return null;
				}

				notEmptyWaitThreadCount++;
				if (notEmptyWaitThreadCount > notEmptyWaitThreadPeak) {
					notEmptyWaitThreadPeak = notEmptyWaitThreadCount;
				}

				// Wait for the remaining budget.
				try {
					long startEstimate = estimate;
					estimate = notEmpty.awaitNanos(estimate); // signal by
																// recycle or
																// creator
					notEmptyWaitCount++;
					notEmptyWaitNanos += (startEstimate - estimate);

					if (!enable) {
						connectErrorCountUpdater.incrementAndGet(this);
						throw new DataSourceDisableException();
					}
				} catch (InterruptedException ie) {
					notEmpty.signal(); // propagate to non-interrupted thread
					notEmptySignalCount++;
					throw ie;
				} finally {
					notEmptyWaitThreadCount--;
				}

				// Still empty after waking: retry while budget remains,
				// otherwise report timeout.
				if (poolingCount == 0) {
					if (estimate > 0) {
						continue;
					}

					waitNanosLocal.set(nanos - estimate);
					return null;
				}
			}

			// Take the tail element: decrement, read, clear the slot.
			decrementPoolingCount();
			DruidConnectionHolder last = connections[poolingCount];
			connections[poolingCount] = null;

			long waitNanos = nanos - estimate;
			last.setLastNotEmptyWaitNanos(waitNanos);

			return last;
		}
	}

	// poolingCount is guarded by {@code lock}; callers must hold it.
	private final void decrementPoolingCount() {
		poolingCount--;
	}

	// poolingCount is guarded by {@code lock}; callers must hold it.
	private final void incrementPoolingCount() {
		poolingCount++;
	}

	/**
	 * DataSource contract: obtain a connection for the given credentials.
	 * Druid pools a single credential pair — this succeeds only when the
	 * credentials match the pool's, or initializes them on first use.
	 *
	 * @throws UnsupportedOperationException when the credentials differ from
	 *         the pool's configured username/password
	 */
	@Override
	public Connection getConnection(String username, String password) throws SQLException {
		// First credentialed call while none are configured: adopt them.
		if (this.username == null && this.password == null && username != null && password != null) {
			this.username = username;
			this.password = password;

			return getConnection();
		}

		if (!StringUtils.equals(username, this.username)
				|| !StringUtils.equals(password, this.password)) {
			throw new UnsupportedOperationException("Not supported by DruidDataSource");
		}

		return getConnection();
	}

	/** Total number of physical connections created. */
	public long getCreateCount() {
		return createCount;
	}

	/** Total number of physical connections destroyed. */
	public long getDestroyCount() {
		return destroyCount;
	}

	/** Number of logical connect (borrow) requests; read under the pool lock. */
	public long getConnectCount() {
		lock.lock();
		try {
			return connectCount;
		} finally {
			lock.unlock();
		}
	}

	/** Number of logical close (return) operations. */
	public long getCloseCount() {
		return closeCount;
	}

	/** Number of failed connect attempts. */
	public long getConnectErrorCount() {
		return connectErrorCountUpdater.get(this);
	}

	/** Current number of idle connections in the pool; read under the lock. */
	@Override
	public int getPoolingCount() {
		lock.lock();
		try {
			return poolingCount;
		} finally {
			lock.unlock();
		}
	}

	/** Peak idle-connection count since last stat reset; read under the lock. */
	public int getPoolingPeak() {
		lock.lock();
		try {
			return poolingPeak;
		} finally {
			lock.unlock();
		}
	}

	/** Time of the idle-count peak, or null if never recorded. */
	public Date getPoolingPeakTime() {
		if (poolingPeakTime <= 0) {
			return null;
		}

		return new Date(poolingPeakTime);
	}

	/** Number of connections successfully recycled back into the pool. */
	public long getRecycleCount() {
		return recycleCount;
	}

	/** Current number of borrowed (in-use) connections; read under the lock. */
	public int getActiveCount() {
		lock.lock();
		try {
			return activeCount;
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Emits a statistics snapshot to the configured stat logger and resets
	 * the resettable counters. A null logger disables logging.
	 */
	public void logStats() {
		final DruidDataSourceStatLogger statLogger = this.statLogger;
		if (statLogger != null) {
			statLogger.log(getStatValueAndReset());
		}
	}

	/**
	 * Snapshots the pool's statistics into a DruidDataSourceStatValue and
	 * resets the resettable counters. Lock-guarded fields are read and reset
	 * under {@code lock}; atomic counters are captured with getAndSet(0).
	 */
	public DruidDataSourceStatValue getStatValueAndReset() {
		DruidDataSourceStatValue value = new DruidDataSourceStatValue();

		lock.lock();
		try {
			value.setPoolingCount(this.poolingCount);
			value.setPoolingPeak(this.poolingPeak);
			value.setPoolingPeakTime(this.poolingPeakTime);

			value.setActiveCount(this.activeCount);
			value.setActivePeak(this.activePeak);
			value.setActivePeakTime(this.activePeakTime);

			value.setConnectCount(this.connectCount);
			value.setCloseCount(this.closeCount);
			value.setWaitThreadCount(lock.getWaitQueueLength(notEmpty));
			value.setNotEmptyWaitCount(this.notEmptyWaitCount);
			value.setNotEmptyWaitNanos(this.notEmptyWaitNanos);
			value.setKeepAliveCheckCount(this.keepAliveCheckCount);

			// reset
			this.poolingPeak = 0;
			this.poolingPeakTime = 0;
			this.activePeak = 0;
			this.activePeakTime = 0;
			this.connectCount = 0;
			this.closeCount = 0;
			this.keepAliveCheckCount = 0;

			this.notEmptyWaitCount = 0;
			this.notEmptyWaitNanos = 0;
		} finally {
			lock.unlock();
		}

		// Configuration snapshot — no reset needed for these.
		value.setName(this.getName());
		value.setDbType(this.dbType);
		value.setDriverClassName(this.getDriverClassName());

		value.setUrl(this.getUrl());
		value.setUserName(this.getUsername());
		value.setFilterClassNames(this.getFilterClassNames());

		value.setInitialSize(this.getInitialSize());
		value.setMinIdle(this.getMinIdle());
		value.setMaxActive(this.getMaxActive());

		value.setQueryTimeout(this.getQueryTimeout());
		value.setTransactionQueryTimeout(this.getTransactionQueryTimeout());
		value.setLoginTimeout(this.getLoginTimeout());
		value.setValidConnectionCheckerClassName(this.getValidConnectionCheckerClassName());
		value.setExceptionSorterClassName(this.getExceptionSorterClassName());

		value.setTestOnBorrow(this.testOnBorrow);
		value.setTestOnReturn(this.testOnReturn);
		value.setTestWhileIdle(this.testWhileIdle);

		value.setDefaultAutoCommit(this.isDefaultAutoCommit());

		if (defaultReadOnly != null) {
			value.setDefaultReadOnly(defaultReadOnly);
		}
		value.setDefaultTransactionIsolation(this.getDefaultTransactionIsolation());

		// Atomic counters: capture and reset in one step.
		value.setLogicConnectErrorCount(connectErrorCountUpdater.getAndSet(this, 0));

		value.setPhysicalConnectCount(createCountUpdater.getAndSet(this, 0));
		value.setPhysicalCloseCount(destroyCountUpdater.getAndSet(this, 0));
		value.setPhysicalConnectErrorCount(createErrorCountUpdater.getAndSet(this, 0));

		value.setExecuteCount(this.getAndResetExecuteCount());
		value.setErrorCount(errorCountUpdater.getAndSet(this, 0));
		value.setCommitCount(commitCountUpdater.getAndSet(this, 0));
		value.setRollbackCount(rollbackCountUpdater.getAndSet(this, 0));

		value.setPstmtCacheHitCount(cachedPreparedStatementHitCountUpdater.getAndSet(this, 0));
		value.setPstmtCacheMissCount(cachedPreparedStatementMissCountUpdater.getAndSet(this, 0));

		value.setStartTransactionCount(startTransactionCountUpdater.getAndSet(this, 0));
		value.setTransactionHistogram(this.getTransactionHistogram().toArrayAndReset());

		value.setConnectionHoldTimeHistogram(this.getDataSourceStat().getConnectionHoldHistogram().toArrayAndReset());
		value.setRemoveAbandoned(this.isRemoveAbandoned());
		value.setClobOpenCount(this.getDataSourceStat().getClobOpenCountAndReset());
		value.setBlobOpenCount(this.getDataSourceStat().getBlobOpenCountAndReset());

		value.setSqlSkipCount(this.getDataSourceStat().getSkipSqlCountAndReset());
		value.setSqlList(this.getDataSourceStat().getSqlStatMapAndReset());

		return value;
	}

	/** Number of connections reclaimed by the removeAbandoned mechanism. */
	public long getRemoveAbandonedCount() {
		return removeAbandonedCount;
	}

	/**
	 * Wraps a freshly created physical connection in a holder and adds it to
	 * the pool. Holder creation happens outside the pool lock to avoid
	 * blocking other pool operations.
	 *
	 * @return false when holder creation fails or the pool is full
	 */
	protected boolean put(PhysicalConnectionInfo physicalConnectionInfo) {
		DruidConnectionHolder holder = null;
		try {
			// The holder proxies the physical connection and keeps a reference
			// to this data source, so Connection.close() routes back to
			// recycle(DruidPooledConnection).
			holder = new DruidConnectionHolder(DruidDataSource.this, physicalConnectionInfo);
		} catch (SQLException ex) {
			lock.lock();
			try {
				if (createScheduler != null) {
					createTaskCount--;
				}
			} finally {
				lock.unlock();
			}
			LOG.error("create connection holder error", ex);
			return false;
		}

		return put(holder); // insert into the pool under the lock
	}

	/**
	 * Inserts the holder into the pool array under the main lock and wakes one
	 * consumer waiting on {@code notEmpty}. Returns false if the pool is full.
	 */
	private boolean put(DruidConnectionHolder holder) {
		lock.lock();
		try {
			if (poolingCount >= maxActive) { // re-check under the lock so the pool never exceeds maxActive
				return false;
			}// store the holder at index poolingCount, then notify a waiting consumer
			connections[poolingCount] = holder;// the actual pooled database connection
			incrementPoolingCount();// pool size + 1

			if (poolingCount > poolingPeak) {
				poolingPeak = poolingCount;
				poolingPeakTime = System.currentTimeMillis();
			}
			// wake one thread blocked waiting for a connection
			notEmpty.signal();// consumers wait on notEmpty inside takeLast()
			notEmptySignalCount++;

			if (createScheduler != null) {
				createTaskCount--;
				// schedule another create task if waiters still outnumber available
				// connections and we are under maxActive
				if (poolingCount + createTaskCount < notEmptyWaitThreadCount //
						&& activeCount + poolingCount + createTaskCount < maxActive) {
					emptySignal();
				}
			}
		} finally {
			lock.unlock();
		}
		return true;
	}

	/**
	 * Task submitted to {@code createScheduler} that creates a single physical
	 * connection asynchronously, with back-off retry on failure.
	 */
	public class CreateConnectionTask implements Runnable {

		private int errorCount = 0;
		private boolean initTask = false;

		public CreateConnectionTask() {
		}

		public CreateConnectionTask(boolean initTask) {
			this.initTask = initTask;// true when this task is part of pool initialization
		}

		@Override
		public void run() {
			runInternal();
		}

		private void runInternal() {
			for (;;) {

				// addLast
				lock.lock();
				try {
					if (closed || closing) {
						createTaskCount--;
						return;
					}

					boolean emptyWait = true;

					if (createError != null && poolingCount == 0) {
						emptyWait = false;
					}

					if (emptyWait) {
						// only create a connection when some thread is actually waiting for one
						// (unless this is the init task or keepAlive needs to top up minIdle)
						if (poolingCount >= notEmptyWaitThreadCount //
								&& !(keepAlive && activeCount + poolingCount < minIdle) && !initTask) {
							createTaskCount--;
							return;
						}

						// never create more than maxActive connections
						if (activeCount + poolingCount >= maxActive) {
							createTaskCount--;
							return;
						}
					}
				} finally {
					lock.unlock();
				}

				PhysicalConnectionInfo physicalConnection = null;

				// NOTE(review): the OutOfMemoryError and SQLException handlers below are
				// near-identical copies; a future refactor could share the retry logic.
				try {
					physicalConnection = createPhysicalConnection();
					setFailContinuous(false);
				} catch (OutOfMemoryError e) {
					LOG.error("create connection OutOfMemoryError, out memory. ", e);

					errorCount++;
					if (errorCount > connectionErrorRetryAttempts && timeBetweenConnectErrorMillis > 0) {
						// fail over retry attempts
						setFailContinuous(true);
						if (failFast) {
							lock.lock();
							try {
								notEmpty.signalAll();// wake borrowers so they can fail fast instead of blocking
							} finally {
								lock.unlock();
							}
						}

						if (breakAfterAcquireFailure) {
							lock.lock();
							try {
								createTaskCount--;
							} finally {
								lock.unlock();
							}
							return;
						}

						this.errorCount = 0; // reset errorCount
						if (closing || closed) {
							createTaskCount--;
							return;
						}
						// reschedule this task after the configured back-off instead of spinning
						createSchedulerFuture = createScheduler.schedule(this, timeBetweenConnectErrorMillis,
								TimeUnit.MILLISECONDS);
						return;
					}
				} catch (SQLException e) {
					LOG.error("create connection SQLException, url: " + jdbcUrl, e);
					errorCount++;
					if (errorCount > connectionErrorRetryAttempts && timeBetweenConnectErrorMillis > 0) {
						// fail over retry attempts
						setFailContinuous(true);
						if (failFast) {
							lock.lock();
							try {
								notEmpty.signalAll();// wake borrowers so they can fail fast instead of blocking
							} finally {
								lock.unlock();
							}
						}

						if (breakAfterAcquireFailure) {
							lock.lock();
							try {
								createTaskCount--;
							} finally {
								lock.unlock();
							}
							return;
						}

						this.errorCount = 0; // reset errorCount
						if (closing || closed) {
							createTaskCount--;
							return;
						}
						// reschedule this task after the configured back-off instead of spinning
						createSchedulerFuture = createScheduler.schedule(this, timeBetweenConnectErrorMillis,
								TimeUnit.MILLISECONDS);
						return;
					}
				} catch (RuntimeException e) {
					LOG.error("create connection RuntimeException", e);
					// unknow fatal exception
					setFailContinuous(true);
					continue;
				} catch (Error e) {
					lock.lock();
					try {
						createTaskCount--;
					} finally {
						lock.unlock();
					}
					LOG.error("create connection Error", e);
					// unknow fatal exception
					setFailContinuous(true);
					break;
				} catch (Throwable e) {
					LOG.error("create connection unexecpted error.", e);
					break;
				}

				if (physicalConnection == null) {
					continue;
				}

				// hand the new connection to the pool; close it if the pool rejected it
				boolean result = put(physicalConnection);
				if (!result) {
					JdbcUtils.close(physicalConnection.getPhysicalConnection());
					LOG.info("put physical connection to pool failed.");
				}
				break;
			}
		}
	}

	public class CreateConnectionThread extends Thread {// background producer thread that creates physical connections

		public CreateConnectionThread(String name) {
			super(name);
			this.setDaemon(true);// daemon so it never blocks JVM shutdown
		}

		public void run() {
			initedLatch.countDown();

			long lastDiscardCount = 0;
			int errorCount = 0;
			for (;;) {
				// addLast
				try {
					lock.lockInterruptibly();
				} catch (InterruptedException e2) {
					break;
				}

				long discardCount = DruidDataSource.this.discardCount;
				boolean discardChanged = discardCount - lastDiscardCount > 0;
				lastDiscardCount = discardCount;
				
				try {
					boolean emptyWait = true;// whether to park; if the pool already has enough connections, wait
					if (createError != null && poolingCount == 0 && !discardChanged) {
						emptyWait = false;
					}
					if (emptyWait && asyncInit && createCount < initialSize) {
						emptyWait = false;
					}
					if (emptyWait) {
						// only create a connection when some thread is waiting for one:
						// idle connections already cover the waiters. With keepAlive we also
						// consider activeCount (checked-out connections: +1 on borrow, -1 on recycle)
						// so the pool can be topped up to minIdle.
						if (poolingCount >= notEmptyWaitThreadCount //
								&& !(keepAlive && activeCount + poolingCount < minIdle)) {
							empty.await();
						}
						// never create more than maxActive connections:
						// if in-pool plus checked-out already reach maxActive, park as well
						if (activeCount + poolingCount >= maxActive) {
							empty.await();// producer parks on the `empty` condition until space frees up
							continue;
						}
					}

				} catch (InterruptedException e) {
					lastCreateError = e;
					lastErrorTimeMillis = System.currentTimeMillis();

					if (!closing) {
						LOG.error("create connection Thread Interrupted, url: " + jdbcUrl, e);
					}
					break;
				} finally {
					lock.unlock();
				}
				// no need to wait: the pool wants another connection
				PhysicalConnectionInfo connection = null;
				try {
					connection = createPhysicalConnection();// open a real JDBC connection (no lock held)
					setFailContinuous(false);
				} catch (SQLException e) {
					LOG.error("create connection SQLException, url: " + jdbcUrl + ", errorCode " + e.getErrorCode()
							+ ", state " + e.getSQLState(), e);
					errorCount++;
					if (errorCount > connectionErrorRetryAttempts && timeBetweenConnectErrorMillis > 0) {
						// fail over retry attempts
						setFailContinuous(true);
						if (failFast) {
							lock.lock();
							try {
								notEmpty.signalAll();// wake borrowers so they can fail fast instead of blocking
							} finally {
								lock.unlock();
							}
						}
						if (breakAfterAcquireFailure) {
							break;
						}
						try {
							Thread.sleep(timeBetweenConnectErrorMillis);// back off before retrying
						} catch (InterruptedException interruptEx) {
							break;
						}
					}
				} catch (RuntimeException e) {
					LOG.error("create connection RuntimeException", e);
					setFailContinuous(true);
					continue;
				} catch (Error e) {
					LOG.error("create connection Error", e);
					setFailContinuous(true);
					break;
				}

				if (connection == null) {
					continue;
				}
				// connection created successfully
				boolean result = put(connection);// offer it to the pool
				if (!result) {
					JdbcUtils.close(connection.getPhysicalConnection());
					LOG.info("put physical connection to pool failed.");
				}

				errorCount = 0; // reset errorCount
			}
		}
	}

	/**
	 * Background daemon that periodically runs {@code destroyTask} (shrink +
	 * removeAbandoned) every {@code timeBetweenEvictionRunsMillis} (default 1s).
	 */
	public class DestroyConnectionThread extends Thread {

		public DestroyConnectionThread(String name) {
			super(name);
			this.setDaemon(true);
		}

		public void run() {
			initedLatch.countDown();

			for (;;) {
				// evict from the head of the pool array
				try {
					if (closed) {
						break;
					}

					if (timeBetweenEvictionRunsMillis > 0) {
						Thread.sleep(timeBetweenEvictionRunsMillis);
					} else {
						Thread.sleep(1000); // fall back to a 1-second cycle
					}

					if (Thread.interrupted()) {
						break;
					}

					destroyTask.run();
				} catch (InterruptedException e) {
					break;
				}
			}
		}

	}

	/** One eviction cycle: shrink idle connections, then reclaim abandoned ones. */
	public class DestroyTask implements Runnable {

		@Override
		public void run() {
			shrink(true, keepAlive);// evict idle/expired connections, keep-alive the rest

			if (isRemoveAbandoned()) {
				removeAbandoned();// reclaim borrowed-but-never-returned connections
			}
		}

	}

	/** Background daemon that logs pool statistics every {@code timeBetweenLogStatsMillis}. */
	public class LogStatsThread extends Thread {

		public LogStatsThread(String name) {
			super(name);
			this.setDaemon(true);
		}

		public void run() {
			try {
				for (;;) {
					try {
						logStats();
					} catch (Exception e) {
						// keep the thread alive even if one stats pass fails
						LOG.error("logStats error", e);
					}

					Thread.sleep(timeBetweenLogStatsMillis);
				}
			} catch (InterruptedException e) {
				// interruption is the normal shutdown signal — exit quietly
			}
		}
	}
	// Cleans up checked-out connections that were never returned (e.g. leaked because a
	// database deadlock prevented the caller from releasing them).
	public int removeAbandoned() {
		int removeCount = 0;

		long currrentNanos = System.nanoTime();

		List<DruidPooledConnection> abandonedList = new ArrayList<DruidPooledConnection>();

		activeConnectionLock.lock();
		try {
			Iterator<DruidPooledConnection> iter = activeConnections.keySet().iterator();

			for (; iter.hasNext();) {
				DruidPooledConnection pooledConnection = iter.next();
				// skip connections currently executing; this flag is maintained by a Filter
				if (pooledConnection.isRunning()) {
					continue;
				}
				// held longer than removeAbandonedTimeoutMillis => candidate for reclamation
				long timeMillis = (currrentNanos - pooledConnection.getConnectedTimeNano()) / (1000 * 1000);

				if (timeMillis >= removeAbandonedTimeoutMillis) {
					iter.remove();
					pooledConnection.setTraceEnable(false);
					abandonedList.add(pooledConnection);
				}
			}
		} finally {
			activeConnectionLock.unlock();
		}
		// close each abandoned connection unless it has already been disabled elsewhere
		if (abandonedList.size() > 0) {
			for (DruidPooledConnection pooledConnection : abandonedList) {
				final ReentrantLock lock = pooledConnection.lock;
				lock.lock();
				try {
					if (pooledConnection.isDisable()) {
						continue;
					}
				} finally {
					lock.unlock();
				}

				JdbcUtils.close(pooledConnection);
				pooledConnection.abandond();
				removeAbandonedCount++;
				removeCount++;

				if (isLogAbandoned()) {
					// log who borrowed the connection and where, plus the owner's current stack
					StringBuilder buf = new StringBuilder();
					buf.append("abandon connection, owner thread: ");
					buf.append(pooledConnection.getOwnerThread().getName());
					buf.append(", connected at : ");
					buf.append(pooledConnection.getConnectedTimeMillis());
					buf.append(", open stackTrace\n");

					StackTraceElement[] trace = pooledConnection.getConnectStackTrace();
					for (int i = 0; i < trace.length; i++) {
						buf.append("\tat ");
						buf.append(trace[i].toString());
						buf.append("\n");
					}

					buf.append("ownerThread current state is " + pooledConnection.getOwnerThread().getState()
							+ ", current stackTrace\n");
					trace = pooledConnection.getOwnerThread().getStackTrace();
					for (int i = 0; i < trace.length; i++) {
						buf.append("\tat ");
						buf.append(trace[i].toString());
						buf.append("\n");
					}

					LOG.error(buf.toString());
				}
			}
		}

		return removeCount;
	}

	/** Instance key published in the JNDI {@link Reference} built by {@link #getReference()}. */
	protected String instanceKey = null;

	/**
	 * Builds a JNDI {@link Reference} describing this data source.
	 * NOTE(review): the password is exposed as a plain StringRefAddr — anyone who can
	 * read the JNDI tree can see it; confirm this is acceptable for the deployment.
	 */
	public Reference getReference() throws NamingException {
		final String className = getClass().getName();
		final String factoryName = className + "Factory"; // XXX: not robust
		Reference ref = new Reference(className, factoryName, null);
		ref.add(new StringRefAddr("instanceKey", instanceKey));
		ref.add(new StringRefAddr("url", this.getUrl()));
		ref.add(new StringRefAddr("username", this.getUsername()));
		ref.add(new StringRefAddr("password", this.getPassword()));
		// TODO ADD OTHER PROPERTIES
		return ref;
	}

	/** Snapshot of the fully-qualified class name of every configured filter. */
	@Override
	public List<String> getFilterClassNames() {
		final List<String> classNames = new ArrayList<String>();
		for (Filter item : filters) {
			classNames.add(item.getClass().getName());
		}
		return classNames;
	}

	/** Major version of the underlying JDBC driver, or -1 when no driver is resolved yet. */
	public int getRawDriverMajorVersion() {
		return this.driver == null ? -1 : this.driver.getMajorVersion();
	}

	/** Minor version of the underlying JDBC driver, or -1 when no driver is resolved yet. */
	public int getRawDriverMinorVersion() {
		return this.driver == null ? -1 : this.driver.getMinorVersion();
	}

	/**
	 * Renders the connect properties as a string, masking the password so it
	 * never leaks into logs or monitoring views.
	 */
	public String getProperties() {
		Properties copy = new Properties();
		copy.putAll(connectProperties);
		if (copy.containsKey("password")) {
			copy.put("password", "******");
		}
		return copy.toString();
	}

	/** Unconditional shrink: evict everything above minIdle, no time checks, no keep-alive. */
	@Override
	public void shrink() {
		shrink(false, false);
	}

	/** Shrink using the data source's configured keepAlive setting. */
	public void shrink(boolean checkTime) {
		shrink(checkTime, keepAlive);
	}

	/**
	 * Evicts idle/expired connections from the pool and optionally keep-alive
	 * validates the rest. Eviction candidates are closed outside the lock.
	 *
	 * @param checkTime when true, apply phyTimeoutMillis and the idle-time thresholds;
	 *                  when false, simply evict everything above minIdle
	 * @param keepAlive when true, validate connections idle between the min and max
	 *                  evictable thresholds and return the healthy ones to the pool
	 */
	public void shrink(boolean checkTime, boolean keepAlive) {
		try {
			lock.lockInterruptibly();// all pool-array mutations happen under the main lock
		} catch (InterruptedException e) {
			return;
		}

		int evictCount = 0;
		int keepAliveCount = 0;
		try {
			if (!inited) {
				return;
			}
			// checkCount = how many idle connections exceed the minIdle watermark
			final int checkCount = poolingCount - minIdle;// in-pool count minus minimum idle target
			final long currentTimeMillis = System.currentTimeMillis();
			for (int i = 0; i < poolingCount; ++i) {// scan the pooled (idle) connections
				DruidConnectionHolder connection = connections[i];

				if (checkTime) {
					// physical-lifetime check, measured from the connection's creation time.
					// phyTimeoutMillis: max physical lifetime, default -1 (unlimited); connections
					// older than this are evicted regardless of idleness
					if (phyTimeoutMillis > 0) {
						long phyConnectTimeMillis = currentTimeMillis - connection.connectTimeMillis;
						if (phyConnectTimeMillis > phyTimeoutMillis) {
							evictConnections[evictCount++] = connection;// collect for closing below
							continue;
						}
					}

					long idleMillis = currentTimeMillis - connection.lastActiveTimeMillis;
					// below minEvictableIdleTimeMillis: leave it alone. The loop breaks here,
					// presumably because the array is ordered oldest-idle first — TODO confirm
					if (idleMillis < minEvictableIdleTimeMillis) {// idle time
						break;
					}

					if (checkTime && i < checkCount) {
						// idle long enough and above the minIdle watermark: evict
						evictConnections[evictCount++] = connection;// collect for closing below
					} else if (idleMillis > maxEvictableIdleTimeMillis) {
						// past the hard idle limit: evict even below minIdle
						evictConnections[evictCount++] = connection;// collect for closing below
					} else if (keepAlive) {
						// idle between the two thresholds with keepAlive on: validate and re-pool
						keepAliveConnections[keepAliveCount++] = connection;
					}
				} else {
					if (i < checkCount) {
						evictConnections[evictCount++] = connection;
					} else {
						break;
					}
				}
			}
			// remove both evicted and keepAlive connections from the pool array;
			// the keepAlive ones are re-put below after validation
			int removeCount = evictCount + keepAliveCount;
			if (removeCount > 0) {
				// compact the remaining holders to the front and null out the tail
				System.arraycopy(connections, removeCount, connections, 0, poolingCount - removeCount);
				Arrays.fill(connections, poolingCount - removeCount, poolingCount, null);
				poolingCount -= removeCount;
			}
			keepAliveCheckCount += keepAliveCount;
		} finally {
			lock.unlock();
		}

		if (evictCount > 0) {// physically close evicted connections, outside the lock
			for (int i = 0; i < evictCount; ++i) {
				DruidConnectionHolder item = evictConnections[i];// collected above
				Connection connection = item.getConnection();
				JdbcUtils.close(connection);// physical close
				destroyCountUpdater.incrementAndGet(this);
			}
			Arrays.fill(evictConnections, null);
		}
		// keepAlive handling: validate each candidate; on success refresh its
		// lastActiveTimeMillis and put it back in the pool, on failure close it
		if (keepAliveCount > 0) {
			this.getDataSourceStat().addKeepAliveCheckCount(keepAliveCount);
			// keep order
			for (int i = keepAliveCount - 1; i >= 0; --i) {
				DruidConnectionHolder holer = keepAliveConnections[i];
				Connection connection = holer.getConnection();
				holer.incrementKeepAliveCheckCount();

				boolean validate = false;
				try {
					this.validateConnection(connection);
					validate = true;
				} catch (Throwable error) {
					if (LOG.isDebugEnabled()) {
						LOG.debug("keepAliveErr", error);
					}
					// validation failure is expected occasionally; the connection is just closed
				}

				if (validate) {
					holer.lastActiveTimeMillis = System.currentTimeMillis();
					put(holer);
				} else {
					JdbcUtils.close(connection);
				}
			}// clear the scratch array so stale holder references cannot leak
			Arrays.fill(keepAliveConnections, null);
		}
	}

	/** Number of threads currently blocked waiting for a connection (notEmpty wait queue). */
	public int getWaitThreadCount() {
		lock.lock();
		try {
			return lock.getWaitQueueLength(notEmpty);
		} finally {
			lock.unlock();
		}
	}

	/** Cumulative number of times a borrower had to wait on the notEmpty condition. */
	public long getNotEmptyWaitCount() {
		return notEmptyWaitCount;
	}

	/** Current number of threads waiting on notEmpty (read under the pool lock). */
	public int getNotEmptyWaitThreadCount() {
		lock.lock();
		try {
			return notEmptyWaitThreadCount;
		} finally {
			lock.unlock();
		}
	}

	/** Peak number of threads that were simultaneously waiting on notEmpty. */
	public int getNotEmptyWaitThreadPeak() {
		lock.lock();
		try {
			return notEmptyWaitThreadPeak;
		} finally {
			lock.unlock();
		}
	}

	/** Cumulative number of notEmpty signals issued when connections were returned/created. */
	public long getNotEmptySignalCount() {
		return notEmptySignalCount;
	}

	/** Total time borrowers spent waiting on notEmpty, in milliseconds. */
	public long getNotEmptyWaitMillis() {
		return TimeUnit.NANOSECONDS.toMillis(notEmptyWaitNanos);
	}

	/** Total time borrowers spent waiting on notEmpty, in nanoseconds. */
	public long getNotEmptyWaitNanos() {
		return notEmptyWaitNanos;
	}

	/** Estimated number of threads queued to acquire the main pool lock. */
	public int getLockQueueLength() {
		return lock.getQueueLength();
	}

	/** Peak number of simultaneously checked-out connections. */
	public int getActivePeak() {
		return activePeak;
	}

	/** Timestamp of the active-count peak, or null if no peak has been recorded yet. */
	public Date getActivePeakTime() {
		final long peakMillis = activePeakTime;
		return peakMillis <= 0 ? null : new Date(peakMillis);
	}

	/** Like {@link #toString()}, but rendered under the pool lock for a consistent snapshot. */
	public String dump() {
		lock.lock();
		try {
			return this.toString();
		} finally {
			lock.unlock();
		}
	}

	/** Cumulative number of statement execution errors recorded by this data source. */
	public long getErrorCount() {
		return this.errorCount;
	}

	/**
	 * Renders pool counters, per-connection details and (when statement pooling is
	 * enabled) the prepared-statement cache contents. Reads the connections array
	 * WITHOUT the pool lock, so the output is a best-effort snapshot; use
	 * {@link #dump()} for a locked view.
	 */
	public String toString() {
		StringBuilder buf = new StringBuilder();

		buf.append("{");

		buf.append("\n\tCreateTime:\"");
		buf.append(Utils.toString(getCreatedTime()));
		buf.append("\"");

		buf.append(",\n\tActiveCount:");
		buf.append(getActiveCount());

		buf.append(",\n\tPoolingCount:");
		buf.append(getPoolingCount());

		buf.append(",\n\tCreateCount:");
		buf.append(getCreateCount());

		buf.append(",\n\tDestroyCount:");
		buf.append(getDestroyCount());

		buf.append(",\n\tCloseCount:");
		buf.append(getCloseCount());

		buf.append(",\n\tConnectCount:");
		buf.append(getConnectCount());

		buf.append(",\n\tConnections:[");
		for (int i = 0; i < poolingCount; ++i) {
			DruidConnectionHolder conn = connections[i];
			if (conn != null) {
				if (i != 0) {
					buf.append(",");
				}
				buf.append("\n\t\t");
				buf.append(conn.toString());
			}
		}
		buf.append("\n\t]");

		buf.append("\n}");

		// second section: prepared-statement cache contents, one entry per pooled connection
		if (this.isPoolPreparedStatements()) {
			buf.append("\n\n[");
			for (int i = 0; i < poolingCount; ++i) {
				DruidConnectionHolder conn = connections[i];
				if (conn != null) {
					if (i != 0) {
						buf.append(",");
					}
					buf.append("\n\t{\n\tID:");
					buf.append(System.identityHashCode(conn.getConnection()));
					PreparedStatementPool pool = conn.getStatementPool();

					buf.append(", \n\tpoolStatements:[");

					int entryIndex = 0;
					try {
						for (Map.Entry<PreparedStatementKey, PreparedStatementHolder> entry : pool.getMap()
								.entrySet()) {
							if (entryIndex != 0) {
								buf.append(",");
							}
							buf.append("\n\t\t{hitCount:");
							buf.append(entry.getValue().getHitCount());
							buf.append(",sql:\"");
							buf.append(entry.getKey().getSql());
							buf.append("\"");
							buf.append("\t}");

							entryIndex++;
						}
					} catch (ConcurrentModificationException e) {
						// unlocked iteration may race with the pool; truncate output rather than fail
					}

					buf.append("\n\t\t]");

					buf.append("\n\t}");
				}
			}
			buf.append("\n]");
		}

		return buf.toString();
	}

	/**
	 * Returns one descriptive map per pooled (idle) connection, read under the pool
	 * lock: identity, counters, timestamps, transaction/holdability settings and the
	 * prepared-statement cache when enabled.
	 */
	public List<Map<String, Object>> getPoolingConnectionInfo() {
		List<Map<String, Object>> list = new ArrayList<Map<String, Object>>();
		lock.lock();
		try {
			for (int i = 0; i < poolingCount; ++i) {
				DruidConnectionHolder connHolder = connections[i];
				Connection conn = connHolder.getConnection();

				Map<String, Object> map = new LinkedHashMap<String, Object>();
				map.put("id", System.identityHashCode(conn));
				map.put("connectionId", connHolder.getConnectionId());
				map.put("useCount", connHolder.getUseCount());
				if (connHolder.lastActiveTimeMillis > 0) {
					map.put("lastActiveTime", new Date(connHolder.lastActiveTimeMillis));
				}
				map.put("connectTime", new Date(connHolder.getTimeMillis()));
				map.put("holdability", connHolder.getUnderlyingHoldability());
				map.put("transactionIsolation", connHolder.getUnderlyingTransactionIsolation());
				map.put("autoCommit", connHolder.underlyingAutoCommit);
				// NOTE: "readoOnly" is a long-published key typo — kept for consumer compatibility
				map.put("readoOnly", connHolder.isUnderlyingReadOnly());

				if (connHolder.isPoolPreparedStatements()) {
					List<Map<String, Object>> stmtCache = new ArrayList<Map<String, Object>>();
					PreparedStatementPool stmtPool = connHolder.getStatementPool();
					for (PreparedStatementHolder stmtHolder : stmtPool.getMap().values()) {
						Map<String, Object> stmtInfo = new LinkedHashMap<String, Object>();

						stmtInfo.put("sql", stmtHolder.key.getSql());
						stmtInfo.put("defaultRowPrefetch", stmtHolder.getDefaultRowPrefetch());
						stmtInfo.put("rowPrefetch", stmtHolder.getRowPrefetch());
						stmtInfo.put("hitCount", stmtHolder.getHitCount());

						stmtCache.add(stmtInfo);
					}

					map.put("pscache", stmtCache);
				}
				map.put("keepAliveCheckCount", connHolder.getKeepAliveCheckCount());

				list.add(map);
			}
		} finally {
			lock.unlock();
		}
		return list;
	}

	/**
	 * Logs a transaction that exceeded {@code transactionThresholdMillis}, including
	 * every SQL statement it executed. No-op when the threshold is disabled (<= 0)
	 * or the transaction was fast enough.
	 */
	public void logTransaction(TransactionInfo info) {
		final long elapsedMillis = info.getEndTimeMillis() - info.getStartTimeMillis();
		if (transactionThresholdMillis <= 0 || elapsedMillis <= transactionThresholdMillis) {
			return;
		}

		StringBuilder message = new StringBuilder();
		message.append("long time transaction, take ");
		message.append(elapsedMillis);
		message.append(" ms : ");
		for (String sql : info.getSqlList()) {
			message.append(sql).append(";");
		}
		// attach a TransactionTimeoutException so the log captures the call site
		LOG.error(message.toString(), new TransactionTimeoutException());
	}

	/** Druid library version string. */
	@Override
	public String getVersion() {
		return VERSION.getVersionNumber();
	}

	/** Statistics collector for this data source. */
	@Override
	public JdbcDataSourceStat getDataSourceStat() {
		return dataSourceStat;
	}

	/** Clones via {@link #cloneDruidDataSource()} (configuration copy, not pool state). */
	public Object clone() throws CloneNotSupportedException {
		return cloneDruidDataSource();
	}

	/** Creates a new DruidDataSource and copies this instance's configuration into it. */
	public DruidDataSource cloneDruidDataSource() {
		DruidDataSource x = new DruidDataSource();

		cloneTo(x);

		return x;
	}

	/**
	 * Flat key/value snapshot of pool statistics for JMX exposure. The numeric
	 * range comments (0-4, 5-9, ...) group the keys into the positional slots
	 * used by the MBean attribute layout.
	 *
	 * @throws IllegalStateException wrapping any JMException raised while building
	 *                               the composite error data
	 */
	public Map<String, Object> getStatDataForMBean() {
		try {
			Map<String, Object> map = new HashMap<String, Object>();

			// 0 - 4
			map.put("Name", this.getName());
			map.put("URL", this.getUrl());
			map.put("CreateCount", this.getCreateCount());
			map.put("DestroyCount", this.getDestroyCount());
			map.put("ConnectCount", this.getConnectCount());

			// 5 - 9
			map.put("CloseCount", this.getCloseCount());
			map.put("ActiveCount", this.getActiveCount());
			map.put("PoolingCount", this.getPoolingCount());
			map.put("LockQueueLength", this.getLockQueueLength());
			map.put("WaitThreadCount", this.getNotEmptyWaitThreadPeak());

			// 10 - 14
			map.put("InitialSize", this.getInitialSize());
			map.put("MaxActive", this.getMaxActive());
			map.put("MinIdle", this.getMinIdle());
			map.put("PoolPreparedStatements", this.isPoolPreparedStatements());
			map.put("TestOnBorrow", this.isTestOnBorrow());

			// 15 - 19
			map.put("TestOnReturn", this.isTestOnReturn());
			map.put("MinEvictableIdleTimeMillis", this.minEvictableIdleTimeMillis);
			map.put("ConnectErrorCount", this.getConnectErrorCount());
			map.put("CreateTimespanMillis", this.getCreateTimespanMillis());
			map.put("DbType", this.dbType);

			// 20 - 24
			map.put("ValidationQuery", this.getValidationQuery());
			map.put("ValidationQueryTimeout", this.getValidationQueryTimeout());
			map.put("DriverClassName", this.getDriverClassName());
			map.put("Username", this.getUsername());
			map.put("RemoveAbandonedCount", this.getRemoveAbandonedCount());

			// 25 - 29
			map.put("NotEmptyWaitCount", this.getNotEmptyWaitCount());
			map.put("NotEmptyWaitNanos", this.getNotEmptyWaitNanos());
			map.put("ErrorCount", this.getErrorCount());
			map.put("ReusePreparedStatementCount", this.getCachedPreparedStatementHitCount());
			map.put("StartTransactionCount", this.getStartTransactionCount());

			// 30 - 34
			map.put("CommitCount", this.getCommitCount());
			map.put("RollbackCount", this.getRollbackCount());
			map.put("LastError", JMXUtils.getErrorCompositeData(this.getLastError()));
			map.put("LastCreateError", JMXUtils.getErrorCompositeData(this.getLastCreateError()));
			map.put("PreparedStatementCacheDeleteCount", this.getCachedPreparedStatementDeleteCount());

			// 35 - 39
			map.put("PreparedStatementCacheAccessCount", this.getCachedPreparedStatementAccessCount());
			map.put("PreparedStatementCacheMissCount", this.getCachedPreparedStatementMissCount());
			map.put("PreparedStatementCacheHitCount", this.getCachedPreparedStatementHitCount());
			map.put("PreparedStatementCacheCurrentCount", this.getCachedPreparedStatementCount());
			map.put("Version", this.getVersion());

			// 40 -
			map.put("LastErrorTime", this.getLastErrorTime());
			map.put("LastCreateErrorTime", this.getLastCreateErrorTime());
			map.put("CreateErrorCount", this.getCreateErrorCount());
			map.put("DiscardCount", this.getDiscardCount());
			map.put("ExecuteQueryCount", this.getExecuteQueryCount());

			map.put("ExecuteUpdateCount", this.getExecuteUpdateCount());

			return map;
		} catch (JMException ex) {
			throw new IllegalStateException("getStatData error", ex);
		}
	}

	/**
	 * Full statistics snapshot used by the Druid stat view. The volatile counters
	 * (pooling/active counts and peaks, connect/close counts) are read together
	 * under the pool lock so they are mutually consistent; everything else is read
	 * via the regular getters afterwards.
	 */
	public Map<String, Object> getStatData() {
		final int activeCount;
		final int activePeak;
		final Date activePeakTime;

		final int poolingCount;
		final int poolingPeak;
		final Date poolingPeakTime;

		final long connectCount;
		final long closeCount;

		lock.lock();
		try {
			// consistent snapshot of the hot counters
			poolingCount = this.poolingCount;
			poolingPeak = this.poolingPeak;
			poolingPeakTime = this.getPoolingPeakTime();

			activeCount = this.activeCount;
			activePeak = this.activePeak;
			activePeakTime = this.getActivePeakTime();

			connectCount = this.connectCount;
			closeCount = this.closeCount;
		} finally {
			lock.unlock();
		}
		Map<String, Object> dataMap = new LinkedHashMap<String, Object>();

		dataMap.put("Identity", System.identityHashCode(this));
		dataMap.put("Name", this.getName());
		dataMap.put("DbType", this.dbType);
		dataMap.put("DriverClassName", this.getDriverClassName());

		dataMap.put("URL", this.getUrl());
		dataMap.put("UserName", this.getUsername());
		dataMap.put("FilterClassNames", this.getFilterClassNames());

		dataMap.put("WaitThreadCount", this.getWaitThreadCount());
		dataMap.put("NotEmptyWaitCount", this.getNotEmptyWaitCount());
		dataMap.put("NotEmptyWaitMillis", this.getNotEmptyWaitMillis());

		dataMap.put("PoolingCount", poolingCount);
		dataMap.put("PoolingPeak", poolingPeak);
		dataMap.put("PoolingPeakTime", poolingPeakTime);

		dataMap.put("ActiveCount", activeCount);
		dataMap.put("ActivePeak", activePeak);
		dataMap.put("ActivePeakTime", activePeakTime);

		dataMap.put("InitialSize", this.getInitialSize());
		dataMap.put("MinIdle", this.getMinIdle());
		dataMap.put("MaxActive", this.getMaxActive());

		dataMap.put("QueryTimeout", this.getQueryTimeout());
		dataMap.put("TransactionQueryTimeout", this.getTransactionQueryTimeout());
		dataMap.put("LoginTimeout", this.getLoginTimeout());
		dataMap.put("ValidConnectionCheckerClassName", this.getValidConnectionCheckerClassName());
		dataMap.put("ExceptionSorterClassName", this.getExceptionSorterClassName());

		dataMap.put("TestOnBorrow", this.isTestOnBorrow());
		dataMap.put("TestOnReturn", this.isTestOnReturn());
		dataMap.put("TestWhileIdle", this.isTestWhileIdle());

		dataMap.put("DefaultAutoCommit", this.isDefaultAutoCommit());
		dataMap.put("DefaultReadOnly", this.getDefaultReadOnly());
		dataMap.put("DefaultTransactionIsolation", this.getDefaultTransactionIsolation());

		dataMap.put("LogicConnectCount", connectCount);
		dataMap.put("LogicCloseCount", closeCount);
		dataMap.put("LogicConnectErrorCount", this.getConnectErrorCount());

		dataMap.put("PhysicalConnectCount", this.getCreateCount());
		dataMap.put("PhysicalCloseCount", this.getDestroyCount());
		dataMap.put("PhysicalConnectErrorCount", this.getCreateErrorCount());

		dataMap.put("ExecuteCount", this.getExecuteCount());
		dataMap.put("ExecuteUpdateCount", this.getExecuteUpdateCount());
		dataMap.put("ExecuteQueryCount", this.getExecuteQueryCount());
		dataMap.put("ExecuteBatchCount", this.getExecuteBatchCount());
		dataMap.put("ErrorCount", this.getErrorCount());
		dataMap.put("CommitCount", this.getCommitCount());
		dataMap.put("RollbackCount", this.getRollbackCount());

		dataMap.put("PSCacheAccessCount", this.getCachedPreparedStatementAccessCount());
		dataMap.put("PSCacheHitCount", this.getCachedPreparedStatementHitCount());
		dataMap.put("PSCacheMissCount", this.getCachedPreparedStatementMissCount());

		dataMap.put("StartTransactionCount", this.getStartTransactionCount());
		dataMap.put("TransactionHistogram", this.getTransactionHistogramValues());

		dataMap.put("ConnectionHoldTimeHistogram", this.getDataSourceStat().getConnectionHoldHistogram().toArray());
		dataMap.put("RemoveAbandoned", this.isRemoveAbandoned());
		dataMap.put("ClobOpenCount", this.getDataSourceStat().getClobOpenCount());
		dataMap.put("BlobOpenCount", this.getDataSourceStat().getBlobOpenCount());
		dataMap.put("KeepAliveCheckCount", this.getDataSourceStat().getKeepAliveCheckCount());

		dataMap.put("KeepAlive", this.isKeepAlive());
		dataMap.put("FailFast", this.isFailFast());
		dataMap.put("MaxWait", this.getMaxWait());
		dataMap.put("MaxWaitThreadCount", this.getMaxWaitThreadCount());
		dataMap.put("PoolPreparedStatements", this.isPoolPreparedStatements());
		dataMap.put("MaxPoolPreparedStatementPerConnectionSize", this.getMaxPoolPreparedStatementPerConnectionSize());
		dataMap.put("MinEvictableIdleTimeMillis", this.minEvictableIdleTimeMillis);
		dataMap.put("MaxEvictableIdleTimeMillis", this.maxEvictableIdleTimeMillis);

		dataMap.put("LogDifferentThread", isLogDifferentThread());
		dataMap.put("RecycleErrorCount", getRecycleErrorCount());
		dataMap.put("PreparedStatementOpenCount", getPreparedStatementCount());
		dataMap.put("PreparedStatementClosedCount", getClosedPreparedStatementCount());

		dataMap.put("UseUnfairLock", isUseUnfairLock());
		dataMap.put("InitGlobalVariants", isInitGlobalVariants());
		dataMap.put("InitVariants", isInitVariants());
		return dataMap;
	}

	/** Per-SQL statistics for the given SQL id (int overload). */
	public JdbcSqlStat getSqlStat(int sqlId) {
		return this.getDataSourceStat().getSqlStat(sqlId);
	}

	/** Per-SQL statistics for the given SQL id (long overload). */
	public JdbcSqlStat getSqlStat(long sqlId) {
		return this.getDataSourceStat().getSqlStat(sqlId);
	}

	/** All per-SQL statistics keyed by SQL text. */
	public Map<String, JdbcSqlStat> getSqlStatMap() {
		return this.getDataSourceStat().getSqlStatMap();
	}

	/**
	 * WallFilter (SQL firewall) statistics as a map, or null when no WallFilter
	 * is configured on this data source.
	 */
	public Map<String, Object> getWallStatMap() {
		final WallProviderStatValue statValue = getWallStatValue(false);
		return statValue == null ? null : statValue.toMap();
	}

	/**
	 * Stat value of the first configured WallFilter, or null if none is present.
	 *
	 * @param reset when true, the filter's counters are reset as they are read
	 */
	public WallProviderStatValue getWallStatValue(boolean reset) {
		for (Filter filter : this.filters) {
			if (filter instanceof WallFilter) {
				WallFilter wallFilter = (WallFilter) filter;
				return wallFilter.getProvider().getStatValue(reset);
			}
		}

		return null;
	}

	/** The main pool lock guarding the connections array and its counters. */
	public Lock getLock() {
		return lock;
	}

	/**
	 * JDBC Wrapper check: true if any configured filter, the stat logger, or the
	 * superclass can be unwrapped to {@code iface}.
	 */
	@Override
	public boolean isWrapperFor(Class<?> iface) {
		for (Filter filter : this.filters) {
			if (filter.isWrapperFor(iface)) {
				return true;
			}
		}

		if (this.statLogger != null
				&& (this.statLogger.getClass() == iface || DruidDataSourceStatLogger.class == iface)) {
			return true;
		}

		return super.isWrapperFor(iface);
	}

	/**
	 * Unwraps this data source to the requested interface: first tries the
	 * filter chain, then the stat logger, then the superclass.
	 *
	 * @param iface the target interface or implementation class
	 * @return the first matching wrapped object, or the superclass result when
	 *         neither a filter nor the stat logger matches
	 */
	@Override // added for consistency with isWrapperFor; super.unwrap exists
	@SuppressWarnings("unchecked")
	public <T> T unwrap(Class<T> iface) {
		for (Filter filter : this.filters) {
			if (filter.isWrapperFor(iface)) {
				return (T) filter;
			}
		}

		if (this.statLogger != null
				&& (this.statLogger.getClass() == iface || DruidDataSourceStatLogger.class == iface)) {
			return (T) statLogger;
		}

		return super.unwrap(iface);
	}

	/**
	 * @return whether connections recycled on a different thread than the one
	 *         that borrowed them should be logged
	 */
	public boolean isLogDifferentThread() {
		return this.logDifferentThread;
	}

	/**
	 * Enables or disables logging when a connection is recycled on a thread
	 * other than the one that borrowed it.
	 *
	 * @param logDifferentThread the new setting
	 */
	public void setLogDifferentThread(boolean logDifferentThread) {
		this.logDifferentThread = logDifferentThread;
	}

	/**
	 * Returns a connection only when the pool already holds an idle one;
	 * returns null instead of waiting when the pool is currently empty.
	 *
	 * @return a pooled connection, or null when {@code poolingCount} is zero
	 * @throws SQLException if obtaining the connection fails
	 */
	public DruidPooledConnection tryGetConnection() throws SQLException {
		return poolingCount == 0 ? null : getConnection();
	}

	/**
	 * Fills the pool up to {@code maxActive} connections.
	 *
	 * @return the number of connections actually created
	 * @throws SQLException if connection creation fails
	 */
	@Override
	public int fill() throws SQLException {
		return fill(this.maxActive);
	}

	/**
	 * Creates physical connections until the pool holds {@code toCount}
	 * connections (idle + active), the pool is full, or creation fails.
	 *
	 * @param toCount target total connection count; values above
	 *                {@code maxActive} are clamped to {@code maxActive}
	 * @return the number of connections created by this call
	 * @throws SQLException if the data source is closed, the calling thread is
	 *                      interrupted, or a physical connection cannot be created
	 */
	@Override
	public int fill(int toCount) throws SQLException {
		if (closed) {
			throw new DataSourceClosedException("dataSource already closed at " + new Date(closeTimeMillis));
		}

		if (toCount < 0) {
			// message typo fixed: was "can't not be less than zero"
			throw new IllegalArgumentException("toCount can't be less than zero");
		}

		init();

		if (toCount > this.maxActive) {
			toCount = this.maxActive;
		}

		int fillCount = 0;
		for (;;) {
			try {
				lock.lockInterruptibly();
			} catch (InterruptedException e) {
				connectErrorCountUpdater.incrementAndGet(this);
				Thread.currentThread().interrupt(); // preserve interrupt status for callers
				throw new SQLException("interrupt", e);
			}

			boolean fillable;
			try {
				// try/finally so the lock can never leak, even if this throws
				fillable = this.isFillable(toCount);
			} finally {
				lock.unlock();
			}

			if (!fillable) {
				break;
			}

			// Create the physical connection OUTSIDE the lock: creation can be
			// slow and must not block other pool operations.
			DruidConnectionHolder holder;
			try {
				PhysicalConnectionInfo pyConnInfo = createPhysicalConnection();
				holder = new DruidConnectionHolder(this, pyConnInfo);
			} catch (SQLException e) {
				LOG.error("fill connection error, url: " + this.jdbcUrl, e);
				connectErrorCountUpdater.incrementAndGet(this);
				throw e;
			}

			try {
				lock.lockInterruptibly();
			} catch (InterruptedException e) {
				connectErrorCountUpdater.incrementAndGet(this);
				Thread.currentThread().interrupt();
				// don't leak the just-created physical connection
				JdbcUtils.close(holder.getConnection());
				throw new SQLException("interrupt", e);
			}

			try {
				// Re-check under the lock: another thread may have filled the
				// pool while we were creating the connection.
				if (!this.isFillable(toCount)) {
					JdbcUtils.close(holder.getConnection());
					LOG.info("fill connections skip.");
					break;
				}
				this.putLast(holder, System.currentTimeMillis());
				fillCount++;
			} finally {
				lock.unlock();
			}
		}

		if (LOG.isInfoEnabled()) {
			LOG.info("fill " + fillCount + " connections");
		}

		return fillCount;
	}

	/**
	 * Whether the pool may still grow: true while the total of idle and active
	 * connections is below both {@code toCount} and {@code maxActive}.
	 * Both call sites in {@code fill(int)} invoke this while holding the lock.
	 */
	private boolean isFillable(int toCount) {
		int total = this.poolingCount + this.activeCount;
		return total < toCount && total < this.maxActive;
	}

	/**
	 * Reports whether the pool has reached {@code maxActive} connections,
	 * counting both idle and borrowed ones. Reads the counters under the lock.
	 */
	public boolean isFull() {
		lock.lock();
		try {
			int total = this.poolingCount + this.activeCount;
			return total >= this.maxActive;
		} finally {
			lock.unlock();
		}
	}

	/**
	 * Requests creation of a new connection because the pool ran empty.
	 * NOTE(review): 'empty' is a Condition, so the caller is presumably
	 * expected to hold {@link #lock} here — confirm at the call sites.
	 */
	private void emptySignal() {
		if (createScheduler == null) {// no createScheduler: wake the dedicated create thread waiting on 'empty'
			empty.signal();
			return;
		}

		// Scheduler mode: cap the number of in-flight create tasks.
		if (createTaskCount >= maxCreateTaskCount) {
			return;
		}

		// Don't schedule a task that would push the pool past maxActive.
		if (activeCount + poolingCount + createTaskCount >= maxActive) {
			return;
		}

		// Count the task before submitting; the task is responsible for
		// decrementing when it finishes.
		createTaskCount++;
		CreateConnectionTask task = new CreateConnectionTask();
		this.createSchedulerFuture = createScheduler.submit(task);
	}

	/**
	 * JMX callback invoked before MBean registration: unregisters any stale
	 * MBean already bound under the same name so re-registration cannot fail.
	 *
	 * @param server the MBean server performing the registration (may be null)
	 * @param name   the object name to register under
	 * @return the (unchanged) object name to use for registration
	 */
	@Override
	public ObjectName preRegister(MBeanServer server, ObjectName name) throws Exception {
		if (server == null) {
			return name;
		}

		try {
			if (server.isRegistered(name)) {
				server.unregisterMBean(name);
			}
		} catch (Exception ex) {
			// best effort: registration proceeds even if cleanup fails
			LOG.warn("DruidDataSource preRegister error", ex);
		}

		return name;
	}

	// JMX MBeanRegistration callback: intentionally a no-op.
	@Override
	public void postRegister(Boolean registrationDone) {

	}

	// JMX MBeanRegistration callback: intentionally a no-op.
	@Override
	public void preDeregister() throws Exception {

	}

	// JMX MBeanRegistration callback: intentionally a no-op.
	@Override
	public void postDeregister() {

	}

	/**
	 * @return true once this data source has been closed
	 */
	public boolean isClosed() {
		return closed;
	}
}
