package com.reger.datasource.core;

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

import javax.sql.DataSource;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;

import com.alibaba.druid.pool.DruidDataSource;
import com.reger.datasource.properties.DataSourceProperties;
import com.reger.datasource.properties.DruidProperties;
import com.reger.datasource.properties.MybatisNodeProperties;
import com.reger.datasource.properties.ShardingProperties;

import io.shardingjdbc.core.api.MasterSlaveDataSourceFactory;
import io.shardingjdbc.core.api.ShardingDataSourceFactory;
import io.shardingjdbc.core.api.config.MasterSlaveRuleConfiguration;
import io.shardingjdbc.core.api.config.ShardingRuleConfiguration;
import io.shardingjdbc.core.yaml.sharding.YamlTableRuleConfiguration;

/**
 * Internal factory that assembles {@link DataSource} instances from the
 * configured node properties. Depending on the configuration it produces
 * either a plain Druid pool, a sharding-jdbc master/slave data source, or a
 * full sharding data source. All created Druid pools are tracked so they can
 * be closed together via {@link #stop()}.
 */
class DataSourceBulider {

	private static final Logger log = LoggerFactory.getLogger(DataSourceBulider.class);

	/** Every Druid pool created by this builder, keyed by pool name; used for bulk shutdown and by-name lookup. */
	private static final Map<String, DruidDataSource> dataSourceAll = new ConcurrentHashMap<>();

	/** Shared scheduler passed to Druid for its background maintenance tasks. */
	private static final ScheduledExecutorService executorService = Executors.newScheduledThreadPool(4, new ThreadFactory() {
		@Override
		public Thread newThread(Runnable target) {
			Thread thread = new Thread(target);
			thread.setName("Druid-Scheduled-" + thread.getId());
			return thread;
		}
	});

	/**
	 * Closes every created Druid data source and shuts down the shared scheduler.
	 *
	 * @throws InterruptedException if interrupted while waiting for the scheduler to terminate
	 */
	public static final void stop() throws InterruptedException {
		for (DruidDataSource dataSource : dataSourceAll.values()) {
			dataSource.close();
		}
		executorService.shutdownNow();
		// BUGFIX: awaitTermination returns true when the pool HAS terminated within
		// the timeout. The original inverted this check (and then tested
		// !isShutdown(), which is always false after shutdownNow()), so the error
		// branch was unreachable. Log and fall back to an orderly shutdown only
		// when termination did NOT complete in time.
		if (!executorService.awaitTermination(30, TimeUnit.SECONDS)) {
			log.error("未在30秒内成功关闭Druid管理线程池...");
			executorService.shutdown();
		}
	}

	/**
	 * Returns a lazily-initialized data source: the real pool is only built on
	 * first use, so configuration errors surface at first access rather than at
	 * startup.
	 *
	 * @param nodeProperties    per-node mybatis/datasource configuration
	 * @param defaultProperties defaults merged into every Druid pool
	 * @return a lazy wrapper around {@link #_bulid(MybatisNodeProperties, DruidProperties)}
	 * @throws SQLException declared for interface compatibility; the deferred
	 *                      build wraps any SQLException in a RuntimeException
	 */
	public static final DataSource bulid(MybatisNodeProperties nodeProperties, DruidProperties defaultProperties) throws SQLException {
		return new ReferenceDataSource() {
			@Override
			DataSource getReferenceDataSource() {
				try {
					return _bulid(nodeProperties, defaultProperties);
				} catch (SQLException e) {
					throw new RuntimeException(e);
				}
			}
		};
	}

	/**
	 * Eagerly builds the data source for a node. With no sharding config and a
	 * single logical data source, returns either the bare pool (no slaves) or a
	 * master/slave data source; otherwise builds a sharding data source.
	 *
	 * @param nodeProperties    per-node configuration (must contain at least one data source)
	 * @param defaultProperties defaults merged into every Druid pool
	 * @return the assembled {@link DataSource}
	 * @throws SQLException if sharding-jdbc fails to create the data source
	 */
	public static final DataSource _bulid(MybatisNodeProperties nodeProperties, DruidProperties defaultProperties) throws SQLException {
		Map<String, DataSourceProperties> dataSourcePropertiess = nodeProperties.getDataSources();
		Assert.notEmpty(dataSourcePropertiess, "数据源列表不可以为空");
		Map<String, DataSource> dataSourceMap = getDataSourceMap(dataSourcePropertiess, defaultProperties);
		Map<String, Object> configMap = nodeProperties.getConfigMap();
		if (nodeProperties.getSharding() == null && dataSourcePropertiess.size() == 1) {
			// A single physical pool needs no master/slave routing at all.
			if (dataSourceMap.size() == 1) {
				return dataSourceMap.values().iterator().next();
			}
			Collection<MasterSlaveRuleConfiguration> masterSlaveRuleConfigs = getMasterSlaveRuleConfigs(dataSourcePropertiess);
			return MasterSlaveDataSourceFactory.createDataSource(dataSourceMap, masterSlaveRuleConfigs.iterator().next(), configMap);
		} else {
			ShardingRuleConfiguration shardingRuleConfiguration = getShardingRuleConfiguration(nodeProperties);
			return ShardingDataSourceFactory.createDataSource(dataSourceMap, shardingRuleConfiguration, configMap, nodeProperties.getProps());
		}
	}

	/**
	 * Translates the node's sharding properties (tables, binding groups,
	 * default strategies, key generator) plus the derived master/slave rules
	 * into a sharding-jdbc {@link ShardingRuleConfiguration}.
	 */
	private static final ShardingRuleConfiguration getShardingRuleConfiguration(MybatisNodeProperties nodeProperties)
			throws SQLException {
		ShardingRuleConfiguration result = new ShardingRuleConfiguration();
		ShardingProperties sharding = nodeProperties.getSharding();
		if (sharding != null) {
			result.setDefaultDataSourceName(sharding.getDefaultDataSourceName());
			for (Map.Entry<String, YamlTableRuleConfiguration> entry : sharding.getTables().entrySet()) {
				YamlTableRuleConfiguration tableRuleConfig = entry.getValue();
				// The map key is the logic table name; propagate it before building.
				tableRuleConfig.setLogicTable(entry.getKey());
				result.getTableRuleConfigs().add(tableRuleConfig.build());
			}
			result.getBindingTableGroups().addAll(sharding.getBindingTables());
			if (null != sharding.getDefaultDatabaseStrategy()) {
				result.setDefaultDatabaseShardingStrategyConfig(sharding.getDefaultDatabaseStrategy().build());
			}
			if (null != sharding.getDefaultTableStrategy()) {
				result.setDefaultTableShardingStrategyConfig(sharding.getDefaultTableStrategy().build());
			}
			result.setDefaultKeyGeneratorClass(sharding.getDefaultKeyGeneratorClass());
		}
		Collection<MasterSlaveRuleConfiguration> masterSlaveRuleConfigs = getMasterSlaveRuleConfigs(nodeProperties.getDataSources());
		result.setMasterSlaveRuleConfigs(masterSlaveRuleConfigs);
		return result;
	}

	/**
	 * Creates one physical pool per master and per slave. Naming convention
	 * (must match {@link #getMasterSlaveRuleConfigs(Map)}): master pools are
	 * "{dsName}-master", slaves are "{dsName}-slave-{index}".
	 */
	private static Map<String, DataSource> getDataSourceMap(Map<String, DataSourceProperties> dataSources, DruidProperties defaultProperties) throws SQLException {
		Map<String, DataSource> dataSourceMap = new HashMap<String, DataSource>();
		for (Map.Entry<String, DataSourceProperties> entry : dataSources.entrySet()) {
			String dsName = entry.getKey();
			String masterName = dsName + "-master";
			dataSourceMap.put(masterName, createDataSource(entry.getValue().getMaster(), masterName, defaultProperties));
			List<DruidProperties> slaves = entry.getValue().getSlaves();
			if (slaves != null) {
				for (int i = 0; i < slaves.size(); i++) {
					String name = dsName + "-slave-" + i;
					DruidProperties slaveProperties = slaves.get(i);
					dataSourceMap.put(name, createDataSource(slaveProperties, name, defaultProperties));
				}
			}
		}
		return dataSourceMap;
	}

	/**
	 * Creates (or resolves by reference) a single pool. When {@code refName} is
	 * set, the pool is looked up from already-created pools and wrapped with a
	 * per-use DB name switch; otherwise a new Druid pool is built, registered
	 * in {@link #dataSourceAll}, and optionally wrapped if a DB name is given.
	 */
	private static DataSource createDataSource(DruidProperties druidProperties, String defaultName, DruidProperties defaultProperties) throws SQLException {
		if (StringUtils.isEmpty(druidProperties.getRefName())) {
			if (StringUtils.isEmpty(druidProperties.getName())) {
				druidProperties.name(defaultName);
			}
			DruidDataSource dataSource = druidProperties.merge(defaultProperties).defaultEmpty().createDataSource(executorService);
			dataSourceAll.put(dataSource.getName(), dataSource);
			if (StringUtils.isEmpty(druidProperties.getDbName())) {
				return dataSource;
			}
			return new TempDataSource(dataSource, druidProperties.getDbName());
		}
		Assert.hasText(druidProperties.getDbName(), "DB名字不可以为空");
		return new TempDataSource(dataSourceAll, druidProperties.getRefName(), druidProperties.getDbName());
	}

	/**
	 * Derives one {@link MasterSlaveRuleConfiguration} per logical data source
	 * using the same naming convention as {@link #getDataSourceMap}. A data
	 * source with no slaves lists its master as its only "slave" so reads
	 * still resolve.
	 */
	private static final Collection<MasterSlaveRuleConfiguration> getMasterSlaveRuleConfigs(Map<String, DataSourceProperties> dataSources) {
		Collection<MasterSlaveRuleConfiguration> masterSlaveRuleConfigs = new LinkedList<>();
		for (Map.Entry<String, DataSourceProperties> each : dataSources.entrySet()) {
			MasterSlaveRuleConfiguration msRuleConfig = new MasterSlaveRuleConfiguration();
			String dsName = each.getKey();
			DataSourceProperties dataSourceProperties = each.getValue();
			msRuleConfig.setName(dsName);
			String masterName = dsName + "-master";
			msRuleConfig.setMasterDataSourceName(masterName);
			List<String> slaveDataSourceNames = new ArrayList<String>();
			List<DruidProperties> slaves = dataSourceProperties.getSlaves();
			if (slaves != null && !slaves.isEmpty()) {
				for (int i = 0; i < slaves.size(); i++) {
					String name = dsName + "-slave-" + i;
					slaveDataSourceNames.add(name);
				}
			} else {
				// No slaves configured: route reads back to the master.
				slaveDataSourceNames.add(masterName);
			}
			msRuleConfig.setSlaveDataSourceNames(slaveDataSourceNames);
			msRuleConfig.setLoadBalanceAlgorithmType(dataSourceProperties.getLoadBalanceAlgorithmType());
			msRuleConfig.setLoadBalanceAlgorithmClassName(dataSourceProperties.getLoadBalanceAlgorithmClassName());
			masterSlaveRuleConfigs.add(msRuleConfig);
		}
		return masterSlaveRuleConfigs;
	}
}
