package com.horse.cloud.framework.mybatis.datasource;

import com.alibaba.druid.pool.DruidDataSource;
import com.horse.cloud.framework.mybatis.MybatisException;
import com.horse.cloud.framework.mybatis.config.ExpansionDatasourceConfig;
import org.apache.shardingsphere.api.config.masterslave.MasterSlaveRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.ShardingRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.TableRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.strategy.InlineShardingStrategyConfiguration;
import org.apache.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.boot.context.properties.bind.Bindable;
import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.context.EnvironmentAware;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.env.Environment;
import org.springframework.util.StringUtils;

import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.*;

/**
 * MyBatis integration with ShardingSphere: builds a routing DataSource from
 * multiple Druid data sources with sharding and master/slave rules.
 *
 * @author bing.Pan
 * @e-mail 15923508369@163.com
 * @since 2018-03-15
 */

@Configuration
public class MultiRouteDataSource implements InitializingBean, EnvironmentAware {

    private static final Logger LOGGER = LoggerFactory.getLogger(MultiRouteDataSource.class);

    private Environment environment;

    /** Logical database name -> physical (Druid) data source; LinkedHashMap keeps configuration order. */
    private Map<String, DataSource> shardingDatasourceMap = new LinkedHashMap<>();
    /** Master database name -> comma-separated slave database names. */
    private Map<String, String> masterSlaveDatasourceMap = new HashMap<>();

    /** Logical table -> "actualDataNodes|dbShardingColumn,dbShardingExpr|tableShardingColumn,tableShardingExpr". */
    private Map<String, String> shardingTableMap = new HashMap<>();
    /** Non-sharded tables that all live in the default database. */
    private List<String> togetherTableList = new ArrayList<>();

    /** Name of the database holding the non-sharded ("together") tables. */
    private String defaultDatabaseName;
    /** ShardingSphere "sql.show" flag, bound as a String ("true"/"false"). */
    private String sqlShow;

    /**
     * Builds the ShardingSphere routing data source from the sharding-table rules,
     * the non-sharded table rules and the master/slave rules collected in
     * {@link #afterPropertiesSet()}.
     *
     * @return the routing {@link DataSource}
     * @throws IllegalStateException if ShardingSphere fails to create the data source
     */
    @Bean
    public DataSource getDataSource() {
        ShardingRuleConfiguration shardingConfig = new ShardingRuleConfiguration();

        // Rules for sharded tables. Each configured value has the shape
        // "actualDataNodes|dbColumn,dbExpression|tableColumn,tableExpression".
        shardingTableMap.forEach((table, rule) -> {
            String[] parts = rule.split("\\|");
            String[] databaseSharding = parts[1].split(",");
            String[] tableSharding = parts[2].split(",");

            TableRuleConfiguration tableRule = new TableRuleConfiguration(table, parts[0]);
            tableRule.setDatabaseShardingStrategyConfig(
                    new InlineShardingStrategyConfiguration(databaseSharding[0], databaseSharding[1]));
            tableRule.setTableShardingStrategyConfig(
                    new InlineShardingStrategyConfiguration(tableSharding[0], tableSharding[1]));
            shardingConfig.getTableRuleConfigs().add(tableRule);
        });

        // Rules for non-sharded tables: route each one to the default database.
        togetherTableList.forEach(table ->
                shardingConfig.getTableRuleConfigs().add(
                        new TableRuleConfiguration(table, defaultDatabaseName + "." + table)));

        // Master/slave rules: the logical name doubles as the master data source name.
        List<MasterSlaveRuleConfiguration> masterSlaveRules = new ArrayList<>();
        masterSlaveDatasourceMap.forEach((master, slaves) ->
                masterSlaveRules.add(new MasterSlaveRuleConfiguration(
                        master, master, Arrays.asList(slaves.split(",")))));
        shardingConfig.setMasterSlaveRuleConfigs(masterSlaveRules);

        Properties properties = new Properties();
        properties.setProperty("sql.show", sqlShow);

        try {
            return ShardingDataSourceFactory.createDataSource(shardingDatasourceMap, shardingConfig, properties);
        } catch (SQLException e) {
            // Fail fast instead of returning null: a null @Bean would only defer the
            // failure to the first query and hide the root cause.
            throw new IllegalStateException("【framework-mybatisSharding】failed to create sharding data source", e);
        }
    }

    /**
     * Registers the configured data sources and sharding metadata once the bean's
     * properties are populated.
     *
     * @throws Exception if any mandatory configuration is missing or inconsistent
     */
    @Override
    public void afterPropertiesSet() throws Exception {
        initDatasourceInfo();
        initSharingInfo();
    }

    /**
     * Loads sharding-table, together-table and sql-show settings from
     * "horse.data-table.*". All three properties are mandatory: {@code Binder#get}
     * throws if a property is absent.
     */
    private void initSharingInfo() {
        Binder binder = Binder.get(environment);

        shardingTableMap = binder.bind("horse.data-table.sharing-table", Bindable.mapOf(String.class, String.class)).get();
        LOGGER.info("【framework-mybatisSharding】配置分表信息为[{}]", shardingTableMap);

        togetherTableList = binder.bind("horse.data-table.together-table", Bindable.listOf(String.class)).get();
        LOGGER.info("【framework-mybatisSharding】配置聚合表信息为{}", togetherTableList);

        sqlShow = binder.bind("horse.data-table.sql-show", Bindable.of(String.class)).get();
    }

    /**
     * Loads the data source settings from "horse.multiple-data-source.*", validates
     * that the parallel name/url/user/password lists line up, and builds one Druid
     * data source per configured database.
     *
     * @throws MybatisException if no database is configured or the parallel lists
     *                          differ in size
     */
    private void initDatasourceInfo() throws MybatisException {
        Binder binder = Binder.get(environment);

        List<String> databasesList = binder.bind("horse.multiple-data-source.databases-name", Bindable.listOf(String.class)).get();
        Map<String, String> slaveDatasourceMap = binder.bind("horse.multiple-data-source.master-slave-name", Bindable.mapOf(String.class, String.class)).get();
        String druidType = binder.bind("horse.multiple-data-source.type", Bindable.of(String.class)).get();
        List<String> connUrlList = binder.bind("horse.multiple-data-source.conn-url", Bindable.listOf(String.class)).get();
        List<String> userNameList = binder.bind("horse.multiple-data-source.user-name", Bindable.listOf(String.class)).get();
        List<String> passwordList = binder.bind("horse.multiple-data-source.password", Bindable.listOf(String.class)).get();
        String driveClassName = binder.bind("horse.multiple-data-source.drive-class-name", Bindable.of(String.class)).get();

        ExpansionDatasourceConfig expansionDatasource = binder.bind("horse.expansion-datasource", Bindable.of(ExpansionDatasourceConfig.class)).get();
        defaultDatabaseName = binder.bind("horse.multiple-data-source.default-name", Bindable.of(String.class)).get();

        LOGGER.info("【framework-mybatisSharding】配置数据库分片集合为[{}]", databasesList);
        LOGGER.info("【framework-mybatisSharding】配置数据库主从集合为[{}]", slaveDatasourceMap);
        LOGGER.info("【framework-mybatisSharding】配置数据库连接池名称为[{}]", druidType);
        LOGGER.info("【framework-mybatisSharding】配置数据库地址集合为[{}]", connUrlList);
        LOGGER.info("【framework-mybatisSharding】配置数据库用户名集合为[{}]", userNameList);
        // Log only the count: passwords must not appear in the log.
        LOGGER.info("【framework-mybatisSharding】配置数据库密码集合个数为[{}]", passwordList.size());
        LOGGER.info("【framework-mybatisSharding】配置数据库驱动类型为[{}]", driveClassName);
        LOGGER.info("【framework-mybatisSharding】配置数据库默认数据为[{}]", defaultDatabaseName);

        if (databasesList.isEmpty()) {
            throw new MybatisException("【framework-mybatisSharding】未配置数据源信息");
        }

        int dataBaseSize = databasesList.size();
        // A mismatch in ANY of the parallel lists is a configuration error. The
        // original "&&" only fired when all three sizes were wrong at once, letting
        // partially-broken configuration through to an IndexOutOfBoundsException.
        if (dataBaseSize != connUrlList.size()
                || dataBaseSize != userNameList.size()
                || dataBaseSize != passwordList.size()) {
            throw new MybatisException("【framework-mybatisSharding】置数据源信息不全");
        }

        for (int i = 0; i < dataBaseSize; i++) {
            String datasourceName = databasesList.get(i);

            DruidDataSource druidDataSource = buildDruidDataSource(
                    datasourceName,
                    connUrlList.get(i),
                    userNameList.get(i),
                    passwordList.get(i),
                    driveClassName,
                    expansionDatasource);

            shardingDatasourceMap.put(datasourceName, druidDataSource);
            if (slaveDatasourceMap.containsKey(datasourceName)) {
                masterSlaveDatasourceMap.put(datasourceName, slaveDatasourceMap.get(datasourceName));
            }
        }
    }

    /**
     * Creates one Druid data source for the given connection settings and applies
     * the optional pool tuning from {@code horse.expansion-datasource}.
     */
    private DruidDataSource buildDruidDataSource(String name, String url, String userName,
                                                 String password, String driverClassName,
                                                 ExpansionDatasourceConfig expansionDatasource) {
        DruidDataSource druidDataSource = new DruidDataSource();
        druidDataSource.setName(name);
        druidDataSource.setUrl(url);
        druidDataSource.setUsername(userName);
        druidDataSource.setPassword(password);
        druidDataSource.setDriverClassName(driverClassName);

        // Optional pool tuning; the original used StringUtils.isEmpty(Object),
        // which for a non-String bean is just a null check.
        if (expansionDatasource != null) {
            // Parse "key1=value1;key2=value2" into JDBC connection properties.
            Properties properties = new Properties();
            for (String pair : expansionDatasource.getConnectionProperties().split(";")) {
                String[] keyValue = pair.split("=");
                properties.setProperty(keyValue[0], keyValue[1]);
            }

            druidDataSource.setInitialSize(expansionDatasource.getInitialSize());
            druidDataSource.setMaxActive(expansionDatasource.getMaxActive());
            druidDataSource.setMinIdle(expansionDatasource.getMinIdle());
            druidDataSource.setMaxWait(expansionDatasource.getMaxWait());
            druidDataSource.setDefaultAutoCommit(expansionDatasource.getDefaultAutoCommit());
            druidDataSource.setRemoveAbandoned(expansionDatasource.getRemoveAbandoned());
            druidDataSource.setRemoveAbandonedTimeout(expansionDatasource.getRemoveAbandonedTimeout());
            druidDataSource.setNumTestsPerEvictionRun(expansionDatasource.getNumTestsPerEvictionRun());
            druidDataSource.setConnectProperties(properties);
            druidDataSource.setValidationQuery(expansionDatasource.getValidationQuery());
            druidDataSource.setTestWhileIdle(expansionDatasource.isTestWhileIdle());
            druidDataSource.setMinEvictableIdleTimeMillis(expansionDatasource.getMinEvictableIdleTimeMillis());
            // Fix: the eviction-run interval was previously written to
            // timeBetweenConnectErrorMillis, leaving the eviction interval at its default.
            druidDataSource.setTimeBetweenEvictionRunsMillis(expansionDatasource.getTimeBetweenEvictionRunsMillis());
            druidDataSource.setTestOnBorrow(expansionDatasource.getTestOnBorrow());
            druidDataSource.setTestOnReturn(expansionDatasource.getTestOnReturn());
        }
        return druidDataSource;
    }

    @Override
    public void setEnvironment(Environment environment) {
        this.environment = environment;
    }
}
