package com.sharding.jdbc.demo.config;

import com.alibaba.druid.pool.DruidDataSource;
import org.apache.shardingsphere.api.config.sharding.KeyGeneratorConfiguration;
import org.apache.shardingsphere.api.config.sharding.ShardingRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.TableRuleConfiguration;
import org.apache.shardingsphere.api.config.sharding.strategy.StandardShardingStrategyConfiguration;
import org.apache.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.core.JdbcTemplate;

import javax.sql.DataSource;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

/**
 * 数据库分库分表配置
 */
@Configuration
public class ShardJdbcConfig {

    /**
     * Druid pool settings for the shard_one data source, bound from the
     * {@code spring.datasource.dataOne.druid.*} configuration prefix.
     */
    @Value("${spring.datasource.dataOne.druid.url}")
    private String dbUrl1;
    @Value("${spring.datasource.dataOne.druid.username}")
    private String username1;
    @Value("${spring.datasource.dataOne.druid.password}")
    private String password1;
    @Value("${spring.datasource.dataOne.druid.driverClassName}")
    private String driverClassName1;
    @Value("${spring.datasource.dataOne.druid.initial-size}")
    private int initialSize1;
    @Value("${spring.datasource.dataOne.druid.max-active}")
    private int maxActive1;
    @Value("${spring.datasource.dataOne.druid.min-idle}")
    private int minIdle1;
    @Value("${spring.datasource.dataOne.druid.max-wait}")
    private int maxWait1;
    @Value("${spring.datasource.dataOne.druid.pool-prepared-statements}")
    private boolean poolPreparedStatements1;
    @Value("${spring.datasource.dataOne.druid.max-pool-prepared-statement-per-connection-size}")
    private int maxPoolPreparedStatementPerConnectionSize1;
    @Value("${spring.datasource.dataOne.druid.time-between-eviction-runs-millis}")
    private int timeBetweenEvictionRunsMillis1;
    @Value("${spring.datasource.dataOne.druid.min-evictable-idle-time-millis}")
    private int minEvictableIdleTimeMillis1;
    @Value("${spring.datasource.dataOne.druid.max-evictable-idle-time-millis}")
    private int maxEvictableIdleTimeMillis1;
    @Value("${spring.datasource.dataOne.druid.validation-query}")
    private String validationQuery1;
    @Value("${spring.datasource.dataOne.druid.test-while-idle}")
    private boolean testWhileIdle1;
    @Value("${spring.datasource.dataOne.druid.test-on-borrow}")
    private boolean testOnBorrow1;
    @Value("${spring.datasource.dataOne.druid.test-on-return}")
    private boolean testOnReturn1;
    // FIX: was "{spring...}" (missing '$'), so the raw placeholder text was
    // injected as the Druid connection properties instead of the configured value.
    @Value("${spring.datasource.dataOne.druid.connection-properties}")
    private String connectionProperties1;
    /**
     * Builds the Druid connection pool for the shard_one database from the
     * {@code spring.datasource.dataOne.druid.*} properties above.
     *
     * @return the configured {@link DruidDataSource} registered as bean {@code dataOneSource}
     */
    @Bean
    public DruidDataSource dataOneSource() {
        DruidDataSource datasource = new DruidDataSource();
        datasource.setUrl(dbUrl1);
        datasource.setUsername(username1);
        datasource.setPassword(password1);
        datasource.setDriverClassName(driverClassName1);
        datasource.setInitialSize(initialSize1);
        datasource.setMinIdle(minIdle1);
        datasource.setMaxActive(maxActive1);
        datasource.setMaxWait(maxWait1);
        datasource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis1);
        datasource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis1);
        // FIX: previously passed minEvictableIdleTimeMillis1 here, so the
        // configured max-evictable-idle-time-millis property was never applied.
        datasource.setMaxEvictableIdleTimeMillis(maxEvictableIdleTimeMillis1);
        datasource.setValidationQuery(validationQuery1);
        datasource.setTestWhileIdle(testWhileIdle1);
        datasource.setTestOnBorrow(testOnBorrow1);
        datasource.setTestOnReturn(testOnReturn1);
        datasource.setPoolPreparedStatements(poolPreparedStatements1);
        datasource.setMaxPoolPreparedStatementPerConnectionSize(maxPoolPreparedStatementPerConnectionSize1);
        datasource.setConnectionProperties(connectionProperties1);
        return datasource;
    }

    /**
     * Druid pool settings for the shard_two data source, bound from the
     * {@code spring.datasource.dataTwo.druid.*} configuration prefix.
     */
    @Value("${spring.datasource.dataTwo.druid.url}")
    private String dbUrl2;
    @Value("${spring.datasource.dataTwo.druid.username}")
    private String username2;
    @Value("${spring.datasource.dataTwo.druid.password}")
    private String password2;
    @Value("${spring.datasource.dataTwo.druid.driverClassName}")
    private String driverClassName2;
    @Value("${spring.datasource.dataTwo.druid.initial-size}")
    private int initialSize2;
    @Value("${spring.datasource.dataTwo.druid.max-active}")
    private int maxActive2;
    @Value("${spring.datasource.dataTwo.druid.min-idle}")
    private int minIdle2;
    @Value("${spring.datasource.dataTwo.druid.max-wait}")
    private int maxWait2;
    @Value("${spring.datasource.dataTwo.druid.pool-prepared-statements}")
    private boolean poolPreparedStatements2;
    @Value("${spring.datasource.dataTwo.druid.max-pool-prepared-statement-per-connection-size}")
    private int maxPoolPreparedStatementPerConnectionSize2;
    @Value("${spring.datasource.dataTwo.druid.time-between-eviction-runs-millis}")
    private int timeBetweenEvictionRunsMillis2;
    @Value("${spring.datasource.dataTwo.druid.min-evictable-idle-time-millis}")
    private int minEvictableIdleTimeMillis2;
    @Value("${spring.datasource.dataTwo.druid.max-evictable-idle-time-millis}")
    private int maxEvictableIdleTimeMillis2;
    @Value("${spring.datasource.dataTwo.druid.validation-query}")
    private String validationQuery2;
    @Value("${spring.datasource.dataTwo.druid.test-while-idle}")
    private boolean testWhileIdle2;
    @Value("${spring.datasource.dataTwo.druid.test-on-borrow}")
    private boolean testOnBorrow2;
    @Value("${spring.datasource.dataTwo.druid.test-on-return}")
    private boolean testOnReturn2;
    // FIX: was "{spring...}" (missing '$'), so the raw placeholder text was
    // injected as the Druid connection properties instead of the configured value.
    @Value("${spring.datasource.dataTwo.druid.connection-properties}")
    private String connectionProperties2;
    /**
     * Builds the Druid connection pool for the shard_two database from the
     * {@code spring.datasource.dataTwo.druid.*} properties above.
     *
     * @return the configured {@link DruidDataSource} registered as bean {@code dataTwoSource}
     */
    @Bean
    public DruidDataSource dataTwoSource() {
        DruidDataSource datasource = new DruidDataSource();
        datasource.setUrl(dbUrl2);
        datasource.setUsername(username2);
        datasource.setPassword(password2);
        datasource.setDriverClassName(driverClassName2);
        datasource.setInitialSize(initialSize2);
        datasource.setMinIdle(minIdle2);
        datasource.setMaxActive(maxActive2);
        datasource.setMaxWait(maxWait2);
        datasource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis2);
        datasource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis2);
        // FIX: previously passed minEvictableIdleTimeMillis2 here, so the
        // configured max-evictable-idle-time-millis property was never applied.
        datasource.setMaxEvictableIdleTimeMillis(maxEvictableIdleTimeMillis2);
        datasource.setValidationQuery(validationQuery2);
        datasource.setTestWhileIdle(testWhileIdle2);
        datasource.setTestOnBorrow(testOnBorrow2);
        datasource.setTestOnReturn(testOnReturn2);
        datasource.setPoolPreparedStatements(poolPreparedStatements2);
        datasource.setMaxPoolPreparedStatementPerConnectionSize(maxPoolPreparedStatementPerConnectionSize2);
        datasource.setConnectionProperties(connectionProperties2);
        return datasource;
    }

    /**
     * JDBC templates bound directly to each physical data source, bypassing the
     * sharding layer (used for custom DDL such as table creation in the
     * controller layer). Two {@link DruidDataSource} beans exist, so each
     * parameter carries an explicit {@code @Qualifier} instead of relying on
     * fragile parameter-name/bean-name matching.
     *
     * @param dataOneSource the shard_one pool
     * @return a JdbcTemplate over the shard_one database
     */
    @Bean(name = "dataOneTemplate")
    public JdbcTemplate dataOneTemplate(@Qualifier("dataOneSource") DruidDataSource dataOneSource) {
        return new JdbcTemplate(dataOneSource);
    }

    /**
     * @param dataTwoSource the shard_two pool
     * @return a JdbcTemplate over the shard_two database
     */
    @Bean(name = "dataTwoTemplate")
    public JdbcTemplate dataTwoTemplate(@Qualifier("dataTwoSource") DruidDataSource dataTwoSource) {
        return new JdbcTemplate(dataTwoSource);
    }

    /**
     * Sharding-JDBC routing data source spanning the two physical databases
     * (registered as logical names {@code ds_0} / {@code ds_1}). Explicit
     * {@code @Qualifier}s disambiguate the two DruidDataSource beans.
     *
     * @param dataOneSource physical pool mapped to logical name ds_0
     * @param dataTwoSource physical pool mapped to logical name ds_1
     * @return the ShardingSphere DataSource used by callers for sharded SQL
     * @throws Exception if ShardingDataSourceFactory fails to build the data source
     */
    @Bean
    public DataSource dataSource(@Qualifier("dataOneSource") DruidDataSource dataOneSource,
                                 @Qualifier("dataTwoSource") DruidDataSource dataTwoSource) throws Exception {
        ShardingRuleConfiguration shardJdbcConfig = new ShardingRuleConfiguration();
        // Register the sharding rules for table_one and table_two.
        shardJdbcConfig.getTableRuleConfigs().add(getTableRule01());
        shardJdbcConfig.getTableRuleConfigs().add(getTableRule02());
        // Tables without a matching rule are routed to this default data source.
        shardJdbcConfig.setDefaultDataSourceName("ds_0");
        // Declare table_one and table_two as binding tables (sharded consistently,
        // so joins between them avoid cartesian routing).
        shardJdbcConfig.getBindingTableGroups().add("table_one,table_two");
        // Read/write splitting could be configured here:
        /*shardingRuleConfig.setMasterSlaveRuleConfigs();*/
        // Map logical data source names to the physical pools.
        Map<String, DataSource> dataMap = new LinkedHashMap<>();
        dataMap.put("ds_0", dataOneSource);
        dataMap.put("ds_1", dataTwoSource);
        // ShardingSphere runtime properties.
        Properties prop = new Properties();
        // Print SQL parse/rewrite logs.
        prop.setProperty("sql.show", Boolean.TRUE.toString());
        // Worker threads for SQL execution; zero means unlimited.
        prop.setProperty("executor.size", "4");
        // Max connections each physical database may allocate per query.
        prop.setProperty("max.connections.size.per.query", "1");
        // Whether to verify sharding-table metadata consistency at startup.
        prop.setProperty("check.table.metadata.enabled", "false");
        return ShardingDataSourceFactory.createDataSource(dataMap, shardJdbcConfig, prop);
    }

    /**
     * Sharding rule for the logical table {@code table_one}.
     *
     * Actual nodes: {@code ds_${0..1}.table_one_${1..5}} — database suffix 0-1,
     * table suffix 1-5, so 2 databases x 5 tables.
     */
    private static TableRuleConfiguration getTableRule01() {
        // param1: logical table name; param2: actual data nodes (data source + table name).
        // ds_${0..1} selects the database suffix (0-1); table_one_${1..5} the table suffix (1-5).
        TableRuleConfiguration result = new TableRuleConfiguration("table_one","ds_${0..1}.table_one_${1..5}");
        // Key generation column; the default key generator algorithm is snowflake.
        result.setKeyGeneratorConfig(getKeyGeneratorConfiguration());
        // Database sharding strategy: shard on the "phone" column via the custom
        // DataSourceAlg (original note says a simple modulo scheme — confirm in that class).
        result.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new DataSourceAlg()));
        // Table sharding strategy: shard on "phone" via the custom TableOneAlg
        // (original note says a simple modulo scheme — confirm in that class).
        result.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new TableOneAlg()));
        return result;
    }
    /**
     * Sharding rule for the logical table {@code table_two}: actual nodes
     * {@code ds_${0..1}.table_two_${1..5}} (2 databases x 5 tables).
     */
    private static TableRuleConfiguration getTableRule02() {
        // param1: logical table name; param2: actual data nodes (data source + table name).
        // ds_${0..1} selects the database suffix (0-1); table_two_${1..5} the table suffix (1-5).
        TableRuleConfiguration result = new TableRuleConfiguration("table_two","ds_${0..1}.table_two_${1..5}");
        // Key generation column; the default key generator algorithm is snowflake.
        result.setKeyGeneratorConfig(getKeyGeneratorConfiguration());
        // Database sharding strategy: shard on the "phone" column via the custom
        // DataSourceAlg (original note says a simple modulo scheme — confirm in that class).
        result.setDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new DataSourceAlg()));
        // Table sharding strategy: shard on "phone" via the custom TableTwoAlg
        // (original note says a simple modulo scheme — confirm in that class).
        result.setTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("phone", new TableTwoAlg()));
        return result;
    }

    /**
     * Key generator used by both table rules: snowflake IDs written to the
     * {@code id} column of each sharded table.
     *
     * @return a snowflake key generator configuration targeting the id column
     */
    public static KeyGeneratorConfiguration getKeyGeneratorConfiguration() {
        return new KeyGeneratorConfiguration("SNOWFLAKE", "id");
    }
}
