package com.fzh.study.sharedingjdbcdemo.config.datasource;

import com.alibaba.druid.pool.DruidDataSource;
import com.dangdang.ddframe.rdb.sharding.api.ShardingDataSourceFactory;
import com.dangdang.ddframe.rdb.sharding.api.rule.BindingTableRule;
import com.dangdang.ddframe.rdb.sharding.api.rule.DataSourceRule;
import com.dangdang.ddframe.rdb.sharding.api.rule.ShardingRule;
import com.dangdang.ddframe.rdb.sharding.api.rule.TableRule;
import com.dangdang.ddframe.rdb.sharding.api.strategy.database.DatabaseShardingStrategy;
import com.dangdang.ddframe.rdb.sharding.api.strategy.table.TableShardingStrategy;
import com.fzh.study.sharedingjdbcdemo.config.algorithm.DataBaseShardingAlgorithm;
import com.fzh.study.sharedingjdbcdemo.config.algorithm.TableShardingAlgorithm;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.transaction.annotation.EnableTransactionManagement;

import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.*;

@Configuration
@EnableTransactionManagement
@ConditionalOnClass(DruidDataSource.class)
@EnableConfigurationProperties({ShardDataSource0.class, ShardDataSource1.class, ShardDataSource2.class})
public class DataSourceConfig {

    @Autowired
    private ShardDataSource0 dataSource0;
    @Autowired
    private ShardDataSource1 dataSource1;
    @Autowired
    private ShardDataSource2 dataSource2;

    /**
     * Builds the data-source rule, i.e. hands all physical data sources over to
     * sharding-jdbc for management. Data-source names should follow a predictable
     * pattern ("dataSource" + index) so the database sharding algorithm can compute
     * the target name. {@code dataSource0} is the default data source, used for any
     * table that has no database sharding rule configured.
     *
     * @return the rule mapping logical data-source names to physical pools
     * @throws SQLException if a Druid pool rejects its configuration (e.g. bad filters)
     */
    @Bean
    public DataSourceRule dataSourceRule() throws SQLException {
        Map<String, DataSource> dataSourceMap = new HashMap<>();
        dataSourceMap.put("dataSource0", getDB0(dataSource0));
        dataSourceMap.put("dataSource1", getDB1(dataSource1));
        dataSourceMap.put("dataSource2", getDB2(dataSource2));
        return new DataSourceRule(dataSourceMap, "dataSource0");
    }

    /**
     * Configures the database and table sharding strategies; the concrete routing
     * logic lives in {@link DataBaseShardingAlgorithm} and {@link TableShardingAlgorithm}.
     *
     * @param dataSourceRule the rule produced by {@link #dataSourceRule()}
     * @return the sharding rule consumed by {@link #shardingDataSource(ShardingRule)}
     */
    @Bean
    public ShardingRule shardingRule(@Qualifier("dataSourceRule") DataSourceRule dataSourceRule) {
        // Concrete sharding rule for the logical table t_user: two actual tables,
        // routed by order_id. NOTE(review): sharding t_user by "order_id" looks
        // inconsistent with the table name — confirm the intended sharding column.
        TableRule orderTableRule = TableRule.builder("t_user")
                .actualTables(Arrays.asList("t_user_0", "t_user_1"))
                .tableShardingStrategy(new TableShardingStrategy("order_id", new TableShardingAlgorithm()))
                .dataSourceRule(dataSourceRule)
                .build();

        // Binding-table rule: bound tables are routed using the primary table's
        // strategy, so every table in a binding group must share identical sharding
        // rules. This avoids cartesian routing and improves query efficiency.
        List<BindingTableRule> bindingTableRuleList = new ArrayList<BindingTableRule>();
        bindingTableRuleList.add(new BindingTableRule(Arrays.asList(orderTableRule)));
        return ShardingRule.builder().dataSourceRule(dataSourceRule)
                .tableRules(Arrays.asList(orderTableRule))
                .bindingTableRules(bindingTableRuleList)
                // Default strategies: databases are selected by user_id, tables by order_id.
                .databaseShardingStrategy(new DatabaseShardingStrategy("user_id", new DataBaseShardingAlgorithm()))
                .tableShardingStrategy(new TableShardingStrategy("order_id", new TableShardingAlgorithm()))
                .build();
    }

    /**
     * Creates the sharding-jdbc {@link DataSource}; MybatisAutoConfiguration will
     * pick up this bean as the application's data source.
     *
     * @param shardingRule the sharding rule to route SQL with
     * @return the routing data source wrapping all physical pools
     * @throws SQLException if the sharding data source cannot be created
     */
    @Bean
    public DataSource shardingDataSource(@Qualifier("shardingRule") ShardingRule shardingRule) throws SQLException {
        return ShardingDataSourceFactory.createDataSource(shardingRule);
    }

    // The three builders below are intentionally parallel: ShardDataSource0/1/2 are
    // distinct @ConfigurationProperties types, so without a common supertype the
    // property copying cannot be unified into one method.
    // NOTE(review): these Druid pools are not registered as beans, so they receive
    // no close() callback on context shutdown — consider exposing them as @Bean
    // methods (Druid's close() would then run via inferred destroy method).

    /** Builds the Druid pool for shard 0 from its bound configuration properties. */
    private DruidDataSource getDB0(ShardDataSource0 db0) throws SQLException {
        DruidDataSource ds = new DruidDataSource();
        ds.setDriverClassName(db0.getDriverClassName());
        ds.setUrl(db0.getUrl());
        ds.setUsername(db0.getUsername());
        ds.setPassword(db0.getPassword());
        // setFilters parses the filter list and may throw SQLException on unknown names.
        ds.setFilters(db0.getFilters());
        ds.setMaxActive(db0.getMaxActive());
        ds.setInitialSize(db0.getInitialSize());
        ds.setMaxWait(db0.getMaxWait());
        ds.setMinIdle(db0.getMinIdle());
        ds.setTimeBetweenEvictionRunsMillis(db0.getTimeBetweenEvictionRunsMillis());
        ds.setMinEvictableIdleTimeMillis(db0.getMinEvictableIdleTimeMillis());
        ds.setValidationQuery(db0.getValidationQuery());
        ds.setTestWhileIdle(db0.isTestWhileIdle());
        ds.setTestOnBorrow(db0.isTestOnBorrow());
        ds.setTestOnReturn(db0.isTestOnReturn());
        ds.setPoolPreparedStatements(db0.isPoolPreparedStatements());
        ds.setMaxPoolPreparedStatementPerConnectionSize(
                db0.getMaxPoolPreparedStatementPerConnectionSize());
        ds.setRemoveAbandoned(db0.isRemoveAbandoned());
        ds.setRemoveAbandonedTimeout(db0.getRemoveAbandonedTimeout());
        ds.setLogAbandoned(db0.isLogAbandoned());
        ds.setConnectionInitSqls(db0.getConnectionInitSqls());
        ds.setConnectionProperties(db0.getConnectionProperties());
        return ds;
    }

    /** Builds the Druid pool for shard 1 from its bound configuration properties. */
    private DruidDataSource getDB1(ShardDataSource1 db1) throws SQLException {
        DruidDataSource ds = new DruidDataSource();
        ds.setDriverClassName(db1.getDriverClassName());
        ds.setUrl(db1.getUrl());
        ds.setUsername(db1.getUsername());
        ds.setPassword(db1.getPassword());
        // setFilters parses the filter list and may throw SQLException on unknown names.
        ds.setFilters(db1.getFilters());
        ds.setMaxActive(db1.getMaxActive());
        ds.setInitialSize(db1.getInitialSize());
        ds.setMaxWait(db1.getMaxWait());
        ds.setMinIdle(db1.getMinIdle());
        ds.setTimeBetweenEvictionRunsMillis(db1.getTimeBetweenEvictionRunsMillis());
        ds.setMinEvictableIdleTimeMillis(db1.getMinEvictableIdleTimeMillis());
        ds.setValidationQuery(db1.getValidationQuery());
        ds.setTestWhileIdle(db1.isTestWhileIdle());
        ds.setTestOnBorrow(db1.isTestOnBorrow());
        ds.setTestOnReturn(db1.isTestOnReturn());
        ds.setPoolPreparedStatements(db1.isPoolPreparedStatements());
        ds.setMaxPoolPreparedStatementPerConnectionSize(
                db1.getMaxPoolPreparedStatementPerConnectionSize());
        ds.setRemoveAbandoned(db1.isRemoveAbandoned());
        ds.setRemoveAbandonedTimeout(db1.getRemoveAbandonedTimeout());
        ds.setLogAbandoned(db1.isLogAbandoned());
        ds.setConnectionInitSqls(db1.getConnectionInitSqls());
        ds.setConnectionProperties(db1.getConnectionProperties());
        return ds;
    }

    /** Builds the Druid pool for shard 2 from its bound configuration properties. */
    private DruidDataSource getDB2(ShardDataSource2 db2) throws SQLException {
        DruidDataSource ds = new DruidDataSource();
        ds.setDriverClassName(db2.getDriverClassName());
        ds.setUrl(db2.getUrl());
        ds.setUsername(db2.getUsername());
        ds.setPassword(db2.getPassword());
        // setFilters parses the filter list and may throw SQLException on unknown names.
        ds.setFilters(db2.getFilters());
        ds.setMaxActive(db2.getMaxActive());
        ds.setInitialSize(db2.getInitialSize());
        ds.setMaxWait(db2.getMaxWait());
        ds.setMinIdle(db2.getMinIdle());
        ds.setTimeBetweenEvictionRunsMillis(db2.getTimeBetweenEvictionRunsMillis());
        ds.setMinEvictableIdleTimeMillis(db2.getMinEvictableIdleTimeMillis());
        ds.setValidationQuery(db2.getValidationQuery());
        ds.setTestWhileIdle(db2.isTestWhileIdle());
        ds.setTestOnBorrow(db2.isTestOnBorrow());
        ds.setTestOnReturn(db2.isTestOnReturn());
        ds.setPoolPreparedStatements(db2.isPoolPreparedStatements());
        ds.setMaxPoolPreparedStatementPerConnectionSize(
                db2.getMaxPoolPreparedStatementPerConnectionSize());
        ds.setRemoveAbandoned(db2.isRemoveAbandoned());
        ds.setRemoveAbandonedTimeout(db2.getRemoveAbandonedTimeout());
        ds.setLogAbandoned(db2.isLogAbandoned());
        ds.setConnectionInitSqls(db2.getConnectionInitSqls());
        ds.setConnectionProperties(db2.getConnectionProperties());
        return ds;
    }
}