package com.yhou.sharding.config;

import com.alibaba.druid.spring.boot.autoconfigure.DruidDataSourceBuilder;
import io.shardingsphere.core.api.ShardingDataSourceFactory;
import io.shardingsphere.core.api.config.ShardingRuleConfiguration;
import io.shardingsphere.core.api.config.TableRuleConfiguration;
import io.shardingsphere.core.api.config.strategy.StandardShardingStrategyConfiguration;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.mybatis.spring.annotation.MapperScan;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.env.Environment;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;

import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;

/**
 * Sharding data-source configuration: builds two Druid data sources from
 * external properties, combines them into a ShardingSphere sharding data
 * source, and wires MyBatis and transaction management on top of it.
 *
 * @author houyong
 * @date 2018/11/16 上午11:55
 */
@Configuration
@EnableTransactionManagement
@MapperScan(basePackages = "com.yhou.sharding.mapper")
public class DataSourcesConfig {

    private static final Logger log = LoggerFactory.getLogger(DataSourcesConfig.class);

    @Autowired
    private Environment env;

    /**
     * First physical data source ({@code ds_0}), built from the
     * {@code spring.datasource.druid.ds1} properties.
     *
     * @return the Druid-backed {@link DataSource}
     * @throws IllegalStateException if the data source cannot be created
     */
    @Bean(name = "firstDatasource")
    @Primary
    @Qualifier("firstDatasource")
    @ConfigurationProperties(prefix = "spring.datasource.druid.ds1")
    public DataSource firstDatasource() {
        log.info("Configuring JDBC datasource from spring.datasource.druid.ds1");
        try {
            return DruidDataSourceBuilder.create().build(env, "spring.datasource.druid.ds1");
        } catch (Exception e) {
            // Fail fast: returning null here would only surface later as an
            // obscure NullPointerException on the first SQL statement.
            throw new IllegalStateException("Failed to create datasource 'firstDatasource'", e);
        }
    }

    /**
     * Second physical data source ({@code ds_1}), built from the
     * {@code spring.datasource.druid.ds2} properties.
     *
     * @return the Druid-backed {@link DataSource}
     * @throws IllegalStateException if the data source cannot be created
     */
    @Bean(name = "secondDatasource")
    @Qualifier("secondDatasource")
    @ConfigurationProperties(prefix = "spring.datasource.druid.ds2")
    public DataSource secondDatasource() {
        log.info("Configuring JDBC datasource from spring.datasource.druid.ds2");
        try {
            return DruidDataSourceBuilder.create().build(env, "spring.datasource.druid.ds2");
        } catch (Exception e) {
            // Fail fast rather than return null (see firstDatasource()).
            throw new IllegalStateException("Failed to create datasource 'secondDatasource'", e);
        }
    }

    /**
     * Builds the ShardingSphere data source that fronts both physical data
     * sources and applies the database/table sharding strategies.
     *
     * @return the sharding {@link DataSource} used by MyBatis and the
     *         transaction manager
     * @throws SQLException if ShardingSphere fails to assemble the data source
     */
    @Bean(name = "shardingDataSource")
    DataSource getShardingDataSource() throws SQLException {
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        shardingRuleConfig.getTableRuleConfigs().add(getUserTableRuleConfiguration());
        // Binding tables share the same sharding key; for several tables use a
        // comma-separated list, e.g. "user_info,t_order".
        shardingRuleConfig.getBindingTableGroups().add("user");
        // Tables without an explicit rule go to the default data source.
        shardingRuleConfig.setDefaultDataSourceName("ds_0");
        // Custom algorithms decide the target database and table from the "id" column.
        shardingRuleConfig.setDefaultDatabaseShardingStrategyConfig(new StandardShardingStrategyConfiguration("id", new DatabaseShardingAlgorithm()));
        shardingRuleConfig.setDefaultTableShardingStrategyConfig(new StandardShardingStrategyConfiguration("id", new TableShardingAlgorithm()));

        return ShardingDataSourceFactory.createDataSource(createDataSourceMap(), shardingRuleConfig, new ConcurrentHashMap<>(), new Properties());
    }

    /**
     * Table rule for the logical {@code user} table.
     *
     * @return rule mapping {@code user} onto {@code ds_0..ds_1} and
     *         {@code user_0..user_3}
     */
    @Bean
    TableRuleConfiguration getUserTableRuleConfiguration() {
        TableRuleConfiguration orderTableRuleConfig = new TableRuleConfiguration();
        // Logical table name used in SQL.
        orderTableRuleConfig.setLogicTable("user");
        // Actual data nodes (data source name + physical table name):
        // 2 databases (ds_0, ds_1) x 4 tables (user_0..user_3).
        orderTableRuleConfig.setActualDataNodes("ds_${0..1}.user_${0..3}");
        // Generated key column; ShardingSphere's default generator is snowflake.
        orderTableRuleConfig.setKeyGeneratorColumnName("id");
        return orderTableRuleConfig;
    }

    /**
     * Maps the ShardingSphere data-source names used in the rule configuration
     * onto the physical Druid data sources.
     *
     * @return name → data source map ({@code ds_0}, {@code ds_1})
     */
    private Map<String, DataSource> createDataSourceMap() {
        Map<String, DataSource> result = new HashMap<>(4);
        result.put("ds_0", firstDatasource());
        result.put("ds_1", secondDatasource());
        return result;
    }

    /**
     * Transaction manager bound to the sharding data source. Must be declared
     * manually so {@code @Transactional} operates on the sharded connection.
     * Named {@code transactionManager} so Spring's by-name resolution finds it.
     *
     * @param shardingDataSource the sharding data source
     * @return the transaction manager
     */
    @Bean
    public DataSourceTransactionManager transactionManager(@Qualifier("shardingDataSource") DataSource shardingDataSource) {
        return new DataSourceTransactionManager(shardingDataSource);
    }

    /**
     * MyBatis session factory backed by the sharding data source, loading
     * mapper XML files from {@code classpath:mapper/*.xml}.
     *
     * @param shardingDataSource the sharding data source
     * @return the MyBatis {@link SqlSessionFactory}
     * @throws Exception if the factory bean fails to build
     */
    @Bean
    @Primary
    public SqlSessionFactory sqlSessionFactory(@Qualifier("shardingDataSource") DataSource shardingDataSource) throws Exception {
        SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
        bean.setDataSource(shardingDataSource);
        bean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:mapper/*.xml"));
        return bean.getObject();
    }

    /**
     * Thread-safe MyBatis session template over the session factory.
     *
     * @param sqlSessionFactory the session factory
     * @return the {@link SqlSessionTemplate}
     * @throws Exception declared for interface compatibility
     */
    @Bean
    @Primary
    public SqlSessionTemplate testSqlSessionTemplate(SqlSessionFactory sqlSessionFactory) throws Exception {
        return new SqlSessionTemplate(sqlSessionFactory);
    }
}
