package com.xq.config;

import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.spring.boot.autoconfigure.DruidDataSourceBuilder;
import com.xq.service.shareding.SnowflakeIdWorker;
import com.xq.service.shareding.TableShardingAlgorithm;
import io.shardingsphere.api.config.rule.ShardingRuleConfiguration;
import io.shardingsphere.api.config.rule.TableRuleConfiguration;
import io.shardingsphere.api.config.strategy.StandardShardingStrategyConfiguration;
import io.shardingsphere.core.keygen.DefaultKeyGenerator;
import io.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.ImportResource;
import org.springframework.context.annotation.Primary;

import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;

@Configuration
public class DataSourceConfig {

    @Autowired
    private ApplicationContext applicationContext;

    /**
     * Builds the underlying Druid connection pool, populated from the
     * {@code spring.datasource.*} properties.
     *
     * @return the Druid-backed {@link DataSource}
     */
    @ConfigurationProperties(prefix = "spring.datasource")
    @Bean
    public DataSource druidDataSource() {
        return DruidDataSourceBuilder.create().build();
    }

    /**
     * Builds the primary, sharding-aware {@link DataSource} that wraps the Druid
     * pool. Routing for {@code act_telephone_contact} is delegated to the custom
     * {@link TableShardingAlgorithm} bean, keyed on the {@code user_id} column.
     *
     * @return the ShardingSphere data source used by the application
     * @throws SQLException if ShardingSphere fails to create the data source
     */
    @Bean(name = "shardingDataSource")
    @Primary
    DataSource getShardingDataSource() throws SQLException {
        ShardingRuleConfiguration shardingRuleConfig = new ShardingRuleConfiguration();
        // Register the table rule; add further rules here for additional logic tables.
        shardingRuleConfig.getTableRuleConfigs().add(getTableRuleConfiguration());
        // NOTE(review): a binding-table group normally lists SEVERAL tables that share
        // the same sharding key (comma-separated); with a single table it has no
        // routing effect. Kept as-is — confirm whether more tables were intended.
        shardingRuleConfig.getBindingTableGroups().add("act_telephone_contact");
        // Default table-sharding strategy: route by user_id through the custom algorithm bean.
        TableShardingAlgorithm tableShardingAlgorithm = applicationContext.getBean(TableShardingAlgorithm.class);
        shardingRuleConfig.setDefaultTableShardingStrategyConfig(
                new StandardShardingStrategyConfiguration("user_id", tableShardingAlgorithm));
        Properties props = new Properties();
        // Log the rewritten SQL actually sent to the physical tables (debug aid).
        props.put("sql.show", "true");
        // Typed empty config map (was a raw ConcurrentHashMap, causing an unchecked warning).
        return ShardingDataSourceFactory.createDataSource(
                createDataSourceMap(), shardingRuleConfig, new ConcurrentHashMap<String, Object>(), props);
    }

    /**
     * Configures the table rule for the {@code act_telephone_contact} logic table.
     * <p>
     * The actual data nodes are the base table plus the month-suffixed physical
     * tables ({@code _2021_05} .. {@code _2021_07}) in the {@code msxdb} data
     * source. The primary key column {@code id} is generated with the default
     * snowflake-based {@link DefaultKeyGenerator}.
     *
     * @return the table rule configuration for {@code act_telephone_contact}
     */
    TableRuleConfiguration getTableRuleConfiguration() {
        TableRuleConfiguration tableRule = new TableRuleConfiguration();
        tableRule.setLogicTable("act_telephone_contact");
        // Real nodes: data-source name + physical table name, comma-separated.
        tableRule.setActualDataNodes("msxdb.act_telephone_contact," +
                "msxdb.act_telephone_contact_2021_05," +
                "msxdb.act_telephone_contact_2021_06," +
                "msxdb.act_telephone_contact_2021_07");
        // Primary-key column generated by the default (snowflake) key generator.
        tableRule.setKeyGeneratorColumnName("id");
        tableRule.setKeyGenerator(new DefaultKeyGenerator());
        return tableRule;
    }

    /**
     * Assembles the data-source map consumed by ShardingSphere. The key
     * ({@code msxdb}) must match the data-source name used in the actual data
     * nodes of the table rules.
     *
     * @return map of data-source name to the Druid {@link DataSource}
     */
    private Map<String, DataSource> createDataSourceMap() {
        // Calling druidDataSource() inside a @Configuration class returns the
        // singleton bean (CGLIB-enhanced), not a fresh pool.
        Map<String, DataSource> result = new HashMap<>();
        result.put("msxdb", druidDataSource());
        return result;
    }
}
