/**
 * Spring Batch partitioning support for ShardingSphere-backed jobs.
 */
package org.perfect.batch.partition;

import java.util.HashMap;
import java.util.Map;

import org.apache.shardingsphere.core.rule.TableRule;
import org.apache.shardingsphere.shardingjdbc.jdbc.core.datasource.ShardingDataSource;
import org.apache.shardingsphere.underlying.common.rule.DataNode;
import org.perfect.batch.BatchConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.batch.core.partition.support.Partitioner;
import org.springframework.batch.item.ExecutionContext;
import org.springframework.beans.factory.annotation.Autowired;

/**
 * Spring Batch {@link Partitioner} that fans a step out over the physical
 * data nodes of a ShardingSphere logical database.
 *
 * <p>For every {@link TableRule} configured on the injected
 * {@link ShardingDataSource}, one partition is created per actual
 * {@link DataNode} (i.e. per concrete data-source/table pair). Each
 * partition's {@link ExecutionContext} carries the logic table name, the
 * physical data-source name and the physical table name under the
 * {@link BatchConstants} keys, so the partitioned step can target exactly
 * one shard.
 *
 * <p>Note: the {@code gridSize} argument is deliberately ignored — the
 * number of partitions is fully determined by the sharding topology, not by
 * the caller's requested grid size.
 *
 * @author billmse
 */
public class DataSourcePartitioner implements Partitioner {

	/** Class-scoped SLF4J logger (static final per SLF4J convention). */
	private static final Logger LOGGER = LoggerFactory.getLogger(DataSourcePartitioner.class);

	/** Sharding-aware data source whose rules define the partition set. */
	@Autowired
	private ShardingDataSource dataSource;

	/**
	 * Builds one partition per actual data node of every configured table rule.
	 *
	 * @param gridSize ignored; partition count follows the sharding topology
	 * @return map of partition key ({@code BATCH_PARTITION_KEY_PREFIX} + index)
	 *         to an {@link ExecutionContext} holding the logic table, physical
	 *         data-source and physical table names; empty when no data source
	 *         has been injected
	 */
	@Override
	public Map<String, ExecutionContext> partition(int gridSize) {
		Map<String, ExecutionContext> partitions = new HashMap<>();
		if (null == dataSource) {
			// Nothing to partition over; return an empty (not null) map.
			return partitions;
		}
		int index = 0;
		for (TableRule rule : dataSource.getRuntimeContext().getRule().getTableRules()) {
			String logicTableName = rule.getLogicTable();
			for (DataNode node : rule.getActualDataNodes()) {
				ExecutionContext context = new ExecutionContext();
				context.put(BatchConstants.BATCH_PARTITION_LOGIC_TABLE_NAME, logicTableName);
				context.put(BatchConstants.BATCH_PARTITION_DATASOURCE_NAME, node.getDataSourceName());
				context.put(BatchConstants.BATCH_PARTITION_TABLE_NAME, node.getTableName());
				String partitionKey = BatchConstants.BATCH_PARTITION_KEY_PREFIX + (index++);
				LOGGER.info("{}: {}", partitionKey, context);
				partitions.put(partitionKey, context);
			}
		}
		return partitions;
	}

}
