package com.batch.adance.migrate;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.annotation.Resource;
import javax.sql.DataSource;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.batch.core.Job;
import org.springframework.batch.core.Step;
import org.springframework.batch.core.configuration.annotation.JobBuilderFactory;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.batch.item.ItemProcessor;
import org.springframework.batch.item.ItemReader;
import org.springframework.batch.item.ItemWriter;
import org.springframework.batch.item.database.JdbcPagingItemReader;
import org.springframework.batch.item.database.Order;
import org.springframework.batch.item.database.support.MySqlPagingQueryProvider;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import com.batch.adance.jdbc.Person;
import com.batch.adance.jdbc.PersonRowMapper;

/**
 * Batch-migration job configuration ({@code @EnableBatchProcessing} elsewhere supplies the
 * base infrastructure for building batch jobs).
 *
 * Benchmark: migrating 245,760 rows —
 *   chunk size 10,000:  Job Cost Time : 143274ms
 *   chunk size 10,000:  Job Cost Time : 117856ms
 *   chunk size 100,000 with the processor disabled: Job Cost Time : 54216ms
 */
@Configuration
public class DataBatchMigrateConfiguration {
	private static final Logger log = LoggerFactory.getLogger(DataBatchMigrateConfiguration.class);

	/**
	 * Shared sizing for the step's commit interval, the reader's page size and the JDBC
	 * fetch-size hint, so one page of rows maps onto one chunk/transaction.
	 */
	private static final int CHUNK_SIZE = 100000;

	@Resource
	private JobBuilderFactory jobBuilderFactory; // builds Job instances

	@Resource
	private StepBuilderFactory stepBuilderFactory; // builds Step instances

	@Resource
	private JobListener jobListener; // simple job lifecycle listener

	@Resource
	private DataSource dataSource;

	@Resource
	private PersonDao personDao;

	/**
	 * The migration Job: a single Step plus a lifecycle listener.
	 * A Job is normally composed of one or more Steps.
	 *
	 * @return the configured migration Job
	 */
	@Bean
	public Job dataMigrateHandleJob() {
		return jobBuilderFactory.get("dataMigrateHandleJob2")
                .start(handleDataStep()).listener(jobListener)
                .build();
	}

	/**
	 * The migration Step. A chunk-oriented Step has three parts:
	 * ItemReader (reads rows), ItemProcessor (transforms them — disabled here for
	 * throughput, see the class-level benchmark), ItemWriter (persists them).
	 *
	 * Fault tolerance: a failing item is retried up to 3 times; once retries are
	 * exhausted the item is skipped, and after 100 skips the Job stops and is
	 * marked failed. NOTE(review): retrying/skipping on bare {@code Exception} is
	 * very broad — consider narrowing to transient exception types.
	 *
	 * @return the configured migration Step
	 */
	@Bean
	public Step handleDataStep() {
		return stepBuilderFactory.get("migrateData2")
				.<Person, Person>chunk(CHUNK_SIZE) // <input, output>; chunk ≈ commit interval
				.faultTolerant()
				.retryLimit(3).retry(Exception.class)
				.skipLimit(100).skip(Exception.class)
				.reader(getDataReader())
				// .processor(getDataProcessor()) // intentionally disabled for throughput
				.writer(getDataWriter())
				.build();
	}

	/**
	 * Paging reader over {@code person_buf}, ordered by {@code id} (paging requires a
	 * unique, stable sort key).
	 *
	 * @return a JdbcPagingItemReader producing one Person per row
	 */
	@Bean
	public ItemReader<? extends Person> getDataReader() {
		JdbcPagingItemReader<Person> reader = new JdbcPagingItemReader<>();
		reader.setDataSource(this.dataSource);
		// FIX: page size was never set; AbstractPagingItemReader defaults to 10,
		// which would issue ~24,500 paging queries for 245k rows. Align it with
		// the chunk size so each transaction consumes exactly one page.
		reader.setPageSize(CHUNK_SIZE);
		reader.setFetchSize(CHUNK_SIZE); // JDBC driver fetch-size hint
		reader.setRowMapper(new PersonRowMapper()); // maps each row to a Person
		MySqlPagingQueryProvider queryProvider = new MySqlPagingQueryProvider();
		queryProvider.setSelectClause("id,name,per_desc,create_time,update_time,sex,score,price");
		queryProvider.setFromClause("from person_buf");
		Map<String, Order> sortKeys = new HashMap<String, Order>();
		sortKeys.put("id", Order.ASCENDING); // ascending by primary key
		queryProvider.setSortKeys(sortKeys);
		reader.setQueryProvider(queryProvider);
		return reader;
	}

	/**
	 * Pass-through processor that logs each item (simulated processing).
	 * Currently not wired into the Step — see {@link #handleDataStep()}.
	 *
	 * @return an identity ItemProcessor that logs every Person
	 */
	@Bean
	public ItemProcessor<Person, Person> getDataProcessor() {
		return new ItemProcessor<Person, Person>() {
			@Override
			public Person process(Person access) throws Exception {
				// Parameterized logging: no string concatenation when INFO is disabled.
				log.info("processor data : {}", access);
				return access;
			}
		};
	}

	/**
	 * Writer that persists each Person of the chunk via {@link PersonDao#save}.
	 * NOTE(review): one save per item; a JDBC batch insert would likely be faster
	 * — confirm PersonDao's capabilities before changing.
	 *
	 * @return an ItemWriter delegating to personDao
	 */
	@Bean
	public ItemWriter<Person> getDataWriter() {
		return new ItemWriter<Person>() {

			@Override
			public void write(List<? extends Person> items) throws Exception {
				for (Person item : items) {
					personDao.save(item);
				}
			}
		};
	}
}