package com.example.batchprocessing;

import org.springframework.batch.core.Job;
import org.springframework.batch.core.Step;
import org.springframework.batch.core.configuration.annotation.EnableBatchProcessing;
import org.springframework.batch.core.configuration.annotation.JobBuilderFactory;
import org.springframework.batch.core.configuration.annotation.StepBuilderFactory;
import org.springframework.batch.core.launch.support.RunIdIncrementer;
import org.springframework.batch.item.database.BeanPropertyItemSqlParameterSourceProvider;
import org.springframework.batch.item.database.JdbcBatchItemWriter;
import org.springframework.batch.item.database.JdbcCursorItemReader;
import org.springframework.batch.item.database.builder.JdbcBatchItemWriterBuilder;
import org.springframework.batch.item.file.FlatFileItemReader;
import org.springframework.batch.item.file.builder.FlatFileItemReaderBuilder;
import org.springframework.batch.item.file.mapping.BeanWrapperFieldSetMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.ClassPathResource;

import javax.sql.DataSource;
/**
 * Put Together a Batch Job
 * Now you need to put together the actual batch job. Spring Batch provides many utility classes that reduce
 * the need to write custom code. Instead, you can focus on the business logic.
 *
 * To configure your job, you must first create a Spring @Configuration class like the following example in
 * src/main/java/com/example/batchprocessing/BatchConfiguration.java:
 * */

/**
 * In Spring Batch a job may define many steps, and each step defines its own:
 * A. ItemReader  — reads the input data,
 * B. ItemProcessor — processes (transforms) the data,
 * C. ItemWriter  — writes the output data.
 * Every defined job is registered in the JobRepository,
 * and a JobLauncher is used to launch a given job.
 *
 * JobLauncher starts our jobs
 * JobRepository ---> [job]
 * Job  ---> [step1, step2, ...]
 * step ---> [A, B, C]
 *
 * */


// tag::setup[]
@Configuration
@EnableBatchProcessing
public class BatchConfiguration {

	@Autowired
	public JobBuilderFactory jobBuilderFactory;

	@Autowired
	public StepBuilderFactory stepBuilderFactory;
	// end::setup[]

	// tag::readerwriterprocessor[]

	/**
	 * The @EnableBatchProcessing annotation adds many critical beans that support jobs and
	 * saves a lot of leg work. This example uses a memory-based database (provided by
	 * @EnableBatchProcessing), meaning that, when the job is done, the data is gone. It also
	 * autowires the two builder factories used below.
	 */

	/**
	 * Creates the ItemReader for the step. It looks for a classpath file called
	 * sample-data.csv and parses each comma-delimited line ("firstName,lastName")
	 * into a {@link Person} bean.
	 */
	@Bean
	// A. ItemReader: reads Person records from a CSV file on the classpath.
	public FlatFileItemReader<Person> reader() {
		// Configure the field-set mapper with an explicit local variable rather than
		// double-brace initialization: the anonymous-subclass idiom creates an extra
		// class per call site and can pin the enclosing instance in memory.
		BeanWrapperFieldSetMapper<Person> fieldSetMapper = new BeanWrapperFieldSetMapper<>();
		fieldSetMapper.setTargetType(Person.class);
		return new FlatFileItemReaderBuilder<Person>()
			.name("personItemReader")
			.resource(new ClassPathResource("sample-data.csv")) // resolved from the classpath
			.delimited()
			.names(new String[]{"firstName", "lastName"}) // CSV columns mapped to Person properties
			.fieldSetMapper(fieldSetMapper)
			.build();
	}

	/**
	 * Creates the ItemProcessor for the step: a {@link PersonItemProcessor}, which
	 * converts the incoming data to upper case.
	 */
	@Bean
	// B. ItemProcessor: transforms each Person (upper-cases the names).
	public PersonItemProcessor processor() {
		return new PersonItemProcessor();
	}

	/**
	 * Creates the ItemWriter for the step. This one is aimed at a JDBC destination and
	 * automatically receives the dataSource created by @EnableBatchProcessing. The SQL
	 * statement inserts a single Person row, with values bound from the Person bean's
	 * properties by name.
	 */
	@Bean
	// C. ItemWriter: writes Person records to the "people" table in batches.
	public JdbcBatchItemWriter<Person> writer(DataSource dataSource) {
		return new JdbcBatchItemWriterBuilder<Person>()
			.itemSqlParameterSourceProvider(new BeanPropertyItemSqlParameterSourceProvider<>())
			.sql("INSERT INTO people (first_name, last_name) VALUES (:firstName, :lastName)")
			.dataSource(dataSource)
			.build();
	}
	// end::readerwriterprocessor[]

	// tag::jobstep[]
	/**
	 * The actual job configuration: a single-step job named "importUserJob" that notifies
	 * the listener on completion.
	 */
	@Bean
	// Defines the job; a job is built from one or more steps.
	public Job importUserJob(JobCompletionNotificationListener listener, Step step1) {
		return jobBuilderFactory.get("importUserJob")
			// An incrementer is needed because jobs use a database to maintain
			// execution state keyed by job parameters; RunIdIncrementer makes each
			// launch a distinct job instance.
			.incrementer(new RunIdIncrementer())
			.listener(listener)
			.flow(step1)
			.end()
			.build();
	}

	/**
	 * Defines the single step of the job: read with {@link #reader()}, transform with
	 * {@link #processor()}, and write with the injected {@link JdbcBatchItemWriter}.
	 */
	@Bean
	// Defines a single step; one step wires together a reader, a processor, and a writer.
	public Step step1(JdbcBatchItemWriter<Person> writer) {
		return stepBuilderFactory.get("step1")
			// Commit interval: up to 5 records are accumulated, then written in one
			// transaction. <Person, Person> declares the chunk's input/output types.
			.<Person, Person> chunk(5)
			.reader(reader())       // A. read
			.processor(processor()) // B. process
			.writer(writer)         // C. write
			.build();
	}
	// end::jobstep[]

	/**
	 * importUserJob defines the job and step1 defines its single step. Jobs are built from
	 * steps, where each step can involve a reader, a processor, and a writer.
	 *
	 * In the step definition, you define how much data to write at a time. In this case,
	 * it writes up to five records at a time (the chunk size above). Next, you configure
	 * the reader, processor, and writer using the beans injected earlier.
	 *
	 * chunk() is prefixed {@code <Person, Person>} because it is a generic method. This
	 * represents the input and output types of each "chunk" of processing and lines up
	 * with ItemReader&lt;Person&gt; and ItemWriter&lt;Person&gt;.
	 */

}
