package flink.batch;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.AggregateOperator;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.operators.FlatMapOperator;
import org.apache.flink.api.java.operators.MapOperator;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

/**
 * Offline batch word count (WordCount) implemented with the Flink DataSet API.
 * The standard five steps:
 * 1. prepare the environment (env)
 * 2. prepare the data (source)
 * 3. process the data (transformation)
 * 4. emit the result (sink)
 * 5. trigger execution (execute)
 */
public class _01BatchWordCount {

	public static void main(String[] args) throws Exception {
		// 1. Prepare the execution environment.
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		// 2. Prepare the data source. The input path may be supplied as the
		// first program argument; otherwise fall back to the original
		// hard-coded path (backward compatible).
		String inputPath = args.length > 0 ? args[0] : "D:\\0615\\bigdata-flink\\datas\\wordcount.data";
		DataSource<String> inputDataSet = env.readTextFile(inputPath);

		// 3. Transformations:
		//    step1: split each line into words on whitespace
		//    step2: map each word to a (word, 1) tuple
		//    step3: group by word and sum the counts

		// step1: split each line on runs of whitespace. Blank tokens are
		// skipped: trim().split("\\s+") on an empty line yields [""], which
		// would otherwise be counted as a bogus empty-string "word".
		FlatMapOperator<String, String> wordDataSet = inputDataSet.flatMap(new FlatMapFunction<String, String>() {
			@Override
			public void flatMap(String line, Collector<String> out) throws Exception {
				for (String word : line.trim().split("\\s+")) {
					if (!word.isEmpty()) {
						out.collect(word);
					}
				}
			}
		});

		// step2: convert each word into a (word, 1) tuple.
		MapOperator<String, Tuple2<String, Integer>> tupleDataSet = wordDataSet.map(new MapFunction<String, Tuple2<String, Integer>>() {
			@Override
			public Tuple2<String, Integer> map(String word) throws Exception {
				return Tuple2.of(word, 1);
			}
		});

		// step3: group by the word (tuple field 0) and sum the counts (field 1).
		/*
			spark spark hadoop
						|
			(spark, 1) (spark, 1) (hadoop, 1)
						|
			spark -> [(spark, 1) (spark, 1) ]   hadoop -> [(hadoop, 1) ]
		 */
		AggregateOperator<Tuple2<String, Integer>> resultDataSet = tupleDataSet.groupBy(0).sum(1);

		// 4. Sink. In the DataSet API print() is an action that triggers
		// execution itself, so no explicit env.execute() call is needed
		// (adding one after print() would fail with "No new data sinks").
		resultDataSet.print();
	}

}
