package com.atguigu.flink.chapter01_wordcount;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.operators.AggregateOperator;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.operators.FlatMapOperator;
import org.apache.flink.api.java.operators.UnsortedGrouping;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Created by Smexy on 2022/10/19
 *
 *      Word count over data/words.txt.
 *
 *      Batch processing: rarely written in practice — most Flink jobs are streaming.
 */
public class Demo1_BatchExecution
{
    public static void main(String[] args) throws Exception {

        // Batch execution environment: for bounded data (DataSet / batch API).
        ExecutionEnvironment executionEnvironment = ExecutionEnvironment.getExecutionEnvironment();

        // A streaming environment would be used for unbounded streams instead:
        //StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();

        // DataSource: a bounded batch source, conceptually similar to an RDD.
        DataSource<String> dataSource = executionEnvironment.readTextFile("data/words.txt");

        dataSource
            // Split each line into words and emit a (word, 1) tuple per word.
            .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>()
            {
                /*
                 * line: one line of input text
                 * out:  collector that receives the emitted (word, 1) tuples
                 */
                @Override
                public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                    // Split on runs of whitespace so consecutive spaces or tabs do
                    // not produce empty "words" (split(" ") would emit "" tokens),
                    // and skip any remaining blank token (e.g. from a leading space).
                    for (String word : line.split("\\s+")) {
                        if (!word.isEmpty()) {
                            out.collect(Tuple2.of(word, 1));
                        }
                    }
                }
            })

            // Group by the word (tuple field 0) and sum the counts (field 1),
            // then print the aggregated (word, count) results.
            .groupBy(0)
            .sum(1)
            .print();
    }
}
