package com.shujia.flink.core;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class Demo2BatchWordCount {

    /**
     * Batch-style word count: reads a bounded text file, pairs every record
     * with a count of 1, groups by the text, and sums the counts.
     *
     * @param args unused command-line arguments
     * @throws Exception propagated from the Flink job submission/execution
     */
    public static void main(String[] args) throws Exception {
        // 1. Obtain the Flink streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        /*
         * Execution mode:
         *  - RuntimeExecutionMode.BATCH: MapReduce-style batch processing;
         *    emits only the final result and works on bounded streams only.
         *  - RuntimeExecutionMode.STREAMING: continuous-flow processing;
         *    emits incremental results and handles both bounded and
         *    unbounded streams.
         */
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);

        // 2. Read the input file as a bounded stream (one record per line).
        // NOTE(review): no splitting is done here, so each line is counted as
        // a single word — presumably words.txt is one word per line; verify.
        DataStream<String> lines = env.readTextFile("flink/data/words.txt");

        // 3. Pair every record with an initial count of 1. The explicit tuple
        // type information is required because Java lambdas erase generics.
        DataStream<Tuple2<String, Integer>> wordPairs =
                lines.map(word -> Tuple2.of(word, 1), Types.TUPLE(Types.STRING, Types.INT));

        // Group by the word itself (tuple field f0) ...
        KeyedStream<Tuple2<String, Integer>, String> keyedWords = wordPairs.keyBy(pair -> pair.f0);

        // ... and sum the counts (tuple field at index 1).
        DataStream<Tuple2<String, Integer>> wordCounts = keyedWords.sum(1);

        // Emit the results to stdout.
        wordCounts.print();

        // Launch the Flink job.
        env.execute();
    }
}
