package com.atguigu.flink.day03;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * @author Felix
 * @date 2024/8/12
 * Demonstrates Flink execution modes:
 *      STREAMING (DataStream API default): each record is processed as it arrives
 *      BATCH: for bounded input only; all data is collected and processed once
 *      AUTOMATIC: automatically selects streaming (unbounded source) or batch (bounded source)
 * The execution mode can be set in two ways:
 *      In code:  env.setRuntimeMode(RuntimeExecutionMode.BATCH);
 *      At job submission, via a command-line parameter:  bin/flink run -Dexecution.runtime-mode=BATCH
 */
public class Flink02_Exe_M {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // BATCH mode: the bounded input is collected and processed as a whole, so each
        // key emits a single final count instead of an incremental update per record.
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);

        // Allow the input file to be overridden on the command line; fall back to the
        // original hard-coded sample path for backward compatibility.
        String inputPath = args.length > 0
                ? args[0]
                : "D:\\dev\\workspace\\bigdata-0318\\input\\words.txt";

        env
                .readTextFile(inputPath)
                .flatMap(
                        new FlatMapFunction<String, Tuple2<String, Long>>() {
                            @Override
                            public void flatMap(String lineStr, Collector<Tuple2<String, Long>> out) {
                                // Split each line on single spaces and emit one (word, 1) pair per token.
                                for (String word : lineStr.split(" ")) {
                                    out.collect(Tuple2.of(word, 1L));
                                }
                            }
                        }
                )
                // keyBy(int) on tuples is deprecated; a typed KeySelector lambda keeps the
                // key type (String) visible to the runtime and the compiler.
                .keyBy(wordCount -> wordCount.f0)
                .sum(1)
                .print();

        env.execute();
    }
}
