package com.shujia.flink.source;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.util.ArrayList;

public class Demo1ListSource {

    /**
     * Word-count demo: builds a bounded stream from an in-memory Java
     * collection, splits each line into words, and prints per-word counts.
     */
    public static void main(String[] args) throws Exception {
        // Set up the Flink execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // The collection source is bounded, so run the job in batch mode.
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);

        // Bounded source backed by a plain Java collection.
        ArrayList<String> inputLines = new ArrayList<>();
        inputLines.add("java,spark");
        inputLines.add("hive,hadoop");
        inputLines.add("hive,flink");
        inputLines.add("hive,hive");

        DataStream<String> linesStream = env.fromCollection(inputLines);

        // Split each comma-separated line into (word, 1) pairs.
        // The explicit TypeInformation hint is required because Java
        // erases the lambda's generic Tuple2 type at compile time.
        DataStream<Tuple2<String, Integer>> pairsStream = linesStream.flatMap((line, out) -> {
            for (String token : line.split(",")) {
                out.collect(Tuple2.of(token, 1));
            }
        }, Types.TUPLE(Types.STRING, Types.INT));

        // Group by the word (field 0), sum the counts (field 1), and print.
        pairsStream
                .keyBy(pair -> pair.f0)
                .sum(1)
                .print();

        // Submit the job for execution.
        env.execute();
    }
}
