package com.zyh.flink.day01;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.util.Arrays;

/**
 * Streaming word count implemented with Java lambda operators.
 *
 * <p>Reads lines from a socket text source ({@code hadoop10:9999}), splits each
 * line into words, counts occurrences per word with a keyed rolling sum, and
 * prints the running totals to stdout.
 *
 * <p>Because Java lambdas erase generic type information, each lambda-based
 * transformation is followed by {@code returns(...)} to hand Flink an explicit
 * {@link org.apache.flink.api.common.typeinfo.TypeInformation}. The
 * {@code Types} utility provides these descriptors:
 * <ul>
 *   <li>JDK built-ins (Integer, String, List, Map, Tuple) — same-named
 *       fields/methods on {@code Types}</li>
 *   <li>third-party types — {@code Types.POJO(SomeClass.class)} or
 *       {@code Types.GENERIC(SomeClass.class)}</li>
 * </ul>
 */
public class FlinkWordCountJobWithLambda {
    public static void main(String[] args) throws Exception {
        // 1. Obtain the stream execution environment.
        StreamExecutionEnvironment executionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Create the source DataStream from a socket (one line per record).
        DataStreamSource<String> hadoop10 = executionEnvironment.socketTextStream("hadoop10", 9999);

        // 3. Transform the stream: split lines into words, pair each word with
        //    a count of 1, key by the word, and keep a rolling sum per key.
        SingleOutputStreamOperator<Tuple2<String, Integer>> result = hadoop10
                .flatMap((String line, Collector<String> collector) ->
                        Arrays.stream(line.split("\\s+")).forEach(collector::collect))
                // TypeInformation is Flink's own class-like descriptor of types;
                // required here because the lambda's generics are erased.
                .returns(Types.STRING)
                .map((String word) -> Tuple2.of(word, 1))
                .returns(Types.TUPLE(Types.STRING, Types.INT))
                // Lambda KeySelector instead of the deprecated index-based keyBy(0).
                .keyBy(tuple -> tuple.f0)
                // sum(1) is a built-in aggregation; its output type is already
                // known, so no returns(...) is needed after it.
                .sum(1);

        // 4. Emit results to the console via the print sink.
        result.print();

        // 5. Submit and run the streaming job.
        executionEnvironment.execute("wordcount");
    }
}
