package net.xuele.learn.flink.example;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.KeyedBroadcastProcessFunction;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.util.Collector;

public class Example {
    public static void main(String[] args) throws Exception {
        // Every Flink application needs an execution environment; streaming
        // applications use a StreamExecutionEnvironment.
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();

        // Sources can be built in many ways (files, sockets, ...). The most
        // common production sources are those supporting low-latency,
        // high-throughput parallel reads with replay (prerequisites for
        // performance and fault tolerance), e.g. Kafka.
        DataStream<Person> people = environment.fromElements(
                new Person("hh", 28),
                new Person("pp", 18),
                new Person("kk", 2));

        // Keep only records whose age exceeds 17.
        DataStream<Person> grownUps = people.filter((FilterFunction<Person>) p -> p.age > 17);

        // Sink: print each surviving record to stdout.
        grownUps.print();

        // The DataStream API assembles the application into a job graph
        // attached to the StreamExecutionEnvironment. Calling execute()
        // packages that graph and submits it to the JobManager.
        JobExecutionResult executionResult = environment.execute("Filter sensor readings");
        JobID id = executionResult.getJobID();
        System.out.println("++++++++++++++++++++");
        System.out.println(id.toString());
    }
}
