package source;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.io.Serializable;
import java.util.Properties;

public class KafkaSourceUtils implements Serializable {

    /** Shared local execution environment with the Flink web UI enabled. */
    public static StreamExecutionEnvironment env =
            StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());

    /**
     * Creates a Kafka-backed {@code DataStream<String>} on the shared {@link #env}.
     *
     * <p>Consumes topic {@code myv1test} from brokers {@code master3/4/5:9092} as
     * consumer group {@code moudletest}. If the group has no committed offset the
     * consumer starts from the earliest available offset; otherwise it resumes
     * from the committed position.
     *
     * @return a stream of raw Kafka record values decoded as strings
     */
    public static DataStream<String> createKafkaSource() {
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "master3:9092,master4:9092,master5:9092");
        properties.setProperty("group.id", "moudletest");
        // If no offset has been committed for this group, consume from the
        // beginning; otherwise continue from the committed offset.
        properties.setProperty("auto.offset.reset", "earliest");
        // Fix: key was misspelled "enable.auto.commiit", so the setting was
        // silently ignored and auto-commit stayed at the client default.
        properties.setProperty("enable.auto.commit", "true");
        return env.addSource(
                new FlinkKafkaConsumer<String>("myv1test", new SimpleStringSchema(), properties));
    }
}
