package com.zenitera.bigdata.source;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.util.Properties;

/**
 * Flink streaming job that reads string records from a Kafka topic and prints
 * each record to stdout.
 *
 * <p>Demo convention: the topic name and the consumer group id both reuse the
 * class name.
 */
public class Flink04_Source_Kafka {

    /** Kafka topic to consume; also used as the consumer group id. */
    private static final String TOPIC = "Flink04_Source_Kafka";
    private static final String GROUP_ID = "Flink04_Source_Kafka";
    private static final String BOOTSTRAP_SERVERS =
            "wangt-flink01:9092,wangt-flink02:9092,wangt-flink03:9092";

    public static void main(String[] args) throws Exception {

        // Kafka consumer configuration.
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", BOOTSTRAP_SERVERS);
        properties.setProperty("group.id", GROUP_ID);
        // With no committed offset for the group, start reading from the
        // latest records rather than the beginning of the topic.
        properties.setProperty("auto.offset.reset", "latest");

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env
                .addSource(new FlinkKafkaConsumer<>(TOPIC, new SimpleStringSchema(), properties))
                .print();

        // Propagate failures instead of swallowing them: the previous
        // catch-and-printStackTrace made a failed job exit with status 0.
        // Naming the job also makes it identifiable in the Flink web UI.
        env.execute("Flink04_Source_Kafka");
    }
}

/*
[wangting@wangt-flink03 ~]$ kafka-topics.sh --bootstrap-server wangt-flink03:9092 --create --partitions 1 --replication-factor 3 --topic Flink04_Source_Kafka
Created topic Flink04_Source_Kafka.
[wangting@wangt-flink03 ~]$ kafka-console-producer.sh --broker-list wangt-flink03:9092 --topic Flink04_Source_Kafka
>helloworld1
>helloworld2
>helloworld3
>

==================
4> helloworld1
4> helloworld2
4> helloworld3

 */