package com.atguigu.flink.chapter05.source;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/*
    Demo: read a stream of string records from Kafka using the Flink KafkaSource connector.
 */
/**
 * Demo job: consumes string records from the Kafka topic {@code s1} and prints
 * each record to stdout. The Flink web UI is pinned to port 20000 so several
 * local jobs can run side by side without port clashes.
 */
public class KafkaSourceDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 20000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        // Usually kept equal to the Kafka topic's partition count so every
        // source subtask reads from exactly one partition.
        env.setParallelism(2);

        // KafkaSource.<String>builder(): generic method, type argument supplied explicitly.
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092")
                .setGroupId("atguigu")
                .setTopics("s1")
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new SimpleStringSchema()) // deserialize record values as UTF-8 strings
                .build();

        DataStreamSource<String> stream = env.fromSource(
                source,
                WatermarkStrategy.noWatermarks(),
                "kafka-source");

        stream.print();

        // Let failures propagate instead of swallowing them with printStackTrace():
        // a failed Flink job should terminate the client with a non-zero exit code
        // so schedulers/CI can detect the failure.
        env.execute();
    }
}

// NOTE(review): removed two commented-out, near-identical copies of KafkaSourceDemo
// that previously followed here — dead code belongs in version control history,
// not in the source file.