package com.flink.wc.demo.demo_20241225;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.util.Properties;

/**
 * Demo job: consume string records from a Kafka topic and print them to stdout.
 *
 * <p>Connects to the broker list hard-coded below as consumer group
 * {@code flink_group}, deserializes each record with {@link SimpleStringSchema},
 * and prints every element with parallelism 1 so output order is stable.
 */
public class FlinkReadKafka {
    public static void main(String[] args) throws Exception {
        // 1. Create the streaming execution environment.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment();
        // 2. Configure the Kafka connection (broker list and consumer group).
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "192.168.151.168:9092,192.168.151.169:9092,192.168.151.170:9092");
        properties.setProperty("group.id", "flink_group");
        // 3. Create the Kafka consumer. Parameterized with <String> (matching
        // SimpleStringSchema) instead of the raw type, so the downstream
        // stream is typed and no unchecked-conversion warnings are produced.
        FlinkKafkaConsumer<String> flinkKafkaConsumer = new FlinkKafkaConsumer<>(
                "test_tan_emn", // target topic
                new SimpleStringSchema(), // deserialization schema: bytes -> String
                properties
        );
        // Alternative start positions (left disabled; default is group offsets):
        // flinkKafkaConsumer.setStartFromEarliest(); // start from the earliest available record
        // flinkKafkaConsumer.setStartFromLatest(); // start from the latest record
        // flinkKafkaConsumer.setStartFromTimestamp(...); // start from a given epoch-millis timestamp
        // flinkKafkaConsumer.setStartFromGroupOffsets(); // default behavior
        // 4. Attach the Kafka source to the environment.
        DataStreamSource<String> dataStreamSource =
                env.addSource(flinkKafkaConsumer);
        // Print each record; parallelism 1 keeps a single output stream.
        dataStreamSource.print().setParallelism(1);
        // 5. Trigger job execution (blocks until the streaming job ends).
        env.execute("Flink kafka source");
    }

}
