package com.flink.wc.demo.demo_20241225;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

import java.util.Arrays;
import java.util.List;

/**
 * Minimal Flink streaming job that writes a small, hard-coded set of
 * access-log lines (tab-separated: ip, timestamp, method, endpoint) to a
 * Kafka topic, and also prints each record to stdout for inspection.
 */
public class FlinkWriteKafka {
    public static void main(String[] args) throws Exception {
        // 1. Create the stream execution environment.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Build the in-memory sample data (tab-separated log lines).
        List<String> records = Arrays.asList(
                "192.168.116.141\t1601297294548\tPOST\taddOrder",
                "192.168.116.142\t1601297294549\tGET\tgetOrder"
        );
        // The source is a bounded collection, not a socket.
        DataStreamSource<String> sourceStream = env.fromCollection(records);

        // 3. Configure the Kafka producer sink.
        //    Use the generic type parameter <String> — the raw type loses
        //    compile-time type checking on addSink().
        FlinkKafkaProducer<String> kafkaProducer = new FlinkKafkaProducer<>(
                "192.168.151.168:9092,192.168.151.169:9092,192.168.151.170:9092", // broker list
                "test_tan_emn",           // target topic
                new SimpleStringSchema()  // serialization schema
        );

        // 4. Attach the Kafka sink, and print records locally for debugging.
        sourceStream.addSink(kafkaProducer);
        sourceStream.print().setParallelism(1);

        // 5. Submit and run the job.
        env.execute("flink kafka sink");
    }
}
