package com.atguigu.flink.chapter05.Sink;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

/**
 * Flink sink example: forwards a socket text stream into a Kafka topic
 * using {@code FlinkKafkaProducer} with the default string serialization.
 *
 * <p>Run a local feeder first, e.g. {@code nc -lk 9999}, then start this job.
 *
 * @author cjp
 * @version 1.0
 * @date 2021/8/10 8:59
 */
public class Flink01_Kafka {
    public static void main(String[] args) throws Exception {
        // Single-parallelism environment keeps this demo's behavior easy to follow.
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        environment.setParallelism(1);

        // Source: one String element per line typed into the local socket.
        DataStreamSource<String> lines = environment.socketTextStream("localhost", 9999);

        // Sink: each line is serialized as a plain string record and written
        // to the "flink210323" topic on the given broker list.
        FlinkKafkaProducer<String> producer =
                new FlinkKafkaProducer<>(
                        "hadoop1:9092,hadoop2:9092,hadoop3:9092",
                        "flink210323",
                        new SimpleStringSchema());
        lines.addSink(producer);

        // Lazily built pipeline only runs once execute() is called.
        environment.execute();
    }
}
/*
    Default Kafka partitioner used by the sink: FlinkFixedPartitioner

    sink parallelism > number of Kafka partitions ==> several parallel sink instances may write to the same Kafka partition
    sink parallelism < number of Kafka partitions ==> only a subset of the Kafka partitions receives data
 */