package streaming.demo.mq.kafka;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import streaming.demo.mq.kafka.config.MyPartitioner;
import streaming.demo.mq.kafka.config.MySchema;
import utils.PropertiesReader;

import java.util.Properties;

/**
 * Case 1: source from Kafka, sink back to Kafka — records are routed to a
 * specific Kafka partition based on the hash of selected field(s).
 * <p>
 * Kafka payload format: JSON; approach: DataStream API.
 * kafka -> kafka
 * Source:  Kafka topic [csvTest1]
 * Sink:    Kafka topic [csvTest2], partition chosen by the hash of a key field
 */
public class KafkaSinkKafka02_1 {

    // Connection/topic settings loaded from the project's properties file.
    private static final String kafkaServers = PropertiesReader.get("default.kafka.servers");
    private static final String topicFrom = PropertiesReader.get("default.kafka.topic.csv.A");
    private static final String topicTo = PropertiesReader.get("default.kafka.topic.csv.B");

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Consumer configuration.
        // NOTE(review): group.id says "KafkaSinkKafka01" although this class is
        // KafkaSinkKafka02_1 — looks like a copy-paste; left unchanged because
        // changing a consumer group id moves committed offsets. Confirm intent.
        Properties consumerProps = new Properties();
        consumerProps.setProperty("bootstrap.servers", kafkaServers);
        consumerProps.setProperty("group.id", "flink-consumer-KafkaSinkKafka01");

        // Producer configuration.
        Properties producerProps = new Properties();
        producerProps.setProperty("bootstrap.servers", kafkaServers);

        // Read raw string records from the source Kafka topic.
        DataStream<String> inputStream =
                env.addSource(new FlinkKafkaConsumer<>(topicFrom, new SimpleStringSchema(), consumerProps));

        // Replace commas with spaces in each record.
        // NOTE(review): the original comment claimed the opposite direction
        // ("convert spaces to commas") but the code replaces "," with " " —
        // documented the actual behavior; confirm which was intended.
        DataStream<String> dataStream = inputStream.map(line -> line.replace(",", " "));

        dataStream.print();

        // Sink to a partition selected by MyPartitioner from the key's hash value.
        FlinkKafkaProducer<String> flinkProducer = new FlinkKafkaProducer<>(
                topicTo, new MySchema(), producerProps, java.util.Optional.of(new MyPartitioner()));

        // Write the transformed records to the target Kafka topic.
        dataStream.addSink(flinkProducer);
        env.execute();
    }

}
