package com.sdg.kafkastream;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.processor.TopologyBuilder;

import java.util.Properties;

// Kafka connection identifiers for this Streams application.


public class Application {
    /** Kafka broker bootstrap servers as a comma-separated host:port list. */
    private static final String KAFKA_BOOTSTRAP_SERVERS = "vm0:9092,vm1:9092,vm2:9092";
    /** Application id for this Kafka Streams job (also the consumer group id). */
    public static final String APPLICATION_ID_CONFIG = "logProcesser";
    /** ZooKeeper ensemble — required only by the legacy (pre-0.10.1) Streams API used here. */
    private static final String ZOOKEEPERS_SERVERS = "vm0:2181,vm1:2181,vm2:2181";
    /** Topic the stream consumes its input records from. */
    private static final String KAFKA_INPUT_TOPIC = "source";
    /** Topic the processed records are written to. */
    private static final String KAFKA_OUTPUT_TOPIC = "sink";

    /**
     * Builds and starts the streams topology: SOURCE topic -> LogProcessor -> SINK topic.
     * Configuration is hard-coded above; it could also be passed via {@code args}.
     */
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Identify this streams application / consumer group.
        properties.put(StreamsConfig.APPLICATION_ID_CONFIG, APPLICATION_ID_CONFIG);
        properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVERS);
        // NOTE(review): ZOOKEEPER_CONNECT_CONFIG exists only in the old Streams API
        // this file targets; newer clients no longer need ZooKeeper.
        properties.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG, ZOOKEEPERS_SERVERS);

        // Wrap the raw properties in a typed Streams configuration.
        StreamsConfig config = new StreamsConfig(properties);

        // Assemble the processing topology.
        TopologyBuilder builder = new TopologyBuilder();
        builder.addSource("SOURCE", KAFKA_INPUT_TOPIC)
                // Supplier must yield a fresh processor per task; a constructor
                // reference is the idiomatic JDK 8 form of () -> new LogProcessor().
                .addProcessor("PROCESS", LogProcessor::new, "SOURCE")
                .addSink("SINK", KAFKA_OUTPUT_TOPIC, "PROCESS");

        // Hand topology and configuration to the streams client.
        KafkaStreams streams = new KafkaStreams(builder, config);
        // Fix: close the client on JVM shutdown so consumers/producers are
        // released cleanly (the original never closed it).
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        streams.start();
    }
}
