package com.inspur.springkafka.stream1;

import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;

import java.util.Properties;

/**
 * Demo that wires a word-count stream with the low-level Kafka Streams
 * Processor API: reads from {@code topic01}, routes records through
 * {@link WordCountProcessor}, and writes {@code (String, Integer)} counts
 * to {@code topic02}.
 */
public class WordCountTopologyDemo {
    public static void main(String[] args) {
        // 1. Configure the KafkaStreams connection.
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "word-count-lowlevel");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "CentOSA:9092,CentOSB:9092,CentOSC:9092");
        // Default key/value serde used where no explicit (de)serializer is given.
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // 2. Define the processing topology: source -> processor -> sink.
        Topology topology = new Topology();

        topology.addSource("s1", "topic01");
        topology.addProcessor("p1", WordCountProcessor::new, "s1");
        // Sink serializers override the default serdes: the processor emits
        // Integer counts, so the value serializer must be IntegerSerializer.
        topology.addSink("sk1", "topic02",
                new StringSerializer(),
                new IntegerSerializer(), "p1");

        // 3. Create the KafkaStreams instance.
        KafkaStreams kafkaStreams = new KafkaStreams(topology, props);

        // Close cleanly on JVM shutdown so consumers/state stores are released
        // (the original never closed the streams instance).
        Runtime.getRuntime().addShutdownHook(new Thread(kafkaStreams::close, "streams-shutdown-hook"));

        // 4. Start processing.
        kafkaStreams.start();
    }
}
